repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/svn/svn_prop.py | trac/tracopt/versioncontrol/svn/svn_prop.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Christian Boos <cboos@edgewall.org>
import posixpath
from genshi.builder import tag
from trac.config import ConfigSection
from trac.core import *
from trac.versioncontrol.api import NoSuchNode, RepositoryManager
from trac.versioncontrol.web_ui.browser import IPropertyRenderer
from trac.versioncontrol.web_ui.changeset import IPropertyDiffRenderer
from trac.util import Ranges, to_ranges
from trac.util.translation import _, tag_
from tracopt.versioncontrol.svn.svn_fs import _path_within_scope
class SubversionPropertyRenderer(Component):
implements(IPropertyRenderer)
svn_externals_section = ConfigSection('svn:externals',
"""The TracBrowser for Subversion can interpret the `svn:externals`
property of folders. By default, it only turns the URLs into links as
Trac can't browse remote repositories.
However, if you have another Trac instance (or an other repository
browser like [http://www.viewvc.org/ ViewVC]) configured to browse the
target repository, then you can instruct Trac which other repository
browser to use for which external URL. This mapping is done in the
`[svn:externals]` section of the TracIni.
Example:
{{{
[svn:externals]
1 = svn://server/repos1 http://trac/proj1/browser/$path?rev=$rev
2 = svn://server/repos2 http://trac/proj2/browser/$path?rev=$rev
3 = http://theirserver.org/svn/eng-soft http://ourserver/viewvc/svn/$path/?pathrev=25914
4 = svn://anotherserver.com/tools_repository http://ourserver/tracs/tools/browser/$path?rev=$rev
}}}
With the above, the
`svn://anotherserver.com/tools_repository/tags/1.1/tools` external will
be mapped to `http://ourserver/tracs/tools/browser/tags/1.1/tools?rev=`
(and `rev` will be set to the appropriate revision number if the
external additionally specifies a revision, see the
[http://svnbook.red-bean.com/en/1.4/svn.advanced.externals.html SVN Book on externals]
for more details).
Note that the number used as a key in the above section is purely used
as a place holder, as the URLs themselves can't be used as a key due to
various limitations in the configuration file parser.
Finally, the relative URLs introduced in
[http://subversion.apache.org/docs/release-notes/1.5.html#externals Subversion 1.5]
are not yet supported.
(''since 0.11'')""")
def __init__(self):
self._externals_map = {}
# IPropertyRenderer methods
def match_property(self, name, mode):
if name in ('svn:externals', 'svn:needs-lock'):
return 4
return 2 if name in ('svn:mergeinfo', 'svnmerge-blocked',
'svnmerge-integrated') else 0
def render_property(self, name, mode, context, props):
if name == 'svn:externals':
return self._render_externals(props[name])
elif name == 'svn:needs-lock':
return self._render_needslock(context)
elif name == 'svn:mergeinfo' or name.startswith('svnmerge-'):
return self._render_mergeinfo(name, mode, context, props)
def _render_externals(self, prop):
if not self._externals_map:
for dummykey, value in self.svn_externals_section.options():
value = value.split()
if len(value) != 2:
self.log.warn("svn:externals entry %s doesn't contain "
"a space-separated key value pair, skipping.",
dummykey)
continue
key, value = value
self._externals_map[key] = value.replace('%', '%%') \
.replace('$path', '%(path)s') \
.replace('$rev', '%(rev)s')
externals = []
for external in prop.splitlines():
elements = external.split()
if not elements:
continue
localpath, rev, url = elements[0], '', elements[-1]
if localpath.startswith('#'):
externals.append((external, None, None, None, None))
continue
if len(elements) == 3:
rev = elements[1]
rev = rev.replace('-r', '')
# retrieve a matching entry in the externals map
prefix = []
base_url = url
while base_url:
if base_url in self._externals_map or base_url == u'/':
break
base_url, pref = posixpath.split(base_url)
prefix.append(pref)
href = self._externals_map.get(base_url)
revstr = ' at revision ' + rev if rev else ''
if not href and (url.startswith('http://') or
url.startswith('https://')):
href = url.replace('%', '%%')
if href:
remotepath = ''
if prefix:
remotepath = posixpath.join(*reversed(prefix))
externals.append((localpath, revstr, base_url, remotepath,
href % {'path': remotepath, 'rev': rev}))
else:
externals.append((localpath, revstr, url, None, None))
externals_data = []
for localpath, rev, url, remotepath, href in externals:
label = localpath
if url is None:
title = ''
elif href:
if url:
url = ' in ' + url
label += rev + url
title = ''.join((remotepath, rev, url))
else:
title = _('No svn:externals configured in trac.ini')
externals_data.append((label, href, title))
return tag.ul([tag.li(tag.a(label, href=href, title=title))
for label, href, title in externals_data])
def _render_needslock(self, context):
return tag.img(src=context.href.chrome('common/lock-locked.png'),
alt="needs lock", title="needs lock")
def _render_mergeinfo(self, name, mode, context, props):
rows = []
for row in props[name].splitlines():
try:
(path, revs) = row.rsplit(':', 1)
rows.append([tag.td(path),
tag.td(revs.replace(',', u',\u200b'))])
except ValueError:
rows.append(tag.td(row, colspan=2))
return tag.table(tag.tbody([tag.tr(row) for row in rows]),
class_='props')
class SubversionMergePropertyRenderer(Component):
implements(IPropertyRenderer)
# IPropertyRenderer methods
def match_property(self, name, mode):
return 4 if name in ('svn:mergeinfo', 'svnmerge-blocked',
'svnmerge-integrated') else 0
def render_property(self, name, mode, context, props):
"""Parse svn:mergeinfo and svnmerge-* properties, converting branch
names to links and providing links to the revision log for merged
and eligible revisions.
"""
has_eligible = name in ('svnmerge-integrated', 'svn:mergeinfo')
revs_label = _('blocked') if name.endswith('blocked') else _('merged')
revs_cols = 2 if has_eligible else None
reponame = context.resource.parent.id
target_path = context.resource.id
repos = RepositoryManager(self.env).get_repository(reponame)
target_rev = context.resource.version
if has_eligible:
node = repos.get_node(target_path, target_rev)
branch_starts = {}
for path, rev in node.get_copy_ancestry():
if path not in branch_starts:
branch_starts[path] = rev + 1
rows = []
if name.startswith('svnmerge-'):
sources = props[name].split()
else:
sources = props[name].splitlines()
for line in sources:
path, revs = line.split(':', 1)
spath = _path_within_scope(repos.scope, path)
if spath is None:
continue
revs = revs.strip()
inheritable, non_inheritable = _partition_inheritable(revs)
revs = ','.join(inheritable)
deleted = False
try:
node = repos.get_node(spath, target_rev)
resource = context.resource.parent.child('source', spath)
if 'LOG_VIEW' in context.perm(resource):
row = [_get_source_link(spath, context),
_get_revs_link(revs_label, context, spath, revs)]
if non_inheritable:
non_inheritable = ','.join(non_inheritable)
row.append(_get_revs_link(_('non-inheritable'), context,
spath, non_inheritable,
_('merged on the directory '
'itself but not below')))
if has_eligible:
first_rev = branch_starts.get(spath)
if not first_rev:
first_rev = node.get_branch_origin()
eligible = set(xrange(first_rev or 1, target_rev + 1))
eligible -= set(Ranges(revs))
blocked = _get_blocked_revs(props, name, spath)
if blocked:
eligible -= set(Ranges(blocked))
if eligible:
nrevs = repos._get_node_revs(spath, max(eligible),
min(eligible))
eligible &= set(nrevs)
eligible = to_ranges(eligible)
row.append(_get_revs_link(_('eligible'), context,
spath, eligible))
rows.append((False, spath, [tag.td(each) for each in row]))
continue
except NoSuchNode:
deleted = True
revs = revs.replace(',', u',\u200b')
rows.append((deleted, spath,
[tag.td('/' + spath),
tag.td(revs, colspan=revs_cols)]))
if not rows:
return None
rows.sort()
has_deleted = rows[-1][0] if rows else None
return tag(has_deleted and tag.a(_('(toggle deleted branches)'),
class_='trac-toggledeleted',
href='#'),
tag.table(tag.tbody(
[tag.tr(row, class_='trac-deleted' if deleted else None)
for deleted, spath, row in rows]), class_='props'))
def _partition_inheritable(revs):
"""Non-inheritable revision ranges are marked with a trailing '*'."""
inheritable, non_inheritable = [], []
for r in revs.split(','):
if r and r[-1] == '*':
non_inheritable.append(r[:-1])
else:
inheritable.append(r)
return inheritable, non_inheritable
def _get_blocked_revs(props, name, path):
"""Return the revisions blocked from merging for the given property
name and path.
"""
if name == 'svnmerge-integrated':
prop = props.get('svnmerge-blocked', '')
else:
return ""
for line in prop.splitlines():
try:
p, revs = line.split(':', 1)
if p.strip('/') == path:
return revs
except Exception:
pass
return ""
def _get_source_link(spath, context):
"""Return a link to a merge source."""
reponame = context.resource.parent.id
return tag.a('/' + spath, title=_('View merge source'),
href=context.href.browser(reponame or None, spath,
rev=context.resource.version))
def _get_revs_link(label, context, spath, revs, title=None):
"""Return a link to the revision log when more than one revision is
given, to the revision itself for a single revision, or a `<span>`
with "no revision" for none.
"""
reponame = context.resource.parent.id
if not revs:
return tag.span(label, title=_('No revisions'))
elif ',' in revs or '-' in revs:
revs_href = context.href.log(reponame or None, spath, revs=revs)
else:
revs_href = context.href.changeset(revs, reponame or None, spath)
revs = revs.replace(',', ', ')
if title:
title = _("%(title)s: %(revs)s", title=title, revs=revs)
else:
title = revs
return tag.a(label, title=title, href=revs_href)
class SubversionMergePropertyDiffRenderer(Component):
implements(IPropertyDiffRenderer)
# IPropertyDiffRenderer methods
def match_property_diff(self, name):
return 4 if name in ('svn:mergeinfo', 'svnmerge-blocked',
'svnmerge-integrated') else 0
def render_property_diff(self, name, old_context, old_props,
new_context, new_props, options):
# Build 5 columns table showing modifications on merge sources
# || source || added || removed || added (ni) || removed (ni) ||
# || source || removed ||
rm = RepositoryManager(self.env)
repos = rm.get_repository(old_context.resource.parent.id)
def parse_sources(props):
sources = {}
for line in props[name].splitlines():
path, revs = line.split(':', 1)
spath = _path_within_scope(repos.scope, path)
if spath is not None:
inheritable, non_inheritable = _partition_inheritable(revs)
sources[spath] = (set(Ranges(inheritable)),
set(Ranges(non_inheritable)))
return sources
old_sources = parse_sources(old_props)
new_sources = parse_sources(new_props)
# Go through new sources, detect modified ones or added ones
blocked = name.endswith('blocked')
added_label = [_("merged: "), _("blocked: ")][blocked]
removed_label = [_("reverse-merged: "), _("un-blocked: ")][blocked]
added_ni_label = _("marked as non-inheritable: ")
removed_ni_label = _("unmarked as non-inheritable: ")
def revs_link(revs, context):
if revs:
revs = to_ranges(revs)
return _get_revs_link(revs.replace(',', u',\u200b'),
context, spath, revs)
modified_sources = []
for spath, (new_revs, new_revs_ni) in new_sources.iteritems():
if spath in old_sources:
(old_revs, old_revs_ni), status = old_sources.pop(spath), None
else:
old_revs = old_revs_ni = set()
status = _(' (added)')
added = new_revs - old_revs
removed = old_revs - new_revs
added_ni = new_revs_ni - old_revs_ni
removed_ni = old_revs_ni - new_revs_ni
try:
all_revs = set(repos._get_node_revs(spath))
# TODO: also pass first_rev here, for getting smaller a set
# (this is an optmization fix, result is already correct)
added &= all_revs
removed &= all_revs
added_ni &= all_revs
removed_ni &= all_revs
except NoSuchNode:
pass
if added or removed:
modified_sources.append((
spath, [_get_source_link(spath, new_context), status],
added and tag(added_label, revs_link(added, new_context)),
removed and tag(removed_label,
revs_link(removed, old_context)),
added_ni and tag(added_ni_label,
revs_link(added_ni, new_context)),
removed_ni and tag(removed_ni_label,
revs_link(removed_ni, old_context))
))
# Go through remaining old sources, those were deleted
removed_sources = []
for spath, old_revs in old_sources.iteritems():
removed_sources.append((spath,
_get_source_link(spath, old_context)))
if modified_sources or removed_sources:
modified_sources.sort()
removed_sources.sort()
changes = tag.table(tag.tbody(
[tag.tr(tag.td(c) for c in cols[1:])
for cols in modified_sources],
[tag.tr(tag.td(src), tag.td(_('removed'), colspan=4))
for spath, src in removed_sources]), class_='props')
else:
changes = tag.em(_(' (with no actual effect on merging)'))
return tag.li(tag_('Property %(prop)s changed', prop=tag.strong(name)),
changes)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/svn/svn_fs.py | trac/tracopt/versioncontrol/svn/svn_fs.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2011 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Christian Boos <cboos@edgewall.org>
"""
Note about Unicode
------------------
The Subversion bindings are not unicode-aware and they expect to
receive UTF-8 encoded `string` parameters,
On the other hand, all paths manipulated by Trac are `unicode`
objects.
Therefore:
* before being handed out to SVN, the Trac paths have to be encoded
to UTF-8, using `_to_svn()`
* before being handed out to Trac, a SVN path has to be decoded from
UTF-8, using `_from_svn()`
Whenever a value has to be stored as utf8, we explicitly mark the
variable name with "_utf8", in order to avoid any possible confusion.
Warning:
`SubversionNode.get_content()` returns an object from which one can
read a stream of bytes. NO guarantees can be given about what that
stream of bytes represents. It might be some text, encoded in some
way or another. SVN properties *might* give some hints about the
content, but they actually only reflect the beliefs of whomever set
those properties...
"""
import os.path
import weakref
import posixpath
from trac.config import ListOption
from trac.core import *
from trac.env import ISystemInfoProvider
from trac.versioncontrol import Changeset, Node, Repository, \
IRepositoryConnector, \
NoSuchChangeset, NoSuchNode
from trac.versioncontrol.cache import CachedRepository
from trac.util import embedded_numbers
from trac.util.text import exception_to_unicode, to_unicode
from trac.util.translation import _
from trac.util.datefmt import from_utimestamp
application_pool = None
def _import_svn():
global fs, repos, core, delta, _kindmap
from svn import fs, repos, core, delta
_kindmap = {core.svn_node_dir: Node.DIRECTORY,
core.svn_node_file: Node.FILE}
# Protect svn.core methods from GC
Pool.apr_pool_clear = staticmethod(core.apr_pool_clear)
Pool.apr_pool_destroy = staticmethod(core.apr_pool_destroy)
def _to_svn(pool, *args):
"""Expect a pool and a list of `unicode` path components.
Returns an UTF-8 encoded string suitable for the Subversion python
bindings (the returned path never starts with a leading "/")
"""
return core.svn_path_canonicalize('/'.join(args).lstrip('/')
.encode('utf-8'),
pool)
def _from_svn(path):
"""Expect an UTF-8 encoded string and transform it to an `unicode` object
But Subversion repositories built from conversion utilities can have
non-UTF-8 byte strings, so we have to convert using `to_unicode`.
"""
return path and to_unicode(path, 'utf-8')
# The following 3 helpers deal with unicode paths
def _normalize_path(path):
"""Remove leading "/", except for the root."""
return path and path.strip('/') or '/'
def _path_within_scope(scope, fullpath):
"""Remove the leading scope from repository paths.
Return `None` if the path is not is scope.
"""
if fullpath is not None:
fullpath = fullpath.lstrip('/')
if scope == '/':
return _normalize_path(fullpath)
scope = scope.strip('/')
if (fullpath + '/').startswith(scope + '/'):
return fullpath[len(scope) + 1:] or '/'
def _is_path_within_scope(scope, fullpath):
"""Check whether the given `fullpath` is within the given `scope`"""
if scope == '/':
return fullpath is not None
fullpath = fullpath.lstrip('/') if fullpath else ''
scope = scope.strip('/')
return (fullpath + '/').startswith(scope + '/')
# svn_opt_revision_t helpers
def _svn_rev(num):
value = core.svn_opt_revision_value_t()
value.number = num
revision = core.svn_opt_revision_t()
revision.kind = core.svn_opt_revision_number
revision.value = value
return revision
def _svn_head():
revision = core.svn_opt_revision_t()
revision.kind = core.svn_opt_revision_head
return revision
# apr_pool_t helpers
def _mark_weakpool_invalid(weakpool):
if weakpool():
weakpool()._mark_invalid()
class Pool(object):
"""A Pythonic memory pool object"""
def __init__(self, parent_pool=None):
"""Create a new memory pool"""
global application_pool
self._parent_pool = parent_pool or application_pool
# Create pool
if self._parent_pool:
self._pool = core.svn_pool_create(self._parent_pool())
else:
# If we are an application-level pool,
# then initialize APR and set this pool
# to be the application-level pool
core.apr_initialize()
application_pool = self
self._pool = core.svn_pool_create(None)
self._mark_valid()
def __call__(self):
return self._pool
def valid(self):
"""Check whether this memory pool and its parents
are still valid"""
return hasattr(self,"_is_valid")
def assert_valid(self):
"""Assert that this memory_pool is still valid."""
assert self.valid()
def clear(self):
"""Clear embedded memory pool. Invalidate all subpools."""
self.apr_pool_clear(self._pool)
self._mark_valid()
def destroy(self):
"""Destroy embedded memory pool. If you do not destroy
the memory pool manually, Python will destroy it
automatically."""
global application_pool
self.assert_valid()
# Destroy pool
self.apr_pool_destroy(self._pool)
# Clear application pool and terminate APR if necessary
if not self._parent_pool:
application_pool = None
self._mark_invalid()
def __del__(self):
"""Automatically destroy memory pools, if necessary"""
if self.valid():
self.destroy()
def _mark_valid(self):
"""Mark pool as valid"""
if self._parent_pool:
# Refer to self using a weakreference so that we don't
# create a reference cycle
weakself = weakref.ref(self)
# Set up callbacks to mark pool as invalid when parents
# are destroyed
self._weakref = weakref.ref(self._parent_pool._is_valid,
lambda x: \
_mark_weakpool_invalid(weakself))
# mark pool as valid
self._is_valid = lambda: 1
def _mark_invalid(self):
"""Mark pool as invalid"""
if self.valid():
# Mark invalid
del self._is_valid
# Free up memory
del self._parent_pool
if hasattr(self, "_weakref"):
del self._weakref
class SvnCachedRepository(CachedRepository):
"""Subversion-specific cached repository, zero-pads revision numbers
in the cache tables.
"""
has_linear_changesets = True
def db_rev(self, rev):
return '%010d' % rev
def rev_db(self, rev):
return int(rev or 0)
class SubversionConnector(Component):
implements(ISystemInfoProvider, IRepositoryConnector)
branches = ListOption('svn', 'branches', 'trunk, branches/*', doc=
"""Comma separated list of paths categorized as branches.
If a path ends with '*', then all the directory entries found below
that path will be included.
Example: `/trunk, /branches/*, /projectAlpha/trunk, /sandbox/*`
""")
tags = ListOption('svn', 'tags', 'tags/*', doc=
"""Comma separated list of paths categorized as tags.
If a path ends with '*', then all the directory entries found below
that path will be included.
Example: `/tags/*, /projectAlpha/tags/A-1.0, /projectAlpha/tags/A-v1.1`
""")
error = None
def __init__(self):
self._version = None
try:
_import_svn()
self.log.debug('Subversion bindings imported')
except ImportError, e:
self.error = e
self.log.info('Failed to load Subversion bindings', exc_info=True)
else:
version = (core.SVN_VER_MAJOR, core.SVN_VER_MINOR,
core.SVN_VER_MICRO)
self._version = '%d.%d.%d' % version + core.SVN_VER_TAG
if version[0] < 1:
self.error = _("Subversion >= 1.0 required, found %(version)s",
version=self._version)
Pool()
# ISystemInfoProvider methods
def get_system_info(self):
if self._version is not None:
yield 'Subversion', self._version
# IRepositoryConnector methods
def get_supported_types(self):
prio = 1
if self.error:
prio = -1
yield ("direct-svnfs", prio * 4)
yield ("svnfs", prio * 4)
yield ("svn", prio * 2)
def get_repository(self, type, dir, params):
"""Return a `SubversionRepository`.
The repository is wrapped in a `CachedRepository`, unless `type` is
'direct-svnfs'.
"""
params.update(tags=self.tags, branches=self.branches)
repos = SubversionRepository(dir, params, self.log)
if type != 'direct-svnfs':
repos = SvnCachedRepository(self.env, repos, self.log)
return repos
class SubversionRepository(Repository):
"""Repository implementation based on the svn.fs API."""
has_linear_changesets = True
def __init__(self, path, params, log):
self.log = log
self.pool = Pool()
# Remove any trailing slash or else subversion might abort
if isinstance(path, unicode):
path_utf8 = path.encode('utf-8')
else: # note that this should usually not happen (unicode arg expected)
path_utf8 = to_unicode(path).encode('utf-8')
path_utf8 = os.path.normpath(path_utf8).replace('\\', '/')
self.path = path_utf8.decode('utf-8')
root_path_utf8 = repos.svn_repos_find_root_path(path_utf8, self.pool())
if root_path_utf8 is None:
raise TracError(_("%(path)s does not appear to be a Subversion "
"repository.", path=to_unicode(path_utf8)))
try:
self.repos = repos.svn_repos_open(root_path_utf8, self.pool())
except core.SubversionException, e:
raise TracError(_("Couldn't open Subversion repository %(path)s: "
"%(svn_error)s", path=to_unicode(path_utf8),
svn_error=exception_to_unicode(e)))
self.fs_ptr = repos.svn_repos_fs(self.repos)
self.uuid = fs.get_uuid(self.fs_ptr, self.pool())
self.base = 'svn:%s:%s' % (self.uuid, _from_svn(root_path_utf8))
name = 'svn:%s:%s' % (self.uuid, self.path)
Repository.__init__(self, name, params, log)
# if root_path_utf8 is shorter than the path_utf8, the difference is
# this scope (which always starts with a '/')
if root_path_utf8 != path_utf8:
self.scope = path_utf8[len(root_path_utf8):].decode('utf-8')
if not self.scope[-1] == '/':
self.scope += '/'
else:
self.scope = '/'
assert self.scope[0] == '/'
# we keep root_path_utf8 for RA
ra_prefix = 'file:///' if os.name == 'nt' else 'file://'
self.ra_url_utf8 = ra_prefix + root_path_utf8
self.clear()
def clear(self, youngest_rev=None):
"""Reset notion of `youngest` and `oldest`"""
self.youngest = None
if youngest_rev is not None:
self.youngest = self.normalize_rev(youngest_rev)
self.oldest = None
def __del__(self):
self.close()
def has_node(self, path, rev=None, pool=None):
"""Check if `path` exists at `rev` (or latest if unspecified)"""
if not pool:
pool = self.pool
rev = self.normalize_rev(rev)
rev_root = fs.revision_root(self.fs_ptr, rev, pool())
node_type = fs.check_path(rev_root, _to_svn(pool(), self.scope, path),
pool())
return node_type in _kindmap
def normalize_path(self, path):
"""Take any path specification and produce a path suitable for
the rest of the API
"""
return _normalize_path(path)
def normalize_rev(self, rev):
"""Take any revision specification and produce a revision suitable
for the rest of the API
"""
if rev is None or isinstance(rev, basestring) and \
rev.lower() in ('', 'head', 'latest', 'youngest'):
return self.youngest_rev
else:
try:
rev = int(rev)
if rev <= self.youngest_rev:
return rev
except (ValueError, TypeError):
pass
raise NoSuchChangeset(rev)
def close(self):
"""Dispose of low-level resources associated to this repository."""
if self.pool:
self.pool.destroy()
self.repos = self.fs_ptr = self.pool = None
def get_base(self):
"""Retrieve the base path corresponding to the Subversion
repository itself.
This is the same as the `.path` property minus the
intra-repository scope, if one was specified.
"""
return self.base
def _get_tags_or_branches(self, paths):
"""Retrieve known branches or tags."""
for path in self.params.get(paths, []):
if path.endswith('*'):
folder = posixpath.dirname(path)
try:
entries = [n for n in self.get_node(folder).get_entries()]
for node in sorted(entries, key=lambda n:
embedded_numbers(n.path.lower())):
if node.kind == Node.DIRECTORY:
yield node
except Exception: # no right (TODO: use a specific Exception)
pass
else:
try:
yield self.get_node(path)
except Exception: # no right
pass
def get_quickjump_entries(self, rev):
"""Retrieve known branches, as (name, id) pairs.
Purposedly ignores `rev` and always takes the last revision.
"""
for n in self._get_tags_or_branches('branches'):
yield 'branches', n.path, n.path, None
for n in self._get_tags_or_branches('tags'):
yield 'tags', n.path, n.created_path, n.created_rev
def get_path_url(self, path, rev):
"""Retrieve the "native" URL from which this repository is reachable
from Subversion clients.
"""
url = self.params.get('url', '').rstrip('/')
if url:
if not path or path == '/':
return url
return url + '/' + path.lstrip('/')
def get_changeset(self, rev):
"""Produce a `SubversionChangeset` from given revision
specification"""
rev = self.normalize_rev(rev)
return SubversionChangeset(self, rev, self.scope, self.pool)
def get_changeset_uid(self, rev):
"""Build a value identifying the `rev` in this repository."""
return (self.uuid, rev)
def get_node(self, path, rev=None):
"""Produce a `SubversionNode` from given path and optionally revision
specifications. No revision given means use the latest.
"""
path = path or ''
if path and path[-1] == '/':
path = path[:-1]
rev = self.normalize_rev(rev) or self.youngest_rev
return SubversionNode(path, rev, self, self.pool)
def _get_node_revs(self, path, last=None, first=None):
"""Return the revisions affecting `path` between `first` and `last`
revs. If `first` is not given, it goes down to the revision in which
the branch was created.
"""
node = self.get_node(path, last)
revs = []
for (p, r, chg) in node.get_history():
if p != path or (first and r < first):
break
revs.append(r)
return revs
def _history(self, path, start, end, pool):
"""`path` is a unicode path in the scope.
Generator yielding `(path, rev)` pairs, where `path` is an `unicode`
object. Must start with `(path, created rev)`.
(wraps ``fs.node_history``)
"""
path_utf8 = _to_svn(pool(), self.scope, path)
if start < end:
start, end = end, start
if (start, end) == (1, 0): # only happens for empty repos
return
root = fs.revision_root(self.fs_ptr, start, pool())
# fs.node_history leaks when path doesn't exist (#6588)
if fs.check_path(root, path_utf8, pool()) == core.svn_node_none:
return
tmp1 = Pool(pool)
tmp2 = Pool(pool)
history_ptr = fs.node_history(root, path_utf8, tmp1())
cross_copies = 1
while history_ptr:
history_ptr = fs.history_prev(history_ptr, cross_copies, tmp2())
tmp1.clear()
tmp1, tmp2 = tmp2, tmp1
if history_ptr:
path_utf8, rev = fs.history_location(history_ptr, tmp2())
tmp2.clear()
if rev < end:
break
path = _from_svn(path_utf8)
yield path, rev
del tmp1
del tmp2
def _previous_rev(self, rev, path='', pool=None):
if rev > 1: # don't use oldest here, as it's too expensive
for _, prev in self._history(path, 1, rev-1, pool or self.pool):
return prev
return None
def get_oldest_rev(self):
"""Gives an approximation of the oldest revision."""
if self.oldest is None:
self.oldest = 1
# trying to figure out the oldest rev for scoped repository
# is too expensive and uncovers a big memory leak (#5213)
# if self.scope != '/':
# self.oldest = self.next_rev(0, find_initial_rev=True)
return self.oldest
def get_youngest_rev(self):
"""Retrieve the latest revision in the repository.
(wraps ``fs.youngest_rev``)
"""
if not self.youngest:
self.youngest = fs.youngest_rev(self.fs_ptr, self.pool())
if self.scope != '/':
for path, rev in self._history('', 1, self.youngest, self.pool):
self.youngest = rev
break
return self.youngest
def previous_rev(self, rev, path=''):
"""Return revision immediately preceeding `rev`, eventually below
given `path` or globally.
"""
# FIXME optimize for non-scoped
rev = self.normalize_rev(rev)
return self._previous_rev(rev, path)
def next_rev(self, rev, path='', find_initial_rev=False):
"""Return revision immediately following `rev`, eventually below
given `path` or globally.
"""
rev = self.normalize_rev(rev)
next = rev + 1
youngest = self.youngest_rev
subpool = Pool(self.pool)
while next <= youngest:
subpool.clear()
for _, next in self._history(path, rev+1, next, subpool):
return next
else:
if not find_initial_rev and \
not self.has_node(path, next, subpool):
return next # a 'delete' event is also interesting...
next += 1
return None
def rev_older_than(self, rev1, rev2):
"""Check relative order between two revision specifications."""
return self.normalize_rev(rev1) < self.normalize_rev(rev2)
def get_path_history(self, path, rev=None, limit=None):
"""Retrieve creation and deletion events that happened on
given `path`.
"""
path = self.normalize_path(path)
rev = self.normalize_rev(rev)
expect_deletion = False
subpool = Pool(self.pool)
numrevs = 0
while rev and (not limit or numrevs < limit):
subpool.clear()
if self.has_node(path, rev, subpool):
if expect_deletion:
# it was missing, now it's there again:
# rev+1 must be a delete
numrevs += 1
yield path, rev+1, Changeset.DELETE
newer = None # 'newer' is the previously seen history tuple
older = None # 'older' is the currently examined history tuple
for p, r in self._history(path, 1, rev, subpool):
older = (_path_within_scope(self.scope, p), r,
Changeset.ADD)
rev = self._previous_rev(r, pool=subpool)
if newer:
numrevs += 1
if older[0] == path:
# still on the path: 'newer' was an edit
yield newer[0], newer[1], Changeset.EDIT
else:
# the path changed: 'newer' was a copy
rev = self._previous_rev(newer[1], pool=subpool)
# restart before the copy op
yield newer[0], newer[1], Changeset.COPY
older = (older[0], older[1], 'unknown')
break
newer = older
if older:
# either a real ADD or the source of a COPY
numrevs += 1
yield older
else:
expect_deletion = True
rev = self._previous_rev(rev, pool=subpool)
def get_changes(self, old_path, old_rev, new_path, new_rev,
ignore_ancestry=0):
"""Determine differences between two arbitrary pairs of paths
and revisions.
(wraps ``repos.svn_repos_dir_delta``)
"""
def key(value):
return value[1].path if value[1] is not None else value[0].path
return iter(sorted(self._get_changes(old_path, old_rev, new_path,
new_rev, ignore_ancestry),
key=key))
    def _get_changes(self, old_path, old_rev, new_path, new_rev,
                     ignore_ancestry):
        """Generate the raw (unsorted) differences between two
        (path, revision) pairs.

        Yields `(old_node, new_node, kind, change)` tuples.  Both
        endpoints must exist and be of the same kind, otherwise
        `NoSuchNode` resp. `TracError` is raised.
        """
        old_node = new_node = None
        old_rev = self.normalize_rev(old_rev)
        new_rev = self.normalize_rev(new_rev)
        # Both endpoints must exist; report which side is bad.
        if self.has_node(old_path, old_rev):
            old_node = self.get_node(old_path, old_rev)
        else:
            raise NoSuchNode(old_path, old_rev, 'The Base for Diff is invalid')
        if self.has_node(new_path, new_rev):
            new_node = self.get_node(new_path, new_rev)
        else:
            raise NoSuchNode(new_path, new_rev,
                             'The Target for Diff is invalid')
        if new_node.kind != old_node.kind:
            raise TracError(_('Diff mismatch: Base is a %(oldnode)s '
                              '(%(oldpath)s in revision %(oldrev)s) and '
                              'Target is a %(newnode)s (%(newpath)s in '
                              'revision %(newrev)s).', oldnode=old_node.kind,
                              oldpath=old_path, oldrev=old_rev,
                              newnode=new_node.kind, newpath=new_path,
                              newrev=new_rev))
        subpool = Pool(self.pool)
        if new_node.isdir:
            # Directory diff: have Subversion compute the delta between
            # the two trees and replay it into a DiffChangeEditor.
            editor = DiffChangeEditor()
            e_ptr, e_baton = delta.make_editor(editor, subpool())
            old_root = fs.revision_root(self.fs_ptr, old_rev, subpool())
            new_root = fs.revision_root(self.fs_ptr, new_rev, subpool())
            def authz_cb(root, path, pool):
                # no path-based filtering at this level
                return 1
            text_deltas = 0 # as this is anyway re-done in Diff.py...
            entry_props = 0 # "... typically used only for working copy updates"
            repos.svn_repos_dir_delta(old_root,
                                      _to_svn(subpool(), self.scope, old_path),
                                      '', new_root,
                                      _to_svn(subpool(), self.scope, new_path),
                                      e_ptr, e_baton, authz_cb,
                                      text_deltas,
                                      1, # directory
                                      entry_props,
                                      ignore_ancestry,
                                      subpool())
            for path, kind, change in editor.deltas:
                path = _from_svn(path)
                old_node = new_node = None
                if change != Changeset.ADD:
                    old_node = self.get_node(posixpath.join(old_path, path),
                                             old_rev)
                if change != Changeset.DELETE:
                    new_node = self.get_node(posixpath.join(new_path, path),
                                             new_rev)
                else:
                    # deleted entry: its kind must come from the old tree
                    kind = _kindmap[fs.check_path(old_root,
                                                  _to_svn(subpool(),
                                                          self.scope,
                                                          old_node.path),
                                                  subpool())]
                yield (old_node, new_node, kind, change)
        else:
            # File diff: at most one EDIT event, yielded only when the
            # contents actually differ.
            old_root = fs.revision_root(self.fs_ptr, old_rev, subpool())
            new_root = fs.revision_root(self.fs_ptr, new_rev, subpool())
            if fs.contents_changed(old_root,
                                   _to_svn(subpool(), self.scope, old_path),
                                   new_root,
                                   _to_svn(subpool(), self.scope, new_path),
                                   subpool()):
                yield (old_node, new_node, Node.FILE, Changeset.EDIT)
class SubversionNode(Node):
    def __init__(self, path, rev, repos, pool=None, parent_root=None):
        """Create a node for `path` at revision `rev` in `repos`.

        `pool` is an optional parent memory pool.  `parent_root`
        allows a parent directory node to share its already-opened
        revision root (see `get_entries`), saving one
        `fs.revision_root` call per child.

        Raises `NoSuchNode` if `path` does not exist at `rev`.
        """
        self.fs_ptr = repos.fs_ptr
        self.scope = repos.scope
        self.pool = Pool(pool)
        pool = self.pool()
        self._scoped_path_utf8 = _to_svn(pool, self.scope, path)
        if parent_root:
            self.root = parent_root
        else:
            self.root = fs.revision_root(self.fs_ptr, rev, pool)
        node_type = fs.check_path(self.root, self._scoped_path_utf8, pool)
        if not node_type in _kindmap:
            raise NoSuchNode(path, rev)
        cp_utf8 = fs.node_created_path(self.root, self._scoped_path_utf8, pool)
        cp = _from_svn(cp_utf8)
        cr = fs.node_created_rev(self.root, self._scoped_path_utf8, pool)
        # Note: `cp` differs from `path` if the last change was a copy,
        # In that case, `path` doesn't even exist at `cr`.
        # The only guarantees are:
        # * this node exists at (path,rev)
        # * the node existed at (created_path,created_rev)
        # Also, `cp` might well be out of the scope of the repository,
        # in this case, we _don't_ use the ''create'' information.
        if _is_path_within_scope(self.scope, cp):
            self.created_rev = cr
            self.created_path = _path_within_scope(self.scope, cp)
        else:
            self.created_rev, self.created_path = rev, path
        # TODO: check node id
        Node.__init__(self, repos, path, rev, _kindmap[node_type])
    def get_content(self):
        """Retrieve raw content as a "read()"able object.

        Return `None` for a directory.
        """
        if self.isdir:
            return None
        pool = Pool(self.pool)
        s = core.Stream(fs.file_contents(self.root, self._scoped_path_utf8,
                                         pool()))
        # The stream object needs to reference the pool to make sure the pool
        # is not destroyed before the former.
        s._pool = pool
        return s
def get_entries(self):
"""Yield `SubversionNode` corresponding to entries in this directory.
(wraps ``fs.dir_entries``)
"""
if self.isfile:
return
pool = Pool(self.pool)
entries = fs.dir_entries(self.root, self._scoped_path_utf8, pool())
for item in entries.keys():
path = posixpath.join(self.path, _from_svn(item))
yield SubversionNode(path, self.rev, self.repos, self.pool,
self.root)
    def get_history(self, limit=None):
        """Yield change events that happened on this path.

        Produces `(path, rev, change)` tuples, most recent first,
        stopping after `limit` events when given.  A path change
        between two consecutive entries marks the newer one as a
        COPY, otherwise an EDIT; the oldest entry keeps ADD.
        """
        newer = None # 'newer' is the previously seen history tuple
        older = None # 'older' is the currently examined history tuple
        pool = Pool(self.pool)
        numrevs = 0
        for path, rev in self.repos._history(self.path, 1, self.rev, pool):
            # out-of-scope portions of the history are skipped below
            path = _path_within_scope(self.scope, path)
            if rev > 0 and path:
                older = (path, rev, Changeset.ADD)
                if newer:
                    if newer[0] == older[0]: # stay on same path
                        change = Changeset.EDIT
                    else:
                        change = Changeset.COPY
                    newer = (newer[0], newer[1], change)
                    numrevs += 1
                    yield newer
                newer = older
            if limit and numrevs >= limit:
                break
        # the trailing entry is yielded as-is (ADD)
        if newer and (not limit or numrevs < limit):
            yield newer
    def get_annotations(self):
        """Return a list of the last changed revision for each line.

        (wraps ``client.blame2``)

        Only files can be annotated; an empty list is returned
        otherwise.  Raises `TracError` when blame fails (e.g. svn
        considers the file binary).
        """
        annotations = []
        if self.isfile:
            def blame_receiver(line_no, revision, author, date, line, pool):
                annotations.append(revision)
            try:
                rev = _svn_rev(self.rev)
                start = _svn_rev(0)
                # blame is only available through the RA layer, so an
                # ra_local session is opened on the repository file URL
                file_url_utf8 = posixpath.join(self.repos.ra_url_utf8,
                                               self._scoped_path_utf8)
                self.repos.log.info('opening ra_local session to %r',
                                    file_url_utf8)
                from svn import client
                client.blame2(file_url_utf8, rev, start, rev, blame_receiver,
                              client.create_context(), self.pool())
            except (core.SubversionException, AttributeError), e:
                # svn thinks file is a binary or blame not supported
                raise TracError(_('svn blame failed on %(path)s: %(error)s',
                                  path=self.path, error=to_unicode(e)))
        return annotations
# def get_previous(self):
# # FIXME: redo it with fs.node_history
def get_properties(self):
"""Return `dict` of node properties at current revision.
(wraps ``fs.node_proplist``)
"""
props = fs.node_proplist(self.root, self._scoped_path_utf8, self.pool())
for name, value in props.items():
# Note that property values can be arbitrary binary values
# so we can't assume they are UTF-8 strings...
props[_from_svn(name)] = to_unicode(value)
return props
def get_content_length(self):
"""Retrieve byte size of a file.
Return `None` for a folder. (wraps ``fs.file_length``)
"""
if self.isdir:
return None
return fs.file_length(self.root, self._scoped_path_utf8, self.pool())
def get_content_type(self):
"""Retrieve mime-type property of a file.
Return `None` for a folder. (wraps ``fs.revision_prop``)
"""
if self.isdir:
return None
return self._get_prop(core.SVN_PROP_MIME_TYPE)
def get_last_modified(self):
"""Retrieve timestamp of last modification, in micro-seconds.
(wraps ``fs.revision_prop``)
"""
_date = fs.revision_prop(self.fs_ptr, self.created_rev,
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/svn/__init__.py | trac/tracopt/versioncontrol/svn/__init__.py | python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false | |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/svn/tests/svn_fs.py | trac/tracopt/versioncontrol/svn/tests/svn_fs.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from datetime import datetime
import new
import os.path
import stat
import shutil
import tempfile
import unittest
from StringIO import StringIO
try:
from svn import core, repos
has_svn = True
except ImportError:
has_svn = False
from trac.test import EnvironmentStub, TestSetup
from trac.core import TracError
from trac.resource import Resource, resource_exists
from trac.util.concurrency import get_thread_id
from trac.util.datefmt import utc
from trac.versioncontrol import DbRepositoryProvider, Changeset, Node, \
NoSuchChangeset
from tracopt.versioncontrol.svn import svn_fs
# On-disk location of the test repository restored from svnrepos.dump;
# shared by every test case in this module.
REPOS_PATH = os.path.join(tempfile.gettempdir(), 'trac-svnrepos')
REPOS_NAME = 'repo'
# Youngest revision of the whole repository.
HEAD = 22
# Youngest revision visible inside the u'tête' scope (see ScopedTests).
TETE = 21
class SubversionRepositoryTestSetup(TestSetup):
    """Create the test repository at `REPOS_PATH` from `svnrepos.dump`
    before the suite runs, and delete it afterwards.
    """
    def setUp(self):
        # NOTE(review): `dumpfile` is never explicitly closed.
        dumpfile = open(os.path.join(os.path.split(__file__)[0],
                                     'svnrepos.dump'))
        svn_fs._import_svn()
        core.apr_initialize()
        pool = core.svn_pool_create(None)
        dumpstream = None
        try:
            if os.path.exists(REPOS_PATH):
                print 'trouble ahead with db/rep-cache.db... see #8278'
            r = repos.svn_repos_create(REPOS_PATH, '', '', None, None, pool)
            if hasattr(repos, 'svn_repos_load_fs2'):
                # newer bindings accept the Python file object directly
                repos.svn_repos_load_fs2(r, dumpfile, StringIO(),
                                        repos.svn_repos_load_uuid_default, '',
                                        0, 0, None, pool)
            else:
                # older bindings need an explicit svn stream wrapper
                dumpstream = core.svn_stream_from_aprfile(dumpfile, pool)
                repos.svn_repos_load_fs(r, dumpstream, None,
                                        repos.svn_repos_load_uuid_default, '',
                                        None, None, pool)
        finally:
            if dumpstream:
                core.svn_stream_close(dumpstream)
            core.svn_pool_destroy(pool)
            core.apr_terminate()
    def tearDown(self):
        repos.svn_repos_delete(REPOS_PATH)
# -- Re-usable test mixins
class NormalTests(object):
    """Test cases run against the full (unscoped) test repository.

    Uses ``self.assertEqual``/``self.assertRaises`` without deriving
    from TestCase, so this is presumably a mixin for concrete
    TestCase classes — verify against the module's suite setup.
    Expected paths, revisions and dates come from ``svnrepos.dump``.
    """
    def test_resource_exists(self):
        repos = Resource('repository', REPOS_NAME)
        self.assertEqual(True, resource_exists(self.env, repos))
        self.assertEqual(False, resource_exists(self.env, repos(id='xxx')))
        node = repos.child('source', u'tête')
        self.assertEqual(True, resource_exists(self.env, node))
        self.assertEqual(False, resource_exists(self.env, node(id='xxx')))
        cset = repos.child('changeset', HEAD)
        self.assertEqual(True, resource_exists(self.env, cset))
        self.assertEqual(False, resource_exists(self.env, cset(id=123456)))
    def test_repos_normalize_path(self):
        self.assertEqual('/', self.repos.normalize_path('/'))
        self.assertEqual('/', self.repos.normalize_path(''))
        self.assertEqual('/', self.repos.normalize_path(None))
        self.assertEqual(u'tête', self.repos.normalize_path(u'tête'))
        self.assertEqual(u'tête', self.repos.normalize_path(u'/tête'))
        self.assertEqual(u'tête', self.repos.normalize_path(u'tête/'))
        self.assertEqual(u'tête', self.repos.normalize_path(u'/tête/'))
    def test_repos_normalize_rev(self):
        self.assertEqual(HEAD, self.repos.normalize_rev('latest'))
        self.assertEqual(HEAD, self.repos.normalize_rev('head'))
        self.assertEqual(HEAD, self.repos.normalize_rev(''))
        self.assertRaises(NoSuchChangeset,
                          self.repos.normalize_rev, 'something else')
        self.assertEqual(HEAD, self.repos.normalize_rev(None))
        self.assertEqual(11, self.repos.normalize_rev('11'))
        self.assertEqual(11, self.repos.normalize_rev(11))
    def test_rev_navigation(self):
        self.assertEqual(1, self.repos.oldest_rev)
        self.assertEqual(None, self.repos.previous_rev(0))
        self.assertEqual(None, self.repos.previous_rev(1))
        self.assertEqual(HEAD, self.repos.youngest_rev)
        self.assertEqual(6, self.repos.next_rev(5))
        self.assertEqual(7, self.repos.next_rev(6))
        # ...
        self.assertEqual(None, self.repos.next_rev(HEAD))
        self.assertRaises(NoSuchChangeset, self.repos.normalize_rev, HEAD + 1)
    def test_rev_path_navigation(self):
        self.assertEqual(1, self.repos.oldest_rev)
        self.assertEqual(None, self.repos.previous_rev(0, u'tête'))
        self.assertEqual(None, self.repos.previous_rev(1, u'tête'))
        self.assertEqual(HEAD, self.repos.youngest_rev)
        self.assertEqual(6, self.repos.next_rev(5, u'tête'))
        self.assertEqual(13, self.repos.next_rev(6, u'tête'))
        # ...
        self.assertEqual(None, self.repos.next_rev(HEAD, u'tête'))
        # test accentuated characters
        self.assertEqual(None,
                         self.repos.previous_rev(17, u'tête/R\xe9sum\xe9.txt'))
        self.assertEqual(17, self.repos.next_rev(16, u'tête/R\xe9sum\xe9.txt'))
    def test_has_node(self):
        self.assertEqual(False, self.repos.has_node(u'/tête/dir1', 3))
        self.assertEqual(True, self.repos.has_node(u'/tête/dir1', 4))
        self.assertEqual(True, self.repos.has_node(u'/tête/dir1'))
    def test_get_node(self):
        node = self.repos.get_node(u'/tête')
        self.assertEqual(u'tête', node.name)
        self.assertEqual(u'/tête', node.path)
        self.assertEqual(Node.DIRECTORY, node.kind)
        self.assertEqual(HEAD, node.rev)
        self.assertEqual(TETE, node.created_rev)
        self.assertEqual(datetime(2007, 4, 30, 17, 45, 26, 234375, utc),
                         node.last_modified)
        node = self.repos.get_node(u'/tête/README.txt')
        self.assertEqual('README.txt', node.name)
        self.assertEqual(u'/tête/README.txt', node.path)
        self.assertEqual(Node.FILE, node.kind)
        self.assertEqual(HEAD, node.rev)
        self.assertEqual(3, node.created_rev)
        self.assertEqual(datetime(2005, 4, 1, 13, 24, 58, 234643, utc),
                         node.last_modified)
    def test_get_node_specific_rev(self):
        node = self.repos.get_node(u'/tête', 1)
        self.assertEqual(u'tête', node.name)
        self.assertEqual(u'/tête', node.path)
        self.assertEqual(Node.DIRECTORY, node.kind)
        self.assertEqual(1, node.rev)
        self.assertEqual(datetime(2005, 4, 1, 10, 0, 52, 353248, utc),
                         node.last_modified)
        node = self.repos.get_node(u'/tête/README.txt', 2)
        self.assertEqual('README.txt', node.name)
        self.assertEqual(u'/tête/README.txt', node.path)
        self.assertEqual(Node.FILE, node.kind)
        self.assertEqual(2, node.rev)
        self.assertEqual(datetime(2005, 4, 1, 13, 12, 18, 216267, utc),
                         node.last_modified)
    def test_get_dir_entries(self):
        node = self.repos.get_node(u'/tête')
        entries = node.get_entries()
        self.assertEqual('dir1', entries.next().name)
        self.assertEqual('mpp_proc', entries.next().name)
        self.assertEqual('v2', entries.next().name)
        self.assertEqual('README3.txt', entries.next().name)
        self.assertEqual(u'R\xe9sum\xe9.txt', entries.next().name)
        self.assertEqual('README.txt', entries.next().name)
        self.assertRaises(StopIteration, entries.next)
    def test_get_file_entries(self):
        node = self.repos.get_node(u'/tête/README.txt')
        entries = node.get_entries()
        self.assertRaises(StopIteration, entries.next)
    def test_get_dir_content(self):
        node = self.repos.get_node(u'/tête')
        self.assertEqual(None, node.content_length)
        self.assertEqual(None, node.content_type)
        self.assertEqual(None, node.get_content())
    def test_get_file_content(self):
        node = self.repos.get_node(u'/tête/README.txt')
        self.assertEqual(8, node.content_length)
        self.assertEqual('text/plain', node.content_type)
        self.assertEqual('A test.\n', node.get_content().read())
    def test_get_dir_properties(self):
        f = self.repos.get_node(u'/tête')
        props = f.get_properties()
        self.assertEqual(1, len(props))
    def test_get_file_properties(self):
        f = self.repos.get_node(u'/tête/README.txt')
        props = f.get_properties()
        self.assertEqual('native', props['svn:eol-style'])
        self.assertEqual('text/plain', props['svn:mime-type'])
    def test_created_path_rev(self):
        node = self.repos.get_node(u'/tête/README3.txt', 15)
        self.assertEqual(15, node.rev)
        self.assertEqual(u'/tête/README3.txt', node.path)
        self.assertEqual(14, node.created_rev)
        self.assertEqual(u'tête/README3.txt', node.created_path)
    def test_created_path_rev_parent_copy(self):
        node = self.repos.get_node('/tags/v1/README.txt', 15)
        self.assertEqual(15, node.rev)
        self.assertEqual('/tags/v1/README.txt', node.path)
        self.assertEqual(3, node.created_rev)
        self.assertEqual(u'tête/README.txt', node.created_path)
    # Revision Log / node history
    def test_get_node_history(self):
        node = self.repos.get_node(u'/tête/README3.txt')
        history = node.get_history()
        self.assertEqual((u'tête/README3.txt', 14, 'copy'), history.next())
        self.assertEqual((u'tête/README2.txt', 6, 'copy'), history.next())
        self.assertEqual((u'tête/README.txt', 3, 'edit'), history.next())
        self.assertEqual((u'tête/README.txt', 2, 'add'), history.next())
        self.assertRaises(StopIteration, history.next)
    def test_get_node_history_limit(self):
        node = self.repos.get_node(u'/tête/README3.txt')
        history = node.get_history(2)
        self.assertEqual((u'tête/README3.txt', 14, 'copy'), history.next())
        self.assertEqual((u'tête/README2.txt', 6, 'copy'), history.next())
        self.assertRaises(StopIteration, history.next)
    def test_get_node_history_follow_copy(self):
        node = self.repos.get_node('/tags/v1/README.txt')
        history = node.get_history()
        self.assertEqual(('tags/v1/README.txt', 7, 'copy'), history.next())
        self.assertEqual((u'tête/README.txt', 3, 'edit'), history.next())
        self.assertEqual((u'tête/README.txt', 2, 'add'), history.next())
        self.assertRaises(StopIteration, history.next)
    def test_get_copy_ancestry(self):
        node = self.repos.get_node('/tags/v1/README.txt')
        ancestry = node.get_copy_ancestry()
        self.assertEqual([(u'tête/README.txt', 6)], ancestry)
        for path, rev in ancestry:
            self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
        node = self.repos.get_node(u'/tête/README3.txt')
        ancestry = node.get_copy_ancestry()
        self.assertEqual([(u'tête/README2.txt', 13),
                          (u'tête/README.txt', 3)], ancestry)
        for path, rev in ancestry:
            self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
        node = self.repos.get_node('/branches/v1x')
        ancestry = node.get_copy_ancestry()
        self.assertEqual([(u'tags/v1.1', 11),
                          (u'branches/v1x', 9),
                          (u'tags/v1', 7),
                          (u'tête', 6)], ancestry)
        for path, rev in ancestry:
            self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
    def test_get_copy_ancestry_for_move(self):
        node = self.repos.get_node(u'/tête/dir1/dir2', 5)
        ancestry = node.get_copy_ancestry()
        self.assertEqual([(u'tête/dir2', 4)], ancestry)
        for path, rev in ancestry:
            self.repos.get_node(path, rev) # shouldn't raise NoSuchNode
    def test_get_branch_origin(self):
        node = self.repos.get_node('/tags/v1/README.txt')
        self.assertEqual(7, node.get_branch_origin())
        node = self.repos.get_node(u'/tête/README3.txt')
        self.assertEqual(14, node.get_branch_origin())
        node = self.repos.get_node('/branches/v1x')
        self.assertEqual(12, node.get_branch_origin())
        node = self.repos.get_node(u'/tête/dir1/dir2', 5)
        self.assertEqual(5, node.get_branch_origin())
    # Revision Log / path history
    def test_get_path_history(self):
        history = self.repos.get_path_history(u'/tête/README2.txt', None)
        self.assertEqual((u'tête/README2.txt', 14, 'delete'), history.next())
        self.assertEqual((u'tête/README2.txt', 6, 'copy'), history.next())
        self.assertEqual((u'tête/README.txt', 3, 'unknown'), history.next())
        self.assertRaises(StopIteration, history.next)
    def test_get_path_history_copied_file(self):
        history = self.repos.get_path_history('/tags/v1/README.txt', None)
        self.assertEqual(('tags/v1/README.txt', 7, 'copy'), history.next())
        self.assertEqual((u'tête/README.txt', 3, 'unknown'), history.next())
        self.assertRaises(StopIteration, history.next)
    def test_get_path_history_copied_dir(self):
        history = self.repos.get_path_history('/branches/v1x', None)
        self.assertEqual(('branches/v1x', 12, 'copy'), history.next())
        self.assertEqual(('tags/v1.1', 10, 'unknown'), history.next())
        self.assertEqual(('branches/v1x', 11, 'delete'), history.next())
        self.assertEqual(('branches/v1x', 9, 'edit'), history.next())
        self.assertEqual(('branches/v1x', 8, 'copy'), history.next())
        self.assertEqual(('tags/v1', 7, 'unknown'), history.next())
        self.assertRaises(StopIteration, history.next)
    # Diffs
    def _cmp_diff(self, expected, got):
        # helper: compare an expected ((old), (new), (kind, change))
        # triple against an actual get_changes() tuple
        if expected[0]:
            old = self.repos.get_node(*expected[0])
            self.assertEqual((old.path, old.rev), (got[0].path, got[0].rev))
        if expected[1]:
            new = self.repos.get_node(*expected[1])
            self.assertEqual((new.path, new.rev), (got[1].path, got[1].rev))
        self.assertEqual(expected[2], (got[2], got[3]))
    def test_diff_file_different_revs(self):
        diffs = self.repos.get_changes(u'tête/README.txt', 2,
                                       u'tête/README.txt', 3)
        self._cmp_diff(((u'tête/README.txt', 2),
                        (u'tête/README.txt', 3),
                        (Node.FILE, Changeset.EDIT)), diffs.next())
        self.assertRaises(StopIteration, diffs.next)
    def test_diff_file_different_files(self):
        diffs = self.repos.get_changes('branches/v1x/README.txt', 12,
                                       'branches/v1x/README2.txt', 12)
        self._cmp_diff((('branches/v1x/README.txt', 12),
                        ('branches/v1x/README2.txt', 12),
                        (Node.FILE, Changeset.EDIT)), diffs.next())
        self.assertRaises(StopIteration, diffs.next)
    def test_diff_file_no_change(self):
        diffs = self.repos.get_changes(u'tête/README.txt', 7,
                                       'tags/v1/README.txt', 7)
        self.assertRaises(StopIteration, diffs.next)
    def test_diff_dir_different_revs(self):
        diffs = self.repos.get_changes(u'tête', 4, u'tête', 8)
        self._cmp_diff((None, (u'tête/README2.txt', 8),
                        (Node.FILE, Changeset.ADD)), diffs.next())
        self._cmp_diff((None, (u'tête/dir1/dir2', 8),
                        (Node.DIRECTORY, Changeset.ADD)), diffs.next())
        self._cmp_diff((None, (u'tête/dir1/dir3', 8),
                        (Node.DIRECTORY, Changeset.ADD)), diffs.next())
        self._cmp_diff(((u'tête/dir2', 4), None,
                        (Node.DIRECTORY, Changeset.DELETE)), diffs.next())
        self._cmp_diff(((u'tête/dir3', 4), None,
                        (Node.DIRECTORY, Changeset.DELETE)), diffs.next())
        self.assertRaises(StopIteration, diffs.next)
    def test_diff_dir_different_dirs(self):
        diffs = self.repos.get_changes(u'tête', 1, 'branches/v1x', 12)
        self._cmp_diff((None, ('branches/v1x/README.txt', 12),
                        (Node.FILE, Changeset.ADD)), diffs.next())
        self._cmp_diff((None, ('branches/v1x/README2.txt', 12),
                        (Node.FILE, Changeset.ADD)), diffs.next())
        self._cmp_diff((None, ('branches/v1x/dir1', 12),
                        (Node.DIRECTORY, Changeset.ADD)), diffs.next())
        self._cmp_diff((None, ('branches/v1x/dir1/dir2', 12),
                        (Node.DIRECTORY, Changeset.ADD)), diffs.next())
        self._cmp_diff((None, ('branches/v1x/dir1/dir3', 12),
                        (Node.DIRECTORY, Changeset.ADD)), diffs.next())
        self.assertRaises(StopIteration, diffs.next)
    def test_diff_dir_no_change(self):
        diffs = self.repos.get_changes(u'tête', 7,
                                       'tags/v1', 7)
        self.assertRaises(StopIteration, diffs.next)
    # Changesets
    def test_changeset_repos_creation(self):
        chgset = self.repos.get_changeset(0)
        self.assertEqual(0, chgset.rev)
        self.assertEqual('', chgset.message)
        self.assertEqual('', chgset.author)
        self.assertEqual(datetime(2005, 4, 1, 9, 57, 41, 312767, utc),
                         chgset.date)
        self.assertRaises(StopIteration, chgset.get_changes().next)
    def test_changeset_added_dirs(self):
        chgset = self.repos.get_changeset(1)
        self.assertEqual(1, chgset.rev)
        self.assertEqual('Initial directory layout.', chgset.message)
        self.assertEqual('john', chgset.author)
        self.assertEqual(datetime(2005, 4, 1, 10, 0, 52, 353248, utc),
                         chgset.date)
        changes = chgset.get_changes()
        self.assertEqual(('branches', Node.DIRECTORY, Changeset.ADD, None, -1),
                         changes.next())
        self.assertEqual(('tags', Node.DIRECTORY, Changeset.ADD, None, -1),
                         changes.next())
        self.assertEqual((u'tête', Node.DIRECTORY, Changeset.ADD, None, -1),
                         changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_file_edit(self):
        chgset = self.repos.get_changeset(3)
        self.assertEqual(3, chgset.rev)
        self.assertEqual('Fixed README.\n', chgset.message)
        self.assertEqual('kate', chgset.author)
        self.assertEqual(datetime(2005, 4, 1, 13, 24, 58, 234643, utc),
                         chgset.date)
        changes = chgset.get_changes()
        self.assertEqual((u'tête/README.txt', Node.FILE, Changeset.EDIT,
                          u'tête/README.txt', 2), changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_dir_moves(self):
        chgset = self.repos.get_changeset(5)
        self.assertEqual(5, chgset.rev)
        self.assertEqual('Moved directories.', chgset.message)
        self.assertEqual('kate', chgset.author)
        self.assertEqual(datetime(2005, 4, 1, 16, 25, 39, 658099, utc),
                         chgset.date)
        changes = chgset.get_changes()
        self.assertEqual((u'tête/dir1/dir2', Node.DIRECTORY, Changeset.MOVE,
                          u'tête/dir2', 4), changes.next())
        self.assertEqual((u'tête/dir1/dir3', Node.DIRECTORY, Changeset.MOVE,
                          u'tête/dir3', 4), changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_file_copy(self):
        chgset = self.repos.get_changeset(6)
        self.assertEqual(6, chgset.rev)
        self.assertEqual('More things to read', chgset.message)
        self.assertEqual('john', chgset.author)
        self.assertEqual(datetime(2005, 4, 1, 18, 56, 46, 985846, utc),
                         chgset.date)
        changes = chgset.get_changes()
        self.assertEqual((u'tête/README2.txt', Node.FILE, Changeset.COPY,
                          u'tête/README.txt', 3), changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_root_propset(self):
        chgset = self.repos.get_changeset(13)
        self.assertEqual(13, chgset.rev)
        self.assertEqual('Setting property on the repository_dir root',
                         chgset.message)
        changes = chgset.get_changes()
        self.assertEqual(('/', Node.DIRECTORY, Changeset.EDIT, '/', 12),
                         changes.next())
        self.assertEqual((u'tête', Node.DIRECTORY, Changeset.EDIT, u'tête', 6),
                         changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_base_path_rev(self):
        chgset = self.repos.get_changeset(9)
        self.assertEqual(9, chgset.rev)
        changes = chgset.get_changes()
        self.assertEqual(('branches/v1x/README.txt', Node.FILE,
                          Changeset.EDIT, u'tête/README.txt', 3),
                         changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_rename_and_edit(self):
        chgset = self.repos.get_changeset(14)
        self.assertEqual(14, chgset.rev)
        changes = chgset.get_changes()
        self.assertEqual((u'tête/README3.txt', Node.FILE,
                          Changeset.MOVE, u'tête/README2.txt', 13),
                         changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_edit_after_wc2wc_copy__original_deleted(self):
        chgset = self.repos.get_changeset(16)
        self.assertEqual(16, chgset.rev)
        changes = chgset.get_changes()
        self.assertEqual(('branches/v2', Node.DIRECTORY, Changeset.COPY,
                          'tags/v1.1', 14),
                         changes.next())
        self.assertEqual(('branches/v2/README2.txt', Node.FILE,
                          Changeset.EDIT, u'tête/README2.txt', 6),
                         changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_fancy_rename_double_delete(self):
        chgset = self.repos.get_changeset(19)
        self.assertEqual(19, chgset.rev)
        changes = chgset.get_changes()
        self.assertEqual((u'tête/mpp_proc', Node.DIRECTORY,
                          Changeset.MOVE, u'tête/Xprimary_proc', 18),
                         changes.next())
        self.assertEqual((u'tête/mpp_proc/Xprimary_pkg.vhd',
                          Node.FILE, Changeset.DELETE,
                          u'tête/Xprimary_proc/Xprimary_pkg.vhd', 18),
                         changes.next())
        self.assertEqual((u'tête/mpp_proc/Xprimary_proc', Node.DIRECTORY,
                          Changeset.COPY, u'tête/Xprimary_proc', 18),
                         changes.next())
        self.assertEqual((u'tête/mpp_proc/Xprimary_proc/Xprimary_pkg.vhd',
                          Node.FILE, Changeset.DELETE,
                          u'tête/Xprimary_proc/Xprimary_pkg.vhd', 18),
                         changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_copy_with_deletions_below_copy(self):
        """Regression test for #4900."""
        chgset = self.repos.get_changeset(22)
        self.assertEqual(22, chgset.rev)
        changes = chgset.get_changes()
        self.assertEqual((u'branches/v3', 'dir', 'copy',
                          u'tête', 21), changes.next())
        self.assertEqual((u'branches/v3/dir1', 'dir', 'delete',
                          u'tête/dir1', 21), changes.next())
        self.assertEqual((u'branches/v3/mpp_proc', 'dir', 'delete',
                          u'tête/mpp_proc', 21), changes.next())
        self.assertEqual((u'branches/v3/v2', 'dir', 'delete',
                          u'tête/v2', 21), changes.next())
        self.assertRaises(StopIteration, changes.next)
    def test_changeset_utf_8(self):
        chgset = self.repos.get_changeset(20)
        self.assertEqual(20, chgset.rev)
        self.assertEqual(u'Chez moi ça marche\n', chgset.message)
        self.assertEqual(u'Jonas Borgström', chgset.author)
class ScopedTests(object):
def test_repos_normalize_path(self):
self.assertEqual('/', self.repos.normalize_path('/'))
self.assertEqual('/', self.repos.normalize_path(''))
self.assertEqual('/', self.repos.normalize_path(None))
self.assertEqual('dir1', self.repos.normalize_path('dir1'))
self.assertEqual('dir1', self.repos.normalize_path('/dir1'))
self.assertEqual('dir1', self.repos.normalize_path('dir1/'))
self.assertEqual('dir1', self.repos.normalize_path('/dir1/'))
def test_repos_normalize_rev(self):
self.assertEqual(TETE, self.repos.normalize_rev('latest'))
self.assertEqual(TETE, self.repos.normalize_rev('head'))
self.assertEqual(TETE, self.repos.normalize_rev(''))
self.assertEqual(TETE, self.repos.normalize_rev(None))
self.assertEqual(5, self.repos.normalize_rev('5'))
self.assertEqual(5, self.repos.normalize_rev(5))
def test_rev_navigation(self):
self.assertEqual(1, self.repos.oldest_rev)
self.assertEqual(None, self.repos.previous_rev(0))
self.assertEqual(1, self.repos.previous_rev(2))
self.assertEqual(TETE, self.repos.youngest_rev)
self.assertEqual(2, self.repos.next_rev(1))
self.assertEqual(3, self.repos.next_rev(2))
# ...
self.assertEqual(None, self.repos.next_rev(TETE))
def test_has_node(self):
self.assertEqual(False, self.repos.has_node('/dir1', 3))
self.assertEqual(True, self.repos.has_node('/dir1', 4))
def test_get_node(self):
node = self.repos.get_node('/dir1')
self.assertEqual('dir1', node.name)
self.assertEqual('/dir1', node.path)
self.assertEqual(Node.DIRECTORY, node.kind)
self.assertEqual(TETE, node.rev)
self.assertEqual(5, node.created_rev)
self.assertEqual(datetime(2005, 4, 1, 16, 25, 39, 658099, utc),
node.last_modified)
node = self.repos.get_node('/README.txt')
self.assertEqual('README.txt', node.name)
self.assertEqual('/README.txt', node.path)
self.assertEqual(Node.FILE, node.kind)
self.assertEqual(TETE, node.rev)
self.assertEqual(3, node.created_rev)
self.assertEqual(datetime(2005, 4, 1, 13, 24, 58, 234643, utc),
node.last_modified)
def test_get_node_specific_rev(self):
node = self.repos.get_node('/dir1', 4)
self.assertEqual('dir1', node.name)
self.assertEqual('/dir1', node.path)
self.assertEqual(Node.DIRECTORY, node.kind)
self.assertEqual(4, node.rev)
self.assertEqual(datetime(2005, 4, 1, 15, 42, 35, 450595, utc),
node.last_modified)
node = self.repos.get_node('/README.txt', 2)
self.assertEqual('README.txt', node.name)
self.assertEqual('/README.txt', node.path)
self.assertEqual(Node.FILE, node.kind)
self.assertEqual(2, node.rev)
self.assertEqual(datetime(2005, 4, 1, 13, 12, 18, 216267, utc),
node.last_modified)
def test_get_dir_entries(self):
node = self.repos.get_node('/')
entries = node.get_entries()
self.assertEqual('dir1', entries.next().name)
self.assertEqual('mpp_proc', entries.next().name)
self.assertEqual('v2', entries.next().name)
self.assertEqual('README3.txt', entries.next().name)
self.assertEqual(u'R\xe9sum\xe9.txt', entries.next().name)
self.assertEqual('README.txt', entries.next().name)
self.assertRaises(StopIteration, entries.next)
def test_get_file_entries(self):
node = self.repos.get_node('/README.txt')
entries = node.get_entries()
self.assertRaises(StopIteration, entries.next)
def test_get_dir_content(self):
node = self.repos.get_node('/dir1')
self.assertEqual(None, node.content_length)
self.assertEqual(None, node.content_type)
self.assertEqual(None, node.get_content())
def test_get_file_content(self):
node = self.repos.get_node('/README.txt')
self.assertEqual(8, node.content_length)
self.assertEqual('text/plain', node.content_type)
self.assertEqual('A test.\n', node.get_content().read())
def test_get_dir_properties(self):
f = self.repos.get_node('/dir1')
props = f.get_properties()
self.assertEqual(0, len(props))
def test_get_file_properties(self):
f = self.repos.get_node('/README.txt')
props = f.get_properties()
self.assertEqual('native', props['svn:eol-style'])
self.assertEqual('text/plain', props['svn:mime-type'])
# Revision Log / node history
def test_get_history_scope(self):
"""Regression test for #9504"""
node = self.repos.get_node('/')
history = list(node.get_history())
self.assertEqual(('/', 1, 'add'), history[-1])
initial_cset = self.repos.get_changeset(history[-1][1])
self.assertEqual(1, initial_cset.rev)
def test_get_node_history(self):
node = self.repos.get_node('/README3.txt')
history = node.get_history()
self.assertEqual(('README3.txt', 14, 'copy'), history.next())
self.assertEqual(('README2.txt', 6, 'copy'), history.next())
self.assertEqual(('README.txt', 3, 'edit'), history.next())
self.assertEqual(('README.txt', 2, 'add'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_node_history_follow_copy(self):
node = self.repos.get_node('dir1/dir3', )
history = node.get_history()
self.assertEqual(('dir1/dir3', 5, 'copy'), history.next())
self.assertEqual(('dir3', 4, 'add'), history.next())
self.assertRaises(StopIteration, history.next)
def test_get_copy_ancestry(self):
    """Copy ancestry lists the (path, rev) pairs of the copy sources."""
    node = self.repos.get_node(u'/README3.txt')
    ancestry = node.get_copy_ancestry()
    self.assertEqual([(u'README2.txt', 13), (u'README.txt', 3)], ancestry)
    # every ancestor must still resolve (must not raise NoSuchNode)
    for ancestor_path, ancestor_rev in ancestry:
        self.repos.get_node(ancestor_path, ancestor_rev)
def test_get_copy_ancestry_for_move(self):
    """A moved directory still knows its pre-move ancestry."""
    ancestry = self.repos.get_node(u'/dir1/dir2', 5).get_copy_ancestry()
    self.assertEqual([(u'dir2', 4)], ancestry)
    # ancestors must resolve without raising NoSuchNode
    for ancestor_path, ancestor_rev in ancestry:
        self.repos.get_node(ancestor_path, ancestor_rev)
def test_get_branch_origin(self):
    """get_branch_origin returns the revision the copy/move created."""
    self.assertEqual(
        14, self.repos.get_node(u'/README3.txt').get_branch_origin())
    self.assertEqual(
        5, self.repos.get_node(u'/dir1/dir2', 5).get_branch_origin())
# Revision Log / path history
def test_get_path_history(self):
    """Path history reports the delete and add events of a removed dir."""
    history = self.repos.get_path_history('dir3', None)
    self.assertEqual(('dir3', 5, 'delete'), history.next())
    self.assertEqual(('dir3', 4, 'add'), history.next())
    self.assertRaises(StopIteration, history.next)
def test_get_path_history_copied_file(self):
    """Path history of a copied file ends in an 'unknown' source event."""
    history = self.repos.get_path_history('README3.txt', None)
    self.assertEqual(('README3.txt', 14, 'copy'), history.next())
    self.assertEqual(('README2.txt', 6, 'unknown'), history.next())
    self.assertRaises(StopIteration, history.next)
def test_get_path_history_copied_dir(self):
    """Path history of a copied dir ends in an 'unknown' source event."""
    history = self.repos.get_path_history('dir1/dir3', None)
    self.assertEqual(('dir1/dir3', 5, 'copy'), history.next())
    self.assertEqual(('dir3', 4, 'unknown'), history.next())
    self.assertRaises(StopIteration, history.next)
def test_changeset_repos_creation(self):
    """Changeset 0 is the synthetic repository-creation changeset."""
    cset = self.repos.get_changeset(0)
    self.assertEqual(0, cset.rev)
    self.assertEqual('', cset.author)
    self.assertEqual('', cset.message)
    self.assertEqual(datetime(2005, 4, 1, 9, 57, 41, 312767, utc),
                     cset.date)
    # the creation changeset contains no changes
    self.assertRaises(StopIteration, cset.get_changes().next)
def test_changeset_added_dirs(self):
chgset = self.repos.get_changeset(4)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/svn/tests/__init__.py | trac/tracopt/versioncontrol/svn/tests/__init__.py | import unittest
from tracopt.versioncontrol.svn.tests import svn_fs
def suite():
suite = unittest.TestSuite()
suite.addTest(svn_fs.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/git/PyGIT.py | trac/tracopt/versioncontrol/git/PyGIT.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# Copyright (C) 2006-2011, Herbert Valerio Riedel <hvr@gnu.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import os
import codecs
from collections import deque
from contextlib import contextmanager
import cStringIO
from functools import partial
from operator import itemgetter
import re
from subprocess import Popen, PIPE
import sys
from threading import Lock
import time
import weakref
__all__ = ['GitError', 'GitErrorSha', 'Storage', 'StorageFactory']
def terminate(process):
    """Kill `process` (a `subprocess.Popen`), portably.

    Python 2.5 compatibility helper: `os.kill` is unavailable on
    Windows before Python 2.7, and `Popen.terminate` only arrived in
    2.6 (with issues on Windows), so both paths are done by hand.
    """
    def _terminate_windows(proc):
        import ctypes
        PROCESS_TERMINATE = 1
        handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE,
                                                    False,
                                                    proc.pid)
        ctypes.windll.kernel32.TerminateProcess(handle, -1)
        ctypes.windll.kernel32.CloseHandle(handle)

    def _terminate_posix(proc):
        import os
        import signal
        return os.kill(proc.pid, signal.SIGTERM)

    if sys.platform == 'win32':
        return _terminate_windows(process)
    return _terminate_posix(process)
class GitError(Exception):
    """Base class for all errors raised by the PyGIT layer."""
    pass
class GitErrorSha(GitError):
    """Raised when a sha id cannot be resolved to a known object."""
    pass
# Helper functions
def parse_commit(raw):
    """Parse the raw content of a commit (as given by `git cat-file -p <rev>`).

    Return `(message, props)` where `props` maps each header key to the
    list of its values; multi-line headers (e.g. gpgsig) are re-joined.
    """
    if not raw:
        raise GitErrorSha
    lines = raw.splitlines()
    if not lines:
        raise GitErrorSha

    props = {}
    continuation = None       # accumulated lines of a multi-line value
    continuation_key = None
    line = lines.pop(0)
    while line:
        if line.startswith(' '):
            # continuation line: belongs to the previous header
            if continuation is None:
                continuation_key = key
                continuation = [props[continuation_key][-1]]
            continuation.append(line[1:])
        else:
            key, value = line.split(None, 1)
            props.setdefault(key, []).append(value.strip())
        line = lines.pop(0)
        # flush a finished multi-line value back into props
        if continuation and (not line or key != continuation_key):
            props[continuation_key][-1] = '\n'.join(continuation)
            continuation = None
    # everything after the first blank line is the commit message
    return '\n'.join(lines), props
class GitCore(object):
    """Low-level wrapper around git executable.

    Builds git command lines and runs them via `subprocess.Popen`;
    unknown attribute accesses are turned into git sub-commands (see
    `__getattr__`), e.g. `self.rev_list(...)` runs `git rev-list ...`.
    """

    def __init__(self, git_dir=None, git_bin='git'):
        # git_dir: passed to git via --git-dir (may be None)
        # git_bin: name or path of the git executable
        self.__git_bin = git_bin
        self.__git_dir = git_dir

    def __repr__(self):
        return '<GitCore bin="%s" dir="%s">' % (self.__git_bin,
                                                self.__git_dir)

    def __build_git_cmd(self, gitcmd, *args):
        """construct command tuple for git call suitable for Popen()"""
        cmd = [self.__git_bin]
        if self.__git_dir:
            cmd.append('--git-dir=%s' % self.__git_dir)
        cmd.append(gitcmd)
        cmd.extend(args)
        return cmd

    def __pipe(self, git_cmd, *cmd_args, **kw):
        # NOTE(review): close_fds is only passed on non-Windows,
        # presumably because it cannot be combined with redirected std
        # handles on Windows/Python 2 -- verify before changing
        if sys.platform == 'win32':
            return Popen(self.__build_git_cmd(git_cmd, *cmd_args), **kw)
        else:
            return Popen(self.__build_git_cmd(git_cmd, *cmd_args),
                         close_fds=True, **kw)

    def __execute(self, git_cmd, *cmd_args):
        """execute git command and return its captured stdout as a string"""
        #print >>sys.stderr, "DEBUG:", git_cmd, cmd_args
        p = self.__pipe(git_cmd, stdout=PIPE, stderr=PIPE, *cmd_args)
        stdout_data, stderr_data = p.communicate()
        #TODO, do something with p.returncode, e.g. raise exception
        return stdout_data

    def cat_file_batch(self):
        # long-lived `git cat-file --batch` process with both ends piped
        return self.__pipe('cat-file', '--batch', stdin=PIPE, stdout=PIPE)

    def log_pipe(self, *cmd_args):
        # streaming `git log`; the caller owns the returned process
        return self.__pipe('log', stdout=PIPE, *cmd_args)

    def __getattr__(self, name):
        # dynamic dispatch: any public attribute becomes a git
        # sub-command, with underscores mapped to dashes
        if name[0] == '_' or name in ['cat_file_batch', 'log_pipe']:
            raise AttributeError, name
        return partial(self.__execute, name.replace('_','-'))

    # matches hex strings; length is checked separately in is_sha()
    __is_sha_pat = re.compile(r'[0-9A-Fa-f]*$')

    @classmethod
    def is_sha(cls, sha):
        """returns whether sha is a potential sha id
        (i.e. proper hexstring between 4 and 40 characters)
        """
        # quick test before starting up regexp matcher
        if not (4 <= len(sha) <= 40):
            return False
        return bool(cls.__is_sha_pat.match(sha))
class SizedDict(dict):
    """Size-bounded dictionary with FIFO replacement strategy.

    Once more than `max_size` keys are stored, the oldest inserted keys
    are evicted.  Insertion is guarded by a lock and thus thread-safe.
    """
    def __init__(self, max_size=0):
        dict.__init__(self)
        self.__max_size = max_size
        self.__key_fifo = deque()
        self.__lock = Lock()

    def __setitem__(self, name, value):
        with self.__lock:
            assert len(self) == len(self.__key_fifo) # invariant
            if not self.__contains__(name):
                self.__key_fifo.append(name)
            rc = dict.__setitem__(self, name, value)
            # evict oldest entries until the size bound holds again
            while len(self.__key_fifo) > self.__max_size:
                self.__delitem__(self.__key_fifo.popleft())
            assert len(self) == len(self.__key_fifo) # invariant
            return rc

    def setdefault(self, *_):
        # Fix: the original raised `NotImplemented(...)`, but
        # NotImplemented is a constant, not an exception class, so the
        # call itself blew up with a confusing TypeError.  Raise the
        # proper NotImplementedError instead.
        raise NotImplementedError("SizedDict has no setdefault() method")
class StorageFactory(object):
    """Hands out `Storage` instances, at most one per repository path.

    Instances are normally tracked by weak reference only; passing
    `weak=False` pins the instance for the lifetime of the process.
    """
    __dict = weakref.WeakValueDictionary()
    __dict_nonweak = dict()
    __dict_lock = Lock()

    def __init__(self, repo, log, weak=True, git_bin='git',
                 git_fs_encoding=None):
        self.logger = log

        with StorageFactory.__dict_lock:
            storage = StorageFactory.__dict.get(repo)
            if storage is None:
                storage = Storage(repo, log, git_bin, git_fs_encoding)
                StorageFactory.__dict[repo] = storage

            # create or remove the pinning (strong) reference,
            # depending on the 'weak' argument
            if weak:
                StorageFactory.__dict_nonweak.pop(repo, None)
            else:
                StorageFactory.__dict_nonweak[repo] = storage

        self.__inst = storage
        self.__repo = repo

    def getInstance(self):
        is_weak = self.__repo not in StorageFactory.__dict_nonweak
        kind = "weak " if is_weak else ""
        self.logger.debug("requested %sPyGIT.Storage instance %d for '%s'"
                          % (kind, id(self.__inst), self.__repo))
        return self.__inst
class Storage(object):
"""High-level wrapper around GitCore with in-memory caching"""
__SREV_MIN = 4 # minimum short-rev length
class RevCache(tuple):
    """RevCache(youngest_rev, oldest_rev, rev_dict, tag_set, srev_dict,
    branch_dict)

    Immutable snapshot of the commit-tree cache.

    In Python 2.7 this class could be defined by:

        from collections import namedtuple
        RevCache = namedtuple('RevCache', 'youngest_rev oldest_rev '
                                          'rev_dict tag_set srev_dict '
                                          'branch_dict')

    This implementation is what that code generator would produce.
    """
    __slots__ = ()

    _fields = ('youngest_rev', 'oldest_rev', 'rev_dict', 'tag_set',
               'srev_dict', 'branch_dict')

    def __new__(cls, youngest_rev, oldest_rev, rev_dict, tag_set,
                srev_dict, branch_dict):
        return tuple.__new__(cls, (youngest_rev, oldest_rev, rev_dict,
                                   tag_set, srev_dict, branch_dict))

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        """Make a new RevCache object from a sequence or iterable"""
        result = new(cls, iterable)
        if len(result) != 6:
            raise TypeError('Expected 6 arguments, got %d' % len(result))
        return result

    def __repr__(self):
        return 'RevCache(youngest_rev=%r, oldest_rev=%r, rev_dict=%r, ' \
               'tag_set=%r, srev_dict=%r, branch_dict=%r)' % self

    def _asdict(t):
        """Return a new dict which maps field names to their values"""
        return {'youngest_rev': t[0], 'oldest_rev': t[1],
                'rev_dict': t[2], 'tag_set': t[3], 'srev_dict': t[4],
                'branch_dict': t[5]}

    def _replace(self, **kwds):
        """Return a new RevCache object replacing specified fields with
        new values
        """
        result = self._make(map(kwds.pop, ('youngest_rev', 'oldest_rev',
            'rev_dict', 'tag_set', 'srev_dict', 'branch_dict'), self))
        if kwds:
            raise ValueError("Got unexpected field names: %r"
                             % kwds.keys())
        return result

    def __getnewargs__(self):
        # support pickling / copy
        return tuple(self)

    # positional accessors, one per field
    youngest_rev = property(itemgetter(0))
    oldest_rev = property(itemgetter(1))
    rev_dict = property(itemgetter(2))
    tag_set = property(itemgetter(3))
    srev_dict = property(itemgetter(4))
    branch_dict = property(itemgetter(5))
@staticmethod
def __rev_key(rev):
    # Bucket key for the short-rev lookup table: the first 4 hex
    # digits of the sha, as an integer in [0, 0xffff].
    assert len(rev) >= 4
    #assert GitCore.is_sha(rev)
    srev_key = int(rev[:4], 16)
    assert srev_key >= 0 and srev_key <= 0xffff
    return srev_key
@staticmethod
def git_version(git_bin='git'):
    """Run `<git_bin> --version` and describe the detected version.

    Returns a dict with keys v_str, v_tuple, v_min_tuple, v_min_str
    and v_compatible; raises GitError if the version could not be
    retrieved or parsed.
    """
    GIT_VERSION_MIN_REQUIRED = (1, 5, 6)
    try:
        g = GitCore(git_bin=git_bin)
        [v] = g.version().splitlines()
        version = v.strip().split()[2]
        # 'version' has usually at least 3 numeric version
        # components, e.g.::
        #  1.5.4.2
        #  1.5.4.3.230.g2db511
        #  1.5.4.GIT

        def try_int(s):
            # non-numeric components (e.g. 'GIT') are kept as strings
            try:
                return int(s)
            except ValueError:
                return s

        split_version = tuple(map(try_int, version.split('.')))

        result = {}
        result['v_str'] = version
        result['v_tuple'] = split_version
        result['v_min_tuple'] = GIT_VERSION_MIN_REQUIRED
        result['v_min_str'] = ".".join(map(str, GIT_VERSION_MIN_REQUIRED))
        result['v_compatible'] = split_version >= GIT_VERSION_MIN_REQUIRED
        return result

    except Exception, e:
        raise GitError("Could not retrieve GIT version (tried to "
                       "execute/parse '%s --version' but got %s)"
                       % (git_bin, repr(e)))
def __init__(self, git_dir, log, git_bin='git', git_fs_encoding=None):
    """Initialize PyGit.Storage instance

    `git_dir`: path to .git folder;
            this setting is not affected by the `git_fs_encoding` setting

    `log`: logger instance

    `git_bin`: path to executable
            this setting is not affected by the `git_fs_encoding` setting

    `git_fs_encoding`: encoding used for paths stored in git repository;
            if `None`, no implicit decoding/encoding to/from
            unicode objects is performed, and bytestrings are
            returned instead
    """
    self.logger = log

    # resolved lazily by get_commit_encoding()
    self.commit_encoding = None

    # caches
    self.__rev_cache = None
    self.__rev_cache_lock = Lock()

    # cache the last 200 commit messages
    self.__commit_msg_cache = SizedDict(200)
    self.__commit_msg_lock = Lock()

    # shared `git cat-file --batch` child, created on first use
    self.__cat_file_pipe = None
    self.__cat_file_pipe_lock = Lock()

    if git_fs_encoding is not None:
        # validate encoding name
        codecs.lookup(git_fs_encoding)

        # setup conversion functions
        self._fs_to_unicode = lambda s: s.decode(git_fs_encoding)
        self._fs_from_unicode = lambda s: s.encode(git_fs_encoding)
    else:
        # pass bytestrings as-is w/o any conversion
        self._fs_to_unicode = self._fs_from_unicode = lambda s: s

    # simple sanity checking
    __git_file_path = partial(os.path.join, git_dir)
    if not all(map(os.path.exists,
                   map(__git_file_path,
                       ['HEAD','objects','refs']))):
        self.logger.error("GIT control files missing in '%s'" % git_dir)
        if os.path.exists(__git_file_path('.git')):
            self.logger.error("entry '.git' found in '%s'"
                              " -- maybe use that folder instead..."
                              % git_dir)
        raise GitError("GIT control files not found, maybe wrong "
                       "directory?")

    self.repo = GitCore(git_dir, git_bin=git_bin)

    self.logger.debug("PyGIT.Storage instance %d constructed" % id(self))
def __del__(self):
    # shut down the long-running `git cat-file --batch` child, if any
    with self.__cat_file_pipe_lock:
        if self.__cat_file_pipe is not None:
            self.__cat_file_pipe.stdin.close()
            terminate(self.__cat_file_pipe)
            self.__cat_file_pipe.wait()
#
# cache handling
#
# called by Storage.sync()
def __rev_cache_sync(self, youngest_rev=None):
    """invalidates revision db cache if necessary"""
    with self.__rev_cache_lock:
        cached = self.__rev_cache
        need_update = True
        if cached:
            last_youngest_rev = cached.youngest_rev
            if last_youngest_rev == youngest_rev:
                # tip unchanged -- cache stays valid
                need_update = False
            else:
                self.logger.debug("invalidated caches (%s != %s)"
                                  % (last_youngest_rev, youngest_rev))
        # no cache yet: "update" is almost a NOOP
        if need_update:
            self.__rev_cache = None

        return need_update
def get_rev_cache(self):
    """Retrieve revision cache

    may rebuild cache on the fly if required

    returns RevCache tuple
    """
    with self.__rev_cache_lock:
        if self.__rev_cache is None:
            # can be cleared by Storage.__rev_cache_sync()
            self.logger.debug("triggered rebuild of commit tree db "
                              "for %d" % id(self))
            ts0 = time.time()

            youngest = None
            oldest = None
            new_db = {} # db
            new_sdb = {} # short_rev db

            # helper for reusing strings
            __rev_seen = {}
            def __rev_reuse(rev):
                rev = str(rev)
                return __rev_seen.setdefault(rev, rev)

            new_tags = set(__rev_reuse(rev.strip())
                           for rev in self.repo.rev_parse('--tags')
                                               .splitlines())

            new_branches = [(k, __rev_reuse(v))
                            for k, v in self._get_branches()]
            head_revs = set(v for _, v in new_branches)

            rev = ord_rev = 0
            for ord_rev, revs in enumerate(
                                    self.repo.rev_list('--parents',
                                                       '--topo-order',
                                                       '--all')
                                             .splitlines()):
                # each line: "<rev> <parent1> <parent2> ..."
                revs = map(__rev_reuse, revs.strip().split())

                rev = revs[0]

                # first rev seen is assumed to be the youngest one
                if not ord_rev:
                    youngest = rev

                # shortrev "hash" map
                srev_key = self.__rev_key(rev)
                new_sdb.setdefault(srev_key, []).append(rev)

                # parents
                parents = tuple(revs[1:])

                # new_db[rev] = (children(rev), parents(rev),
                #                ordinal_id(rev), rheads(rev))
                if rev in new_db:
                    # (incomplete) entry was already created by children
                    _children, _parents, _ord_rev, _rheads = new_db[rev]
                    assert _children
                    assert not _parents
                    assert _ord_rev == 0

                    if rev in head_revs and rev not in _rheads:
                        _rheads.append(rev)

                else: # new entry
                    _children = []
                    _rheads = [rev] if rev in head_revs else []

                # create/update entry
                # transform lists into tuples since entry will be final
                new_db[rev] = tuple(_children), tuple(parents), \
                    ord_rev + 1, tuple(_rheads)

                # update parents(rev)s
                for parent in parents:
                    # by default, a dummy ordinal_id is used
                    # for the mean-time
                    _children, _parents, _ord_rev, _rheads2 = \
                        new_db.setdefault(parent, ([], [], 0, []))

                    # update parent(rev)'s children
                    if rev not in _children:
                        _children.append(rev)

                    # update parent(rev)'s rheads
                    # NOTE(review): this inner loop rebinds `rev`, so
                    # the `oldest = rev` below may pick up an rhead sha
                    # instead of the last listed rev -- verify intent
                    for rev in _rheads:
                        if rev not in _rheads2:
                            _rheads2.append(rev)

            # last rev seen is assumed to be the oldest
            # one (with highest ord_rev)
            oldest = rev

            __rev_seen = None

            # convert sdb either to dict or array depending on size
            tmp = [()]*(max(new_sdb.keys())+1) \
                if len(new_sdb) > 5000 else {}

            try:
                while True:
                    k, v = new_sdb.popitem()
                    tmp[k] = tuple(v)
            except KeyError:
                pass

            assert len(new_sdb) == 0

            new_sdb = tmp

            # atomically update self.__rev_cache
            self.__rev_cache = Storage.RevCache(youngest, oldest, new_db,
                                                new_tags, new_sdb,
                                                new_branches)
            ts1 = time.time()
            self.logger.debug("rebuilt commit tree db for %d with %d "
                              "entries (took %.1f ms)"
                              % (id(self), len(new_db), 1000*(ts1-ts0)))

        assert all(e is not None for e in self.__rev_cache) \
            or not any(self.__rev_cache)

        return self.__rev_cache
        # with self.__rev_cache_lock
# see RevCache namedtuple
rev_cache = property(get_rev_cache)
def _get_branches(self):
    """returns list of (local) branches, with active (= HEAD) one being
    the first item
    """
    branches = []
    for line in self.repo.branch('-v', '--no-abbrev').splitlines():
        # format: "[* ] <name> <sha> ..."
        name, sha = line[1:].strip().split()[:2]
        if line.startswith('*'):
            # the checked-out branch goes first
            branches.insert(0, (name, sha))
        else:
            branches.append((name, sha))
    return branches
def get_branches(self):
    """returns list of (local) branches, with active (= HEAD) one being
    the first item
    """
    # note: actually a generator of (unicode_name, sha) pairs
    return ((self._fs_to_unicode(name), sha)
            for name, sha in self.rev_cache.branch_dict)
def get_commits(self):
    # the rev -> (children, parents, ordinal_id, rheads) mapping
    return self.rev_cache.rev_dict
def oldest_rev(self):
    # sha of the oldest known commit (highest ordinal id)
    return self.rev_cache.oldest_rev
def youngest_rev(self):
    # sha of the youngest known commit
    return self.rev_cache.youngest_rev
def get_branch_contains(self, sha, resolve=False):
    """return list of reachable head sha ids or (names, sha) pairs if
    resolve is true

    see also get_branches()
    """
    cache = self.rev_cache
    try:
        rheads = cache.rev_dict[sha][3]
    except KeyError:
        return []
    if not resolve:
        return rheads
    return ((self._fs_to_unicode(name), head)
            for name, head in cache.branch_dict if head in rheads)
def history_relative_rev(self, sha, rel_pos):
    """Return the sha `rel_pos` steps away from `sha` in topo order,
    or None when that falls outside the history."""
    db = self.get_commits()

    if sha not in db:
        raise GitErrorSha()

    if rel_pos == 0:
        return sha

    target_ord = db[sha][2] + rel_pos
    if not (1 <= target_ord <= len(db)):
        return None

    # linear scan over the ordinal ids
    for candidate, info in db.iteritems():
        if info[2] == target_ord:
            return candidate

    # should never be reached if db is consistent
    raise GitError("internal inconsistency detected")
def hist_next_revision(self, sha):
    # next (younger) commit in topological order
    return self.history_relative_rev(sha, -1)
def hist_prev_revision(self, sha):
    # previous (older) commit in topological order
    return self.history_relative_rev(sha, +1)
def get_commit_encoding(self):
    """Return the repository's i18n.commitEncoding (cached), falling
    back to 'utf-8' when unset."""
    if self.commit_encoding is None:
        configured = self.repo.repo_config(
            "--get", "i18n.commitEncoding").strip()
        self.commit_encoding = configured or 'utf-8'
    return self.commit_encoding
def head(self):
    """get current HEAD commit id"""
    # may be None when HEAD cannot be resolved (see verifyrev)
    return self.verifyrev('HEAD')
def cat_file(self, kind, sha):
    """Return the raw content of object `sha` via the shared
    `git cat-file --batch` pipe.

    `kind` is the expected object type ('blob', 'commit', 'tag', ...);
    a type mismatch or malformed response raises GitError.
    """
    with self.__cat_file_pipe_lock:
        if self.__cat_file_pipe is None:
            self.__cat_file_pipe = self.repo.cat_file_batch()

        try:
            self.__cat_file_pipe.stdin.write(sha + '\n')
            self.__cat_file_pipe.stdin.flush()

            # response header line: "<sha> <type> <size>"
            split_stdout_line = self.__cat_file_pipe.stdout.readline() \
                                                    .split()
            if len(split_stdout_line) != 3:
                raise GitError("internal error (could not split line "
                               "'%s')" % (split_stdout_line,))

            _sha, _type, _size = split_stdout_line

            if _type != kind:
                raise GitError("internal error (got unexpected object "
                               "kind '%s', expected '%s')"
                               % (_type, kind))

            size = int(_size)
            # payload is followed by one trailing newline; strip it
            return self.__cat_file_pipe.stdout.read(size + 1)[:size]
        except:
            # There was an error, we should close the pipe to get to a
            # consistent state (Otherwise it happens that next time we
            # call cat_file we get payload from previous call)
            # NOTE(review): this bare except also swallows the GitErrors
            # raised above, and the method then returns None implicitly;
            # callers must cope with a None result
            self.logger.debug("closing cat_file pipe")
            self.__cat_file_pipe.stdin.close()
            terminate(self.__cat_file_pipe)
            self.__cat_file_pipe.wait()
            self.__cat_file_pipe = None
def verifyrev(self, rev):
    """verify/lookup given revision object and return a sha id or None
    if lookup failed
    """
    rev = self._fs_from_unicode(rev)

    _rev_cache = self.rev_cache

    if GitCore.is_sha(rev):
        # maybe it's a short or full rev
        fullrev = self.fullrev(rev)
        if fullrev:
            return fullrev

    # fall back to external git calls
    rc = self.repo.rev_parse('--verify', rev).strip()
    if not rc:
        return None

    if rc in _rev_cache.rev_dict:
        return rc

    if rc in _rev_cache.tag_set:
        # annotated tag: dereference to the tagged object's sha,
        # taken from the "object <sha>" header of the tag object
        sha = self.cat_file('tag', rc).split(None, 2)[:2]
        if sha[0] != 'object':
            self.logger.debug("unexpected result from 'git-cat-file tag "
                              "%s'" % rc)
            return None
        return sha[1]

    return None
def shortrev(self, rev, min_len=7):
    """try to shorten sha id"""
    #try to emulate the following:
    #return self.repo.rev_parse("--short", str(rev)).strip()
    rev = str(rev)

    min_len = max(min_len, self.__SREV_MIN)

    cache = self.rev_cache

    if rev not in cache.rev_dict:
        return None

    candidates = set(cache.srev_dict[self.__rev_key(rev)])
    if len(candidates) == 1:
        return rev[:min_len]  # already unique at the minimum length

    # grow the prefix until it no longer collides with the other
    # shas sharing the same 4-hex-digit bucket
    others = candidates - set([rev])
    for length in range(min_len + 1, 40):
        prefix = rev[:length]
        if prefix not in [other[:length] for other in others]:
            return prefix

    return rev  # worst-case, all except the last character match
def fullrev(self, srev):
    """try to reverse shortrev()"""
    srev = str(srev)

    cache = self.rev_cache

    # short-cut: already a known full sha
    if len(srev) == 40 and srev in cache.rev_dict:
        return srev

    if not GitCore.is_sha(srev):
        return None

    try:
        candidates = cache.srev_dict[self.__rev_key(srev)]
    except KeyError:
        return None

    matches = [sha for sha in candidates if sha.startswith(srev)]
    if len(matches) == 1:
        return matches[0]

    return None
def get_tags(self):
    # one unicode tag name per line of `git tag -l`
    return (self._fs_to_unicode(e.strip())
            for e in self.repo.tag('-l').splitlines())
def ls_tree(self, rev, path=''):
    """List the tree at `rev`/`path` as tuples
    `(mode, type, sha, size, name)`; `size` is None for non-blob
    entries ('-' in the git output).
    """
    rev = rev and str(rev) or 'HEAD' # paranoia
    path = self._fs_from_unicode(path)

    if path.startswith('/'):
        path = path[1:]

    # -z: NUL-separated records; -l: include object size
    tree = self.repo.ls_tree('-z', '-l', rev, '--', path).split('\0')

    def split_ls_tree_line(l):
        """split according to '<mode> <type> <sha> <size>\t<fname>'"""
        meta, fname = l.split('\t', 1)
        _mode, _type, _sha, _size = meta.split()

        if _size == '-':
            _size = None
        else:
            _size = int(_size)

        return _mode, _type, _sha, _size, self._fs_to_unicode(fname)

    return [ split_ls_tree_line(e) for e in tree if e ]
def read_commit(self, commit_id):
    """Return `(message, props)` for `commit_id`, going through the
    FIFO cache of the 200 most recently parsed commits.

    Raises GitError for an empty id and GitErrorSha for an unknown one.
    """
    if not commit_id:
        raise GitError("read_commit called with empty commit_id")

    commit_id, commit_id_orig = self.fullrev(commit_id), commit_id

    db = self.get_commits()
    if commit_id not in db:
        self.logger.info("read_commit failed for '%s' ('%s')" %
                         (commit_id, commit_id_orig))
        raise GitErrorSha

    with self.__commit_msg_lock:
        if self.__commit_msg_cache.has_key(commit_id):
            # cache hit
            result = self.__commit_msg_cache[commit_id]
            # copy the props dict so callers can't mutate the cache
            return result[0], dict(result[1])

        # cache miss
        raw = self.cat_file('commit', commit_id)
        raw = unicode(raw, self.get_commit_encoding(), 'replace')
        result = parse_commit(raw)

        self.__commit_msg_cache[commit_id] = result

        return result[0], dict(result[1])
def get_file(self, sha):
    # blob content as a file-like object (fully buffered in memory)
    return cStringIO.StringIO(self.cat_file('blob', str(sha)))
def get_obj_size(self, sha):
    """Return the size in bytes of object `sha`; raise GitErrorSha
    when the object does not exist."""
    sha = str(sha)
    try:
        return int(self.repo.cat_file('-s', sha).strip())
    except ValueError:
        # empty/garbage output => object not found
        raise GitErrorSha("object '%s' not found" % sha)
def children(self, sha):
    """Return the list of direct children of commit `sha`
    ([] when the sha is unknown)."""
    commits = self.get_commits()
    if sha not in commits:
        return []
    return list(commits[sha][0])
def children_recursive(self, sha, rev_dict=None):
    """Recursively traverse children in breadth-first order"""
    if rev_dict is None:
        rev_dict = self.get_commits()

    # seed the queue with the direct children
    pending = deque(rev_dict[sha][0])
    visited = set(pending)
    while pending:
        rev = pending.popleft()
        yield rev
        unseen = set(rev_dict[rev][0]) - visited
        visited.update(unseen)
        pending.extend(unseen)

    assert len(pending) == 0
def parents(self, sha):
    """Return the list of parent shas of commit `sha`
    ([] when the sha is unknown)."""
    commits = self.get_commits()
    if sha not in commits:
        return []
    return list(commits[sha][1])
def all_revs(self):
    # iterator over every known commit sha (arbitrary order)
    return self.get_commits().iterkeys()
def sync(self):
    # poll the repository tip and invalidate the caches if it moved;
    # returns True when an update is needed
    rev = self.repo.rev_list('--max-count=1', '--topo-order', '--all') \
              .strip()
    return self.__rev_cache_sync(rev)
@contextmanager
def get_historian(self, sha, base_path):
    """Context manager yielding a `historian(path)` lookup function.

    The historian maps a path below `base_path` to the sha of the last
    commit reachable from `sha` that touched it, streaming answers
    lazily from a single `git log --name-status` process which is
    terminated when the context exits.
    """
    p = []            # the running git-log process (at most one)
    change = {}       # path -> sha of the latest change seen so far
    next_path = []    # the path the caller is currently waiting on

    def name_status_gen():
        p[:] = [self.repo.log_pipe('--pretty=format:%n%H',
                                   '--name-status', sha, '--', base_path)]
        f = p[0].stdout
        for l in f:
            if l == '\n':
                continue
            # a commit header line: the sha on its own
            old_sha = l.rstrip('\n')
            for l in f:
                if l == '\n':
                    break
                # an entry line: "<status>\t<path>"
                _, path = l.rstrip('\n').split('\t', 1)
                # record the change for the path and all parent dirs
                # not already attributed to a younger commit
                while path not in change:
                    change[path] = old_sha
                    if next_path == [path]:
                        # hand the answer to the waiting historian()
                        yield old_sha
                    try:
                        path, _ = path.rsplit('/', 1)
                    except ValueError:
                        break
        # log exhausted: clean up and answer None forever after
        f.close()
        terminate(p[0])
        p[0].wait()
        p[:] = []
        while True:
            yield None
    gen = name_status_gen()

    def historian(path):
        try:
            return change[path]
        except KeyError:
            next_path[:] = [path]
            return gen.next()
    yield historian

    if p:
        # caller never drained the log: kill the child explicitly
        p[0].stdout.close()
        terminate(p[0])
        p[0].wait()
def last_change(self, sha, path, historian=None):
    """Return the newest commit reachable from `sha` touching `path`,
    or None; delegates to `historian` when one is supplied."""
    if historian is not None:
        return historian(path)
    found = self.repo.rev_list('--max-count=1', sha, '--',
                               self._fs_from_unicode(path)).strip()
    return found or None
def history(self, sha, path, limit=None):
    """List up to `limit` commit shas reaching back from `sha` that
    touched `path` (all of them when `limit` is None)."""
    if limit is None:
        limit = -1  # git treats a negative max-count as "no limit"
    output = self.repo.rev_list('--max-count=%d' % limit, str(sha), '--',
                                self._fs_from_unicode(path))
    return [line.strip() for line in output.splitlines()]
def history_timerange(self, start, stop):
    """Commit shas with timestamps between `start` and `stop`
    (epoch seconds), oldest first."""
    output = self.repo.rev_list('--reverse',
                                '--max-age=%d' % start,
                                '--min-age=%d' % stop,
                                '--all')
    return [line.strip() for line in output.splitlines()]
def rev_is_anchestor_of(self, rev1, rev2):
    """return True if rev2 is successor of rev1"""
    # NOTE(review): the name keeps the historical misspelling of
    # "ancestor"; it is part of the public interface and cannot be
    # renamed here
    rev1 = rev1.strip()
    rev2 = rev2.strip()

    rev_dict = self.get_commits()
    return (rev2 in rev_dict and
            rev2 in self.children_recursive(rev1, rev_dict))
def blame(self, commit_sha, path):
    """Yield `(sha, lineno)` pairs from `git blame -p` for `path` at
    `commit_sha`; `lineno` is the raw string from the porcelain output.
    """
    in_metadata = False

    path = self._fs_from_unicode(path)
    for line in self.repo.blame('-p', '--', path, str(commit_sha)) \
                    .splitlines():
        assert line
        if in_metadata:
            # metadata lines follow a header until the tab-prefixed
            # content line
            in_metadata = not line.startswith('\t')
        else:
            # header line: "<sha> <orig_lineno> <lineno> [<group_size>]"
            split_line = line.split()
            if len(split_line) == 4:
                (sha, orig_lineno, lineno, group_size) = split_line
            else:
                (sha, orig_lineno, lineno) = split_line

            assert len(sha) == 40
            yield (sha, lineno)
            in_metadata = True

    assert not in_metadata
def diff_tree(self, tree1, tree2, path='', find_renames=False):
"""calls `git diff-tree` and returns tuples of the kind
(mode1,mode2,obj1,obj2,action,path1,path2)"""
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/git/__init__.py | trac/tracopt/versioncontrol/git/__init__.py | python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false | |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/git/git_fs.py | trac/tracopt/versioncontrol/git/git_fs.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# Copyright (C) 2006-2011, Herbert Valerio Riedel <hvr@gnu.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
from datetime import datetime
import os
import sys
from genshi.builder import tag
from trac.config import BoolOption, IntOption, PathOption, Option
from trac.core import *
from trac.util import TracError, shorten_line
from trac.util.datefmt import FixedOffset, to_timestamp, format_datetime
from trac.util.text import to_unicode
from trac.versioncontrol.api import Changeset, Node, Repository, \
IRepositoryConnector, NoSuchChangeset, \
NoSuchNode, IRepositoryProvider
from trac.versioncontrol.cache import CachedRepository, CachedChangeset
from trac.versioncontrol.web_ui import IPropertyRenderer
from trac.web.chrome import Chrome
from trac.wiki import IWikiSyntaxProvider
from tracopt.versioncontrol.git import PyGIT
class GitCachedRepository(CachedRepository):
    """Git-specific cached repository.

    Passes through {display,short,normalize}_rev
    """

    def display_rev(self, rev):
        """Displayed form of a revision: the abbreviated sha."""
        return self.short_rev(rev)

    def short_rev(self, path):
        """Delegate sha abbreviation to the wrapped repository."""
        return self.repos.short_rev(path)

    def normalize_rev(self, rev):
        """Resolve `rev` to a full sha; empty means the youngest rev."""
        if not rev:
            return self.repos.get_youngest_rev()
        resolved = self.repos.git.verifyrev(rev)
        if resolved is None:
            raise NoSuchChangeset(rev)
        return resolved

    def get_changeset(self, rev):
        """Return the git-aware cached changeset for `rev`."""
        return GitCachedChangeset(self, self.normalize_rev(rev), self.env)
class GitCachedChangeset(CachedChangeset):
    """Git-specific cached changeset.

    Handles get_branches()
    """
    def get_branches(self):
        """Return (branch_name, is_branch_head) pairs for the branches
        containing this changeset."""
        rev = self.rev
        containing = self.repos.repos.git.get_branch_contains(
            rev, resolve=True)
        return [(name, head == rev) for name, head in containing]
def _last_iterable(iterable):
"""helper for detecting last iteration in for-loop"""
i = iter(iterable)
v = i.next()
for nextv in i:
yield False, v
v = nextv
yield True, v
def intersperse(sep, iterable):
    """The 'intersperse' generator takes an element and an iterable and
    intersperses that element between the elements of the iterable.

    inspired by Haskell's ``Data.List.intersperse``
    """
    first = True
    for item in iterable:
        if not first:
            yield sep
        first = False
        yield item
# helper
def _parse_user_time(s):
    """Parse author or committer attribute lines and return
    corresponding ``(user, timestamp)`` pair.

    `s` has the form ``name <email> <epoch-seconds> <+/-HHMM>``.
    """
    user, time, tz_str = s.rsplit(None, 2)
    # Convert the +/-HHMM timezone field into minutes.  The previous
    # shortcut `int(tz_str) * 6 / 10` is only correct for whole-hour
    # offsets (e.g. +0530 yielded 318 minutes instead of 330), so split
    # the field into its hour and minute components explicitly.
    tz_raw = int(tz_str)
    hours, minutes = divmod(abs(tz_raw), 100)
    tz_min = hours * 60 + minutes
    if tz_raw < 0:
        tz_min = -tz_min
    tz = FixedOffset(tz_min, tz_str)
    time = datetime.fromtimestamp(float(time), tz)
    return user, time
class GitConnector(Component):
    """Component connecting Trac to locally-accessible git repositories."""

    implements(IRepositoryConnector, IWikiSyntaxProvider)

    def __init__(self):
        # detect the installed git version once at component creation
        self._version = None

        try:
            self._version = PyGIT.Storage.git_version(git_bin=self.git_bin)
        except PyGIT.GitError, e:
            self.log.error("GitError: " + str(e))

        if self._version:
            self.log.info("detected GIT version %s" % self._version['v_str'])
            self.env.systeminfo.append(('GIT', self._version['v_str']))
            if not self._version['v_compatible']:
                # NOTE(review): missing space between "compatible" and
                # "(need ...)" in this log message
                self.log.error("GIT version %s installed not compatible"
                               "(need >= %s)" %
                               (self._version['v_str'],
                                self._version['v_min_str']))

    # IWikiSyntaxProvider methods

    def _format_sha_link(self, formatter, sha, label):
        # render `sha` as a changeset link, falling back to a
        # "missing changeset" link on any failure
        # FIXME: this function needs serious rethinking...

        reponame = ''

        # find the repository the current resource context belongs to
        context = formatter.context
        while context:
            if context.resource.realm in ('source', 'changeset'):
                reponame = context.resource.parent.id
                break
            context = context.parent

        try:
            repos = self.env.get_repository(reponame)

            if not repos:
                raise Exception("Repository '%s' not found" % reponame)

            sha = repos.normalize_rev(sha) # in case it was abbreviated
            changeset = repos.get_changeset(sha)
            return tag.a(label, class_='changeset',
                         title=shorten_line(changeset.message),
                         href=formatter.href.changeset(sha, repos.reponame))
        except Exception, e:
            return tag.a(label, class_='missing changeset',
                         title=to_unicode(e), rel='nofollow')

    def get_wiki_syntax(self):
        # auto-link bare (optionally r-prefixed) sha1-looking strings
        yield (r'(?:\b|!)r?[0-9a-fA-F]{%d,40}\b' % self.wiki_shortrev_len,
               lambda fmt, sha, match:
                   self._format_sha_link(fmt, sha.startswith('r')
                                         and sha[1:] or sha, sha))

    def get_link_resolvers(self):
        # explicit sha:... wiki links
        yield ('sha', lambda fmt, _, sha, label, match=None:
                    self._format_sha_link(fmt, sha, label))

    # IRepositoryConnector methods

    persistent_cache = BoolOption('git', 'persistent_cache', 'false',
        """Enable persistent caching of commit tree.""")

    cached_repository = BoolOption('git', 'cached_repository', 'false',
        """Wrap `GitRepository` in `CachedRepository`.""")

    shortrev_len = IntOption('git', 'shortrev_len', 7,
        """The length at which a sha1 should be abbreviated to (must
        be >= 4 and <= 40).
        """)

    wiki_shortrev_len = IntOption('git', 'wikishortrev_len', 40,
        """The minimum length of an hex-string for which
        auto-detection as sha1 is performed (must be >= 4 and <= 40).
        """)

    trac_user_rlookup = BoolOption('git', 'trac_user_rlookup', 'false',
        """Enable reverse mapping of git email addresses to trac user ids
        (costly if you have many users).""")

    use_committer_id = BoolOption('git', 'use_committer_id', 'true',
        """Use git-committer id instead of git-author id for the
        changeset ''Author'' field.
        """)

    use_committer_time = BoolOption('git', 'use_committer_time', 'true',
        """Use git-committer timestamp instead of git-author timestamp
        for the changeset ''Timestamp'' field.
        """)

    git_fs_encoding = Option('git', 'git_fs_encoding', 'utf-8',
        """Define charset encoding of paths within git repositories.""")

    git_bin = Option('git', 'git_bin', 'git',
        """Path to the git executable.""")

    def get_supported_types(self):
        # repository type name and connector priority
        yield ('git', 8)

    def get_repository(self, type, dir, params):
        """GitRepository factory method"""
        assert type == 'git'

        if not (4 <= self.shortrev_len <= 40):
            raise TracError("[git] shortrev_len setting must be within [4..40]")

        if not (4 <= self.wiki_shortrev_len <= 40):
            raise TracError("[git] wikishortrev_len must be within [4..40]")

        if not self._version:
            raise TracError("GIT backend not available")
        elif not self._version['v_compatible']:
            # NOTE(review): same missing space as in __init__'s message
            raise TracError("GIT version %s installed not compatible"
                            "(need >= %s)" %
                            (self._version['v_str'],
                             self._version['v_min_str']))

        if self.trac_user_rlookup:
            def rlookup_uid(email):
                """Reverse map 'real name <user@domain.tld>' addresses to trac
                user ids.

                :return: `None` if lookup failed
                """
                try:
                    _, email = email.rsplit('<', 1)
                    email, _ = email.split('>', 1)
                    email = email.lower()
                except Exception:
                    return None

                for _uid, _name, _email in self.env.get_known_users():
                    try:
                        if email == _email.lower():
                            return _uid
                    except Exception:
                        continue

        else:
            def rlookup_uid(_):
                return None

        repos = GitRepository(dir, params, self.log,
                              persistent_cache=self.persistent_cache,
                              git_bin=self.git_bin,
                              git_fs_encoding=self.git_fs_encoding,
                              shortrev_len=self.shortrev_len,
                              rlookup_uid=rlookup_uid,
                              use_committer_id=self.use_committer_id,
                              use_committer_time=self.use_committer_time,
                              )

        if self.cached_repository:
            repos = GitCachedRepository(self.env, repos, self.log)
            self.log.debug("enabled CachedRepository for '%s'" % dir)
        else:
            self.log.debug("disabled CachedRepository for '%s'" % dir)

        return repos
class CsetPropertyRenderer(Component):
implements(IPropertyRenderer)
# relied upon by GitChangeset
def match_property(self, name, mode):
# default renderer has priority 1
return (name in ('Parents',
'Children',
'Branches',
'git-committer',
'git-author',
) and mode == 'revprop') and 4 or 0
def render_property(self, name, mode, context, props):
def sha_link(sha, label=None):
# sha is assumed to be a non-abbreviated 40-chars sha id
try:
reponame = context.resource.parent.id
repos = self.env.get_repository(reponame)
cset = repos.get_changeset(sha)
if label is None:
label = repos.display_rev(sha)
return tag.a(label, class_='changeset',
title=shorten_line(cset.message),
href=context.href.changeset(sha, repos.reponame))
except Exception, e:
return tag.a(sha, class_='missing changeset',
title=to_unicode(e), rel='nofollow')
if name == 'Branches':
branches = props[name]
# simple non-merge commit
return tag(*intersperse(', ', (sha_link(rev, label)
for label, rev in branches)))
elif name in ('Parents', 'Children'):
revs = props[name] # list of commit ids
if name == 'Parents' and len(revs) > 1:
# we got a merge...
current_sha = context.resource.id
reponame = context.resource.parent.id
parent_links = intersperse(', ', \
((sha_link(rev),
' (',
tag.a('diff',
title="Diff against this parent (show the " \
"changes merged from the other parents)",
href=context.href.changeset(current_sha, reponame,
old=rev)),
')')
for rev in revs))
return tag(list(parent_links),
tag.br(),
tag.span(tag("Note: this is a ",
tag.strong("merge"), " changeset, "
"the changes displayed below "
"correspond to the merge itself."),
class_='hint'),
tag.br(),
tag.span(tag("Use the ", tag.tt("(diff)"),
" links above to see all the changes "
"relative to each parent."),
class_='hint'))
# simple non-merge commit
return tag(*intersperse(', ', map(sha_link, revs)))
elif name in ('git-committer', 'git-author'):
user_, time_ = props[name]
_str = "%s (%s)" % (
Chrome(self.env).format_author(context.req, user_),
format_datetime(time_, tzinfo=context.req.tz))
return unicode(_str)
raise TracError("Internal error")
class GitRepository(Repository):
"""Git repository"""
def __init__(self, path, params, log,
persistent_cache=False,
git_bin='git',
git_fs_encoding='utf-8',
shortrev_len=7,
rlookup_uid=lambda _: None,
use_committer_id=False,
use_committer_time=False,
):
self.logger = log
self.gitrepo = path
self.params = params
self.shortrev_len = max(4, min(shortrev_len, 40))
self.rlookup_uid = rlookup_uid
self.use_committer_time = use_committer_time
self.use_committer_id = use_committer_id
try:
self.git = PyGIT.StorageFactory(path, log, not persistent_cache,
git_bin=git_bin,
git_fs_encoding=git_fs_encoding) \
.getInstance()
except PyGIT.GitError, e:
raise TracError("%s does not appear to be a Git "
"repository." % path)
Repository.__init__(self, 'git:'+path, self.params, log)
def close(self):
self.git = None
def get_youngest_rev(self):
return self.git.youngest_rev()
def get_oldest_rev(self):
return self.git.oldest_rev()
def normalize_path(self, path):
return path and path.strip('/') or '/'
def normalize_rev(self, rev):
if not rev:
return self.get_youngest_rev()
normrev = self.git.verifyrev(rev)
if normrev is None:
raise NoSuchChangeset(rev)
return normrev
def display_rev(self, rev):
return self.short_rev(rev)
def short_rev(self, rev):
return self.git.shortrev(self.normalize_rev(rev),
min_len=self.shortrev_len)
def get_node(self, path, rev=None, historian=None):
return GitNode(self, path, rev, self.log, None, historian)
def get_quickjump_entries(self, rev):
for bname, bsha in self.git.get_branches():
yield 'branches', bname, '/', bsha
for t in self.git.get_tags():
yield 'tags', t, '/', t
def get_path_url(self, path, rev):
return self.params.get('url')
def get_changesets(self, start, stop):
for rev in self.git.history_timerange(to_timestamp(start),
to_timestamp(stop)):
yield self.get_changeset(rev)
def get_changeset(self, rev):
"""GitChangeset factory method"""
return GitChangeset(self, rev)
def get_changes(self, old_path, old_rev, new_path, new_rev,
ignore_ancestry=0):
# TODO: handle renames/copies, ignore_ancestry
if old_path != new_path:
raise TracError("not supported in git_fs")
with self.git.get_historian(old_rev,
old_path.strip('/')) as old_historian:
with self.git.get_historian(new_rev,
new_path.strip('/')) as new_historian:
for chg in self.git.diff_tree(old_rev, new_rev,
self.normalize_path(new_path)):
mode1, mode2, obj1, obj2, action, path, path2 = chg
kind = Node.FILE
if mode2.startswith('04') or mode1.startswith('04'):
kind = Node.DIRECTORY
change = GitChangeset.action_map[action]
old_node = None
new_node = None
if change != Changeset.ADD:
old_node = self.get_node(path, old_rev, old_historian)
if change != Changeset.DELETE:
new_node = self.get_node(path, new_rev, new_historian)
yield old_node, new_node, kind, change
def next_rev(self, rev, path=''):
return self.git.hist_next_revision(rev)
def previous_rev(self, rev, path=''):
return self.git.hist_prev_revision(rev)
def parent_revs(self, rev):
return self.git.parents(rev)
def child_revs(self, rev):
return self.git.children(rev)
def rev_older_than(self, rev1, rev2):
rc = self.git.rev_is_anchestor_of(rev1, rev2)
return rc
# def clear(self, youngest_rev=None):
# self.youngest = None
# if youngest_rev is not None:
# self.youngest = self.normalize_rev(youngest_rev)
# self.oldest = None
def clear(self, youngest_rev=None):
self.sync()
def sync(self, rev_callback=None, clean=None):
if rev_callback:
revs = set(self.git.all_revs())
if not self.git.sync():
return None # nothing expected to change
if rev_callback:
revs = set(self.git.all_revs()) - revs
for rev in revs:
rev_callback(rev)
class GitNode(Node):
def __init__(self, repos, path, rev, log, ls_tree_info=None,
historian=None):
self.log = log
self.repos = repos
self.fs_sha = None # points to either tree or blobs
self.fs_perm = None
self.fs_size = None
rev = rev and str(rev) or 'HEAD'
kind = Node.DIRECTORY
p = path.strip('/')
if p: # ie. not the root-tree
if not ls_tree_info:
ls_tree_info = repos.git.ls_tree(rev, p) or None
if ls_tree_info:
[ls_tree_info] = ls_tree_info
if not ls_tree_info:
raise NoSuchNode(path, rev)
self.fs_perm, k, self.fs_sha, self.fs_size, _ = ls_tree_info
# fix-up to the last commit-rev that touched this node
rev = repos.git.last_change(rev, p, historian)
if k == 'tree':
pass
elif k == 'commit':
# FIXME: this is a workaround for missing git submodule
# support in the plugin
pass
elif k == 'blob':
kind = Node.FILE
else:
raise TracError("Internal error (got unexpected object " \
"kind '%s')" % k)
self.created_path = path
self.created_rev = rev
Node.__init__(self, repos, path, rev, kind)
def __git_path(self):
"""return path as expected by PyGIT"""
p = self.path.strip('/')
if self.isfile:
assert p
return p
if self.isdir:
return p and (p + '/')
raise TracError("internal error")
def get_content(self):
if not self.isfile:
return None
return self.repos.git.get_file(self.fs_sha)
def get_properties(self):
return self.fs_perm and {'mode': self.fs_perm } or {}
def get_annotations(self):
if not self.isfile:
return
return [rev for rev, lineno in \
self.repos.git.blame(self.rev,self.__git_path())]
def get_entries(self):
if not self.isdir:
return
with self.repos.git.get_historian(self.rev,
self.path.strip('/')) as historian:
for ent in self.repos.git.ls_tree(self.rev, self.__git_path()):
yield GitNode(self.repos, ent[-1], self.rev, self.log, ent,
historian)
def get_content_type(self):
if self.isdir:
return None
return ''
def get_content_length(self):
if not self.isfile:
return None
if self.fs_size is None:
self.fs_size = self.repos.git.get_obj_size(self.fs_sha)
return self.fs_size
def get_history(self, limit=None):
# TODO: find a way to follow renames/copies
for is_last, rev in _last_iterable(self.repos.git.history(self.rev,
self.__git_path(), limit)):
yield (self.path, rev, Changeset.EDIT if not is_last else
Changeset.ADD)
def get_last_modified(self):
if not self.isfile:
return None
try:
msg, props = self.repos.git.read_commit(self.rev)
user, ts = _parse_user_time(props['committer'][0])
except:
self.log.error("internal error (could not get timestamp from "
"commit '%s')" % self.rev)
return None
return ts
class GitChangeset(Changeset):
"""A Git changeset in the Git repository.
Corresponds to a Git commit blob.
"""
action_map = { # see also git-diff-tree(1) --diff-filter
'A': Changeset.ADD,
'M': Changeset.EDIT, # modified
'T': Changeset.EDIT, # file type (mode) change
'D': Changeset.DELETE,
'R': Changeset.MOVE, # renamed
'C': Changeset.COPY
} # TODO: U, X, B
def __init__(self, repos, sha):
if sha is None:
raise NoSuchChangeset(sha)
try:
msg, props = repos.git.read_commit(sha)
except PyGIT.GitErrorSha:
raise NoSuchChangeset(sha)
self.props = props
assert 'children' not in props
_children = list(repos.git.children(sha))
if _children:
props['children'] = _children
committer, author = self._get_committer_and_author()
# use 1st author/committer as changeset owner/timestamp
c_user = a_user = c_time = a_time = None
if committer:
c_user, c_time = _parse_user_time(committer)
if author:
a_user, a_time = _parse_user_time(author)
if repos.use_committer_time:
time = c_time or a_time
else:
time = a_time or c_time
if repos.use_committer_id:
user = c_user or a_user
else:
user = a_user or c_user
# try to resolve email address to trac uid
user = repos.rlookup_uid(user) or user
Changeset.__init__(self, repos, rev=sha, message=msg, author=user,
date=time)
def _get_committer_and_author(self):
committer = author = None
if 'committer' in self.props:
committer = self.props['committer'][0]
if 'author' in self.props:
author = self.props['author'][0]
return committer, author
def get_properties(self):
properties = {}
if 'parent' in self.props:
properties['Parents'] = self.props['parent']
if 'children' in self.props:
properties['Children'] = self.props['children']
committer, author = self._get_committer_and_author()
if author != committer:
properties['git-committer'] = _parse_user_time(committer)
properties['git-author'] = _parse_user_time(author)
branches = list(self.repos.git.get_branch_contains(self.rev,
resolve=True))
if branches:
properties['Branches'] = branches
return properties
def get_changes(self):
paths_seen = set()
for parent in self.props.get('parent', [None]):
for mode1, mode2, obj1, obj2, action, path1, path2 in \
self.repos.git.diff_tree(parent, self.rev,
find_renames=True):
path = path2 or path1
p_path, p_rev = path1, parent
kind = Node.FILE
if mode2.startswith('04') or mode1.startswith('04'):
kind = Node.DIRECTORY
action = GitChangeset.action_map[action[0]]
if action == Changeset.ADD:
p_path = ''
p_rev = None
# CachedRepository expects unique (rev, path, change_type) key
# this is only an issue in case of merges where files required
# editing
if path in paths_seen:
continue
paths_seen.add(path)
yield path, kind, action, p_path, p_rev
def get_branches(self):
_rev = self.rev
return [(k, v == _rev)
for k, v in self.repos.git.get_branch_contains(_rev,
resolve=True)]
class GitwebProjectsRepositoryProvider(Component):
implements(IRepositoryProvider)
projects_list = PathOption('git', 'projects_list', doc=
"""Path to a gitweb-formatted projects.list""")
projects_base = PathOption('git', 'projects_base', doc=
"""Path to the base of your git projects""")
projects_url = Option('git', 'projects_url', doc=
"""Template for project URLs. %s will be replaced with the repo
name""")
def get_repositories(self):
if not self.projects_list:
return
for line in open(self.projects_list):
line = line.strip()
name = line
if name.endswith('.git'):
name = name[:-4]
repo = {
'dir': os.path.join(self.projects_base, line),
'type': 'git',
}
description_path = os.path.join(repo['dir'], 'description')
if os.path.exists(description_path):
repo['description'] = open(description_path).read().strip()
if self.projects_url:
repo['url'] = self.projects_url % name
yield name, repo
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/git/tests/PyGIT.py | trac/tracopt/versioncontrol/git/tests/PyGIT.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import shutil
import tempfile
import unittest
from subprocess import Popen, PIPE
from trac.test import locate, EnvironmentStub
from trac.util import create_file
from trac.util.compat import close_fds
from tracopt.versioncontrol.git.PyGIT import GitCore, Storage, parse_commit
class GitTestCase(unittest.TestCase):
def test_is_sha(self):
self.assertTrue(not GitCore.is_sha('123'))
self.assertTrue(GitCore.is_sha('1a3f'))
self.assertTrue(GitCore.is_sha('f' * 40))
self.assertTrue(not GitCore.is_sha('x' + 'f' * 39))
self.assertTrue(not GitCore.is_sha('f' * 41))
def test_git_version(self):
v = Storage.git_version()
self.assertTrue(v)
self.assertTrue(v['v_compatible'])
class TestParseCommit(unittest.TestCase):
# The ''' ''' lines are intended to keep lines with trailing whitespace
commit2240a7b = '''\
tree b19535236cfb6c64b798745dd3917dafc27bcd0a
parent 30aaca4582eac20a52ac7b2ec35bdb908133e5b1
parent 5a0dc7365c240795bf190766eba7a27600be3b3e
author Linus Torvalds <torvalds@linux-foundation.org> 1323915958 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 1323915958 -0800
mergetag object 5a0dc7365c240795bf190766eba7a27600be3b3e
type commit
tag tytso-for-linus-20111214A
tagger Theodore Ts'o <tytso@mit.edu> 1323890113 -0500
''' '''
tytso-for-linus-20111214
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.10 (GNU/Linux)
''' '''
iQIcBAABCAAGBQJO6PXBAAoJENNvdpvBGATwpuEP/2RCxmdWYZ8/6Z6pmTh3hHN5
fx6HckTdvLQOvbQs72wzVW0JKyc25QmW2mQc5z3MjSymjf/RbEKihPUITRNbHrTD
T2sP/lWu09AKLioEg4ucAKn/A7Do3UDIkXTszvVVP/t2psVPzLeJ1njQKra14Nyz
o0+gSlnwuGx9WaxfR+7MYNs2ikdSkXIeYsiFAOY4YOxwwC99J/lZ0YaNkbI7UBtC
yu2XLIvPboa5JZXANq2G3VhVIETMmOyRTCC76OAXjqkdp9nLFWDG0ydqQh0vVZwL
xQGOmAj+l3BNTE0QmMni1w7A0SBU3N6xBA5HN6Y49RlbsMYG27aN54Fy5K2R41I3
QXVhBL53VD6b0KaITcoz7jIGIy6qk9Wx+2WcCYtQBSIjL2YwlaJq0PL07+vRamex
sqHGDejcNY87i6AV0DP6SNuCFCi9xFYoAoMi9Wu5E9+T+Vck0okFzW/luk/FvsSP
YA5Dh+vISyBeCnWQvcnBmsUQyf8d9MaNnejZ48ath+GiiMfY8USAZ29RAG4VuRtS
9DAyTTIBA73dKpnvEV9u4i8Lwd8hRVMOnPyOO785NwEXk3Ng08pPSSbMklW6UfCY
4nr5UNB13ZPbXx4uoAvATMpCpYxMaLEdxmeMvgXpkekl0hHBzpVDey1Vu9fb/a5n
dQpo6WWG9HIJ23hOGAGR
=n3Lm
-----END PGP SIGNATURE-----
Merge tag 'tytso-for-linus-20111214' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* tag 'tytso-for-linus-20111214' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
ext4: handle EOF correctly in ext4_bio_write_page()
ext4: remove a wrong BUG_ON in ext4_ext_convert_to_initialized
ext4: correctly handle pages w/o buffers in ext4_discard_partial_buffers()
ext4: avoid potential hang in mpage_submit_io() when blocksize < pagesize
ext4: avoid hangs in ext4_da_should_update_i_disksize()
ext4: display the correct mount option in /proc/mounts for [no]init_itable
ext4: Fix crash due to getting bogus eh_depth value on big-endian systems
ext4: fix ext4_end_io_dio() racing against fsync()
.. using the new signed tag merge of git that now verifies the gpg
signature automatically. Yay. The branchname was just 'dev', which is
prettier. I'll tell Ted to use nicer tag names for future cases.
'''
def test_parse(self):
msg, props = parse_commit(self.commit2240a7b)
self.assertTrue(msg)
self.assertTrue(props)
self.assertEquals(
['30aaca4582eac20a52ac7b2ec35bdb908133e5b1',
'5a0dc7365c240795bf190766eba7a27600be3b3e'],
props['parent'])
self.assertEquals(
['Linus Torvalds <torvalds@linux-foundation.org> 1323915958 -0800'],
props['author'])
self.assertEquals(props['author'], props['committer'])
# Merge tag
self.assertEquals(['''\
object 5a0dc7365c240795bf190766eba7a27600be3b3e
type commit
tag tytso-for-linus-20111214A
tagger Theodore Ts\'o <tytso@mit.edu> 1323890113 -0500
tytso-for-linus-20111214
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.10 (GNU/Linux)
iQIcBAABCAAGBQJO6PXBAAoJENNvdpvBGATwpuEP/2RCxmdWYZ8/6Z6pmTh3hHN5
fx6HckTdvLQOvbQs72wzVW0JKyc25QmW2mQc5z3MjSymjf/RbEKihPUITRNbHrTD
T2sP/lWu09AKLioEg4ucAKn/A7Do3UDIkXTszvVVP/t2psVPzLeJ1njQKra14Nyz
o0+gSlnwuGx9WaxfR+7MYNs2ikdSkXIeYsiFAOY4YOxwwC99J/lZ0YaNkbI7UBtC
yu2XLIvPboa5JZXANq2G3VhVIETMmOyRTCC76OAXjqkdp9nLFWDG0ydqQh0vVZwL
xQGOmAj+l3BNTE0QmMni1w7A0SBU3N6xBA5HN6Y49RlbsMYG27aN54Fy5K2R41I3
QXVhBL53VD6b0KaITcoz7jIGIy6qk9Wx+2WcCYtQBSIjL2YwlaJq0PL07+vRamex
sqHGDejcNY87i6AV0DP6SNuCFCi9xFYoAoMi9Wu5E9+T+Vck0okFzW/luk/FvsSP
YA5Dh+vISyBeCnWQvcnBmsUQyf8d9MaNnejZ48ath+GiiMfY8USAZ29RAG4VuRtS
9DAyTTIBA73dKpnvEV9u4i8Lwd8hRVMOnPyOO785NwEXk3Ng08pPSSbMklW6UfCY
4nr5UNB13ZPbXx4uoAvATMpCpYxMaLEdxmeMvgXpkekl0hHBzpVDey1Vu9fb/a5n
dQpo6WWG9HIJ23hOGAGR
=n3Lm
-----END PGP SIGNATURE-----'''], props['mergetag'])
# Message
self.assertEquals("""Merge tag 'tytso-for-linus-20111214' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* tag 'tytso-for-linus-20111214' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
ext4: handle EOF correctly in ext4_bio_write_page()
ext4: remove a wrong BUG_ON in ext4_ext_convert_to_initialized
ext4: correctly handle pages w/o buffers in ext4_discard_partial_buffers()
ext4: avoid potential hang in mpage_submit_io() when blocksize < pagesize
ext4: avoid hangs in ext4_da_should_update_i_disksize()
ext4: display the correct mount option in /proc/mounts for [no]init_itable
ext4: Fix crash due to getting bogus eh_depth value on big-endian systems
ext4: fix ext4_end_io_dio() racing against fsync()
.. using the new signed tag merge of git that now verifies the gpg
signature automatically. Yay. The branchname was just 'dev', which is
prettier. I'll tell Ted to use nicer tag names for future cases.""", msg)
class UnicodeNameTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
self.repos_path = tempfile.mkdtemp(prefix='trac-gitrepos')
self.git_bin = locate('git')
# create git repository and master branch
self._git('init', self.repos_path)
create_file(os.path.join(self.repos_path, '.gitignore'))
self._git('add', '.gitignore')
self._git('commit', '-a', '-m', 'test')
def tearDown(self):
if os.path.isdir(self.repos_path):
shutil.rmtree(self.repos_path)
def _git(self, *args):
args = [self.git_bin] + list(args)
proc = Popen(args, stdout=PIPE, stderr=PIPE, close_fds=close_fds,
cwd=self.repos_path)
proc.wait()
assert proc.returncode == 0
return proc
def _storage(self):
path = os.path.join(self.repos_path, '.git')
return Storage(path, self.env.log, self.git_bin, 'utf-8')
def test_unicode_verifyrev(self):
storage = self._storage()
self.assertNotEqual(None, storage.verifyrev(u'master'))
self.assertEquals(None, storage.verifyrev(u'tété'))
def test_unicode_filename(self):
create_file(os.path.join(self.repos_path, 'tickét.txt'))
self._git('add', 'tickét.txt')
self._git('commit', '-m', 'unicode-filename')
storage = self._storage()
filenames = sorted(fname for mode, type, sha, size, fname
in storage.ls_tree('HEAD'))
self.assertEquals(unicode, type(filenames[0]))
self.assertEquals(unicode, type(filenames[1]))
self.assertEquals(u'.gitignore', filenames[0])
self.assertEquals(u'tickét.txt', filenames[1])
def test_unicode_branches(self):
self._git('checkout', '-b', 'tickét10980', 'master')
storage = self._storage()
branches = sorted(storage.get_branches())
self.assertEquals(unicode, type(branches[0][0]))
self.assertEquals(unicode, type(branches[1][0]))
self.assertEquals(u'master', branches[0][0])
self.assertEquals(u'tickét10980', branches[1][0])
contains = sorted(storage.get_branch_contains(branches[1][1],
resolve=True))
self.assertEquals(unicode, type(contains[0][0]))
self.assertEquals(unicode, type(contains[1][0]))
self.assertEquals(u'master', contains[0][0])
self.assertEquals(u'tickét10980', contains[1][0])
def test_unicode_tags(self):
self._git('tag', 'täg-t10980', 'master')
storage = self._storage()
tags = tuple(storage.get_tags())
self.assertEquals(unicode, type(tags[0]))
self.assertEquals(u'täg-t10980', tags[0])
self.assertNotEqual(None, storage.verifyrev(u'täg-t10980'))
#class GitPerformanceTestCase(unittest.TestCase):
# """Performance test. Not really a unit test.
# Not self-contained: Needs a git repository and prints performance result
# instead of testing anything.
# TODO: Move to a profiling script?"""
#
# def test_performance(self):
# import logging
# import timeit
#
# g = Storage(path_to_repo, logging) # Need a git repository path here
# revs = g.get_commits().keys()
#
# def shortrev_test():
# for i in revs:
# i = str(i)
# s = g.shortrev(i, min_len=4)
# self.assertTrue(i.startswith(s))
# self.assertEquals(g.fullrev(s), i)
#
# iters = 1
# t = timeit.Timer("shortrev_test()",
# "from __main__ import shortrev_test")
# usec_per_rev = (1000000 * t.timeit(number=iters)/len(revs))
# print "%.2f usec/rev" % usec_per_rev # Print instead of testing
#class GitMemoryUsageTestCase(unittest.TestCase):
# """Memory test. Not really a unit test.
# Not self-contained: Needs a git repository and prints memory usage
# instead of testing anything.
# TODO: Move to a profiling script?"""
#
# def test_memory_usage(self):
# import logging
# import sys
#
# # custom linux hack reading `/proc/<PID>/statm`
# if sys.platform == 'linux2':
# __pagesize = os.sysconf('SC_PAGESIZE')
#
# def proc_statm(pid = os.getpid()):
# __proc_statm = '/proc/%d/statm' % pid
# try:
# t = open(__proc_statm)
# result = t.read().split()
# t.close()
# assert len(result) == 7
# return tuple([ __pagesize*int(p) for p in result ])
# except:
# raise RuntimeError("failed to get memory stats")
#
# else: # not linux2
# print "WARNING - meminfo.proc_statm() not available"
# def proc_statm():
# return (0,)*7
#
# print "statm =", proc_statm()
# __data_size = proc_statm()[5]
# __data_size_last = [__data_size]
#
# def print_data_usage():
# __tmp = proc_statm()[5]
# print "DATA: %6d %+6d" % (__tmp - __data_size,
# __tmp - __data_size_last[0])
# __data_size_last[0] = __tmp
#
# print_data_usage()
#
# g = Storage(path_to_repo, logging) # Need a git repository path here
#
# print_data_usage()
#
# print "[%s]" % g.head()
# print g.ls_tree(g.head())
# print "--------------"
# print_data_usage()
# print g.read_commit(g.head())
# print "--------------"
# print_data_usage()
# p = g.parents(g.head())
# print list(p)
# print "--------------"
# print list(g.children(list(p)[0]))
# print list(g.children(list(p)[0]))
# print "--------------"
# print g.get_commit_encoding()
# print "--------------"
# print g.get_branches()
# print "--------------"
# print g.hist_prev_revision(g.oldest_rev()), g.oldest_rev(), \
# g.hist_next_revision(g.oldest_rev())
# print_data_usage()
# print "--------------"
# p = g.youngest_rev()
# print g.hist_prev_revision(p), p, g.hist_next_revision(p)
# print "--------------"
#
# p = g.head()
# for i in range(-5, 5):
# print i, g.history_relative_rev(p, i)
#
# # check for loops
# def check4loops(head):
# print "check4loops", head
# seen = set([head])
# for _sha in g.children_recursive(head):
# if _sha in seen:
# print "dupe detected :-/", _sha, len(seen)
# seen.add(_sha)
# return seen
#
# print len(check4loops(g.parents(g.head())[0]))
#
# #p = g.head()
# #revs = [ g.history_relative_rev(p, i) for i in range(0,10) ]
# print_data_usage()
# revs = g.get_commits().keys()
# print_data_usage()
#
# #print len(check4loops(g.oldest_rev()))
# #print len(list(g.children_recursive(g.oldest_rev())))
#
# print_data_usage()
#
# # perform typical trac operations:
#
# if 1:
# print "--------------"
# rev = g.head()
# for mode, _type, sha, _size, name in g.ls_tree(rev):
# [last_rev] = g.history(rev, name, limit=1)
# s = g.get_obj_size(sha) if _type == 'blob' else 0
# msg = g.read_commit(last_rev)
#
# print "%s %s %10d [%s]" % (_type, last_rev, s, name)
#
# print "allocating 2nd instance"
# print_data_usage()
# g2 = Storage(path_to_repo, logging) # Need a git repository path here
# g2.head()
# print_data_usage()
#
# print "allocating 3rd instance"
# g3 = Storage(path_to_repo, logging) # Need a git repository path here
# g3.head()
# print_data_usage()
def suite():
suite = unittest.TestSuite()
git = locate("git")
if git:
suite.addTest(unittest.makeSuite(GitTestCase, 'test'))
suite.addTest(unittest.makeSuite(TestParseCommit, 'test'))
if os.name != 'nt':
# Popen doesn't accept unicode path and arguments on Windows
suite.addTest(unittest.makeSuite(UnicodeNameTestCase, 'test'))
else:
print("SKIP: tracopt/versioncontrol/git/tests/PyGIT.py (git cli "
"binary, 'git', not found)")
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/versioncontrol/git/tests/__init__.py | trac/tracopt/versioncontrol/git/tests/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from tracopt.versioncontrol.git.tests import PyGIT
def suite():
suite = unittest.TestSuite()
suite.addTest(PyGIT.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/perm/__init__.py | trac/tracopt/perm/__init__.py | python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false | |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/perm/authz_policy.py | trac/tracopt/perm/authz_policy.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# Copyright (C) 2007 Alec Thomas <alec@swapoff.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Alec Thomas <alec@swapoff.org>
from fnmatch import fnmatch
from itertools import groupby
import os
from trac.core import *
from trac.config import Option
from trac.perm import PermissionSystem, IPermissionPolicy
ConfigObj = None
try:
from configobj import ConfigObj
except ImportError:
pass
class AuthzPolicy(Component):
"""Permission policy using an authz-like configuration file.
Refer to SVN documentation for syntax of the authz file. Groups are
supported.
As the fine-grained permissions brought by this permission policy are
often used in complement of the other pemission policies (like the
`DefaultPermissionPolicy`), there's no need to redefine all the
permissions here. Only additional rights or restrictions should be added.
=== Installation ===
Note that this plugin requires the `configobj` package:
http://www.voidspace.org.uk/python/configobj.html
You should be able to install it by doing a simple `easy_install configobj`
Enabling this policy requires listing it in `trac.ini:
{{{
[trac]
permission_policies = AuthzPolicy, DefaultPermissionPolicy
[authz_policy]
authz_file = conf/authzpolicy.conf
}}}
This means that the `AuthzPolicy` permissions will be checked first, and
only if no rule is found will the `DefaultPermissionPolicy` be used.
=== Configuration ===
The `authzpolicy.conf` file is a `.ini` style configuration file.
- Each section of the config is a glob pattern used to match against a
Trac resource descriptor. These descriptors are in the form:
{{{
<realm>:<id>@<version>[/<realm>:<id>@<version> ...]
}}}
Resources are ordered left to right, from parent to child. If any
component is inapplicable, `*` is substituted. If the version pattern is
not specified explicitely, all versions (`@*`) is added implicitly
Example: Match the WikiStart page
{{{
[wiki:*]
[wiki:WikiStart*]
[wiki:WikiStart@*]
[wiki:WikiStart]
}}}
Example: Match the attachment `wiki:WikiStart@117/attachment/FOO.JPG@*`
on WikiStart
{{{
[wiki:*]
[wiki:WikiStart*]
[wiki:WikiStart@*]
[wiki:WikiStart@*/attachment/*]
[wiki:WikiStart@117/attachment/FOO.JPG]
}}}
- Sections are checked against the current Trac resource '''IN ORDER''' of
appearance in the configuration file. '''ORDER IS CRITICAL'''.
- Once a section matches, the current username is matched, '''IN ORDER''',
against the keys of the section. If a key is prefixed with a `@`, it is
treated as a group. If a key is prefixed with a `!`, the permission is
denied rather than granted. The username will match any of 'anonymous',
'authenticated', <username> or '*', using normal Trac permission rules.
Example configuration:
{{{
[groups]
administrators = athomas
[*/attachment:*]
* = WIKI_VIEW, TICKET_VIEW
[wiki:WikiStart@*]
@administrators = WIKI_ADMIN
anonymous = WIKI_VIEW
* = WIKI_VIEW
# Deny access to page templates
[wiki:PageTemplates/*]
* =
# Match everything else
[*]
@administrators = TRAC_ADMIN
anonymous = BROWSER_VIEW, CHANGESET_VIEW, FILE_VIEW, LOG_VIEW,
MILESTONE_VIEW, POLL_VIEW, REPORT_SQL_VIEW, REPORT_VIEW, ROADMAP_VIEW,
SEARCH_VIEW, TICKET_CREATE, TICKET_MODIFY, TICKET_VIEW, TIMELINE_VIEW,
WIKI_CREATE, WIKI_MODIFY, WIKI_VIEW
# Give authenticated users some extra permissions
authenticated = REPO_SEARCH, XML_RPC
}}}
"""
implements(IPermissionPolicy)
authz_file = Option('authz_policy', 'authz_file', '',
'Location of authz policy configuration file.')
authz = None
authz_mtime = None
# IPermissionPolicy methods
def check_permission(self, action, username, resource, perm):
    """Check `action` for `username` on `resource` against the authz file.

    Returns `True` when the action is explicitly granted, `False` when
    it is explicitly denied, and `None` when no rule matches (the
    decision is then left to the next permission policy).
    """
    if ConfigObj is None:
        self.log.error('configobj package not found')
        return None
    # Re-parse the authz file when it has never been parsed or its
    # mtime changed.  Note the parentheses: without them, `and` binds
    # tighter than `or` and the getmtime() comparison was evaluated
    # even when no authz_file is configured at all.
    if self.authz_file and (not self.authz_mtime or
            os.path.getmtime(self.get_authz_file()) > self.authz_mtime):
        self.parse_authz()
    resource_key = self.normalise_resource(resource)
    self.log.debug('Checking %s on %s', action, resource_key)
    permissions = self.authz_permissions(resource_key, username)
    if permissions is None:
        return None                  # no match, can't decide
    elif permissions == ['']:
        return False                 # all actions are denied
    # FIXME: expand all permissions once for all
    ps = PermissionSystem(self.env)
    for deny, perms in groupby(permissions,
                               key=lambda p: p.startswith('!')):
        if deny and action in ps.expand_actions([p[1:] for p in perms]):
            return False             # action is explicitly denied
        elif action in ps.expand_actions(perms):
            return True              # action is explicitly granted
    return None                      # no match for action, can't decide
# Internal methods
def get_authz_file(self):
    """Return the absolute path of the configured authz file.

    A relative `authz_file` option is resolved against the environment
    directory.
    """
    path = self.authz_file
    if not os.path.isabs(path):
        path = os.path.join(self.env.path, path)
    return path
def parse_authz(self):
    """Parse the authz file into `self.authz` and rebuild the
    user-to-groups mapping `self.groups_by_user`.

    Group members may themselves be group references (prefixed with
    `@`); those are expanded recursively so each user name maps
    directly to the complete set of groups it belongs to.
    """
    self.log.debug('Parsing authz security policy %s',
                   self.get_authz_file())
    self.authz = ConfigObj(self.get_authz_file(), encoding='utf8')
    groups = {}
    for group, users in self.authz.get('groups', {}).iteritems():
        # ConfigObj yields a plain string when a group has one member.
        if isinstance(users, basestring):
            users = [users]
        groups[group] = users
    self.groups_by_user = {}
    def add_items(group, items):
        # Record `group` for every user in `items`, expanding nested
        # group references.
        for item in items:
            if item.startswith('@'):
                # NOTE(review): a cyclic group definition would recurse
                # forever here -- assumed absent from the config file.
                add_items(group, groups[item[1:]])
            else:
                self.groups_by_user.setdefault(item, set()).add(group)
    for group, users in groups.iteritems():
        add_items('@' + group, users)
    # Remember the file's mtime so check_permission() can detect
    # on-disk changes and trigger a re-parse.
    self.authz_mtime = os.path.getmtime(self.get_authz_file())
def normalise_resource(self, resource):
    """Flatten `resource` and its ancestors into a descriptor string of
    the form ``realm:id@version[/realm:id@version ...]``, substituting
    ``*`` for every missing component.
    """
    def _flatten(res):
        if not res:
            return ['*:*@*']
        key = '%s:%s@%s' % (res.realm or '*', res.id or '*',
                            res.version or '*')
        if not (res.realm or res.id):
            return [key]
        # XXX Due to the mixed functionality in resource we can end up
        # with ticket, ticket:1, ticket:1@10.  This code naively
        # collapses all subsets of the parent resource into one.
        # eg. ticket:1@10
        ancestor = res.parent
        while ancestor and (res.realm == ancestor.realm or
                            (res.realm == ancestor.realm and
                             res.id == ancestor.id)):
            ancestor = ancestor.parent
        prefix = _flatten(ancestor) if ancestor else []
        return prefix + [key]
    return '/'.join(_flatten(resource))
def authz_permissions(self, resource_key, username):
    """Return the list of permission names granted to `username` for
    the resource described by `resource_key`, or `None` when no
    section of the authz file matches.
    """
    # TODO: Handle permission negation in sections. eg. "if in this
    # ticket, remove TICKET_MODIFY"
    if username and username != 'anonymous':
        valid_users = ['*', 'authenticated', username]
    else:
        valid_users = ['*', 'anonymous']
    for section_name in self.authz.sections:
        if section_name == 'groups':
            continue
        # A section without an explicit version pattern matches all
        # versions.
        resource_glob = section_name
        if '@' not in resource_glob:
            resource_glob += '@*'
        if not fnmatch(resource_key, resource_glob):
            continue
        section = self.authz[section_name]
        for who, permissions in section.iteritems():
            if who in valid_users or \
                    who in self.groups_by_user.get(username, []):
                self.log.debug('%s matched section %s for user %s',
                               resource_key, resource_glob, username)
                if isinstance(permissions, basestring):
                    return [permissions]
                return permissions
    return None
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/perm/config_perm_provider.py | trac/tracopt/perm/config_perm_provider.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import *
from trac.config import ConfigSection
from trac.perm import IPermissionRequestor
class ExtraPermissionsProvider(Component):
    """Extra permission provider."""

    implements(IPermissionRequestor)

    extra_permissions_section = ConfigSection('extra-permissions',
        doc="""This section provides a way to add arbitrary permissions to a
        Trac environment. This can be useful for adding new permissions to use
        for workflow actions, for example.

        To add new permissions, create a new section `[extra-permissions]` in
        your `trac.ini`. Every entry in that section defines a meta-permission
        and a comma-separated list of permissions. For example:
        {{{
        [extra-permissions]
        extra_admin = extra_view, extra_modify, extra_delete
        }}}

        This entry will define three new permissions `EXTRA_VIEW`,
        `EXTRA_MODIFY` and `EXTRA_DELETE`, as well as a meta-permissions
        `EXTRA_ADMIN` that grants all three permissions.

        If you don't want a meta-permission, start the meta-name with an
        underscore (`_`):
        {{{
        [extra-permissions]
        _perms = extra_view, extra_modify
        }}}
        """)

    def get_permission_actions(self):
        """Return the configured permissions as a mix of plain action
        names and `(meta-action, [actions])` tuples."""
        actions = {}
        for meta, value in self.extra_permissions_section.options():
            perm_names = [each.strip().upper() for each in value.split(',')]
            for perm_name in perm_names:
                actions.setdefault(perm_name, [])
            meta_name = meta.strip().upper()
            # A meta-name starting with '_' only declares the listed
            # permissions, without creating a meta-permission.
            if meta_name and not meta_name.startswith('_'):
                actions.setdefault(meta_name, []).extend(perm_names)
        return [(name, subs) if subs else name
                for name, subs in actions.iteritems()]
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/perm/tests/__init__.py | trac/tracopt/perm/tests/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from tracopt.perm.tests import authz_policy
def suite():
    """Aggregate this package's test suites."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(authz_policy.suite())
    return all_tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/perm/tests/authz_policy.py | trac/tracopt/perm/tests/authz_policy.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import tempfile
import unittest
try:
from configobj import ConfigObj
except ImportError:
ConfigObj = None
from trac.resource import Resource
from trac.test import EnvironmentStub
from trac.util import create_file
from tracopt.perm.authz_policy import AuthzPolicy
class AuthzPolicyTestCase(unittest.TestCase):
    """Check AuthzPolicy against an authz file containing non-ASCII
    group, user and page names."""

    def setUp(self):
        # Write a throwaway authz file and point a stub environment's
        # [authz_policy] authz_file option at it.
        tmpdir = os.path.realpath(tempfile.gettempdir())
        self.authz_file = os.path.join(tmpdir, 'trac-authz-policy')
        create_file(self.authz_file, """\
# Unicode user names
[groups]
administrators = éat
[wiki:WikiStart]
änon = WIKI_VIEW
@administrators = WIKI_VIEW
* =
# Unicode page names
[wiki:résumé]
änon =
@administrators = WIKI_VIEW
* =
""")
        self.env = EnvironmentStub(enable=[AuthzPolicy])
        self.env.config.set('authz_policy', 'authz_file', self.authz_file)
        self.authz_policy = AuthzPolicy(self.env)

    def tearDown(self):
        self.env.reset_db()
        os.remove(self.authz_file)

    def check_permission(self, action, user, resource, perm):
        # Convenience wrapper around the policy under test.
        return self.authz_policy.check_permission(action, user, resource,
                                                  perm)

    def test_unicode_username(self):
        # 'änon' is granted WIKI_VIEW on WikiStart; everyone else is
        # denied by the catch-all '* =' entry.
        resource = Resource('wiki', 'WikiStart')
        self.assertEqual(
            False,
            self.check_permission('WIKI_VIEW', 'anonymous', resource, None))
        self.assertEqual(
            True,
            self.check_permission('WIKI_VIEW', u'änon', resource, None))

    def test_unicode_resource_name(self):
        # On the unicode-named page only the 'administrators' group
        # member 'éat' is granted WIKI_VIEW.
        resource = Resource('wiki', u'résumé')
        self.assertEqual(
            False,
            self.check_permission('WIKI_VIEW', 'anonymous', resource, None))
        self.assertEqual(
            False,
            self.check_permission('WIKI_VIEW', u'änon', resource, None))
        self.assertEqual(
            True,
            self.check_permission('WIKI_VIEW', u'éat', resource, None))
def suite():
    """Build the test suite; skip it when configobj is unavailable."""
    suite = unittest.TestSuite()
    if ConfigObj:
        suite.addTest(unittest.makeSuite(AuthzPolicyTestCase, 'test'))
    else:
        # Use the function-call form of print, like the sibling test
        # modules do; on Python 2 the parenthesized single argument
        # (implicit string concatenation) prints identically.
        print("SKIP: tracopt/perm/tests/authz_policy.py (no configobj "
              "installed)")
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/mimeview/enscript.py | trac/tracopt/mimeview/enscript.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
from genshi.core import Markup
from trac.config import Option, ListOption
from trac.core import *
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.util import NaivePopen
from trac.util.html import Deuglifier
__all__ = ['EnscriptRenderer']
# Map MIME type -> (enscript highlighting mode, quality ratio).
# Additional or overriding entries can be configured through the
# `[mimeviewer] enscript_modes` option (see EnscriptRenderer).
types = {
    'application/xhtml+xml': ('html', 2),
    'application/postscript': ('postscript', 2),
    'application/x-csh': ('csh', 2),
    'application/x-javascript': ('javascript', 2),
    'application/x-troff': ('nroff', 2),
    'text/html': ('html', 2),
    'text/x-ada': ('ada', 2),
    'text/x-asm': ('asm', 2),
    'text/x-awk': ('awk', 2),
    'text/x-c++src': ('cpp', 2),
    'text/x-c++hdr': ('cpp', 2),
    'text/x-chdr': ('c', 2),
    'text/x-csh': ('csh', 2),
    'text/x-csrc': ('c', 2),
    'text/x-diff': ('diffu', 2),  # Assume unified diff (works otherwise)
    'text/x-eiffel': ('eiffel', 2),
    'text/x-elisp': ('elisp', 2),
    'text/x-fortran': ('fortran', 2),
    'text/x-haskell': ('haskell', 2),
    'text/x-idl': ('idl', 2),
    'text/x-inf': ('inf', 2),
    'text/x-java': ('java', 2),
    'text/x-javascript': ('javascript', 2),
    'text/x-ksh': ('ksh', 2),
    'text/x-lua': ('lua', 2),
    'text/x-m4': ('m4', 2),
    'text/x-makefile': ('makefile', 2),
    'text/x-mail': ('mail', 2),
    'text/x-matlab': ('matlab', 2),
    'text/x-objc': ('objc', 2),
    'text/x-pascal': ('pascal', 2),
    'text/x-perl': ('perl', 2),
    'text/x-pyrex': ('pyrex', 2),
    'text/x-python': ('python', 2),
    'text/x-rfc': ('rfc', 2),
    'text/x-ruby': ('ruby', 2),
    'text/x-sh': ('sh', 2),
    'text/x-scheme': ('scheme', 2),
    'text/x-sql': ('sql', 2),
    'text/x-tcl': ('tcl', 2),
    'text/x-tex': ('tex', 2),
    'text/x-vba': ('vba', 2),
    'text/x-verilog': ('verilog', 2),
    'text/x-vhdl': ('vhdl', 2),
    'model/vrml': ('vrml', 2),
    'application/x-sh': ('sh', 2),
    'text/x-zsh': ('zsh', 2),
    'text/vnd.wap.wmlscript': ('wmlscript', 2),
}
class EnscriptDeuglifier(Deuglifier):
    """Map enscript's hard-coded <FONT COLOR="..."> markup onto CSS
    token classes (comment, keyword, string, ...)."""

    @classmethod
    def rules(cls):
        # The named groups become the CSS class names.  The generic
        # `font`/`endfont` rules are listed after all specific colors
        # -- presumably order-sensitive catch-alls; confirm against the
        # Deuglifier base class before reordering.
        return [
            r'(?P<comment><FONT COLOR="#B22222">)',
            r'(?P<keyword><FONT COLOR="#5F9EA0">)',
            r'(?P<type><FONT COLOR="#228B22">)',
            r'(?P<string><FONT COLOR="#BC8F8F">)',
            r'(?P<func><FONT COLOR="#0000FF">)',
            r'(?P<prep><FONT COLOR="#B8860B">)',
            r'(?P<lang><FONT COLOR="#A020F0">)',
            r'(?P<var><FONT COLOR="#DA70D6">)',
            r'(?P<font><FONT.*?>)',
            r'(?P<endfont></FONT>)'
        ]
class EnscriptRenderer(Component):
    """Syntax highlighter using GNU Enscript."""

    implements(IHTMLPreviewRenderer)

    expand_tabs = True
    returns_source = True

    path = Option('mimeviewer', 'enscript_path', 'enscript',
        """Path to the Enscript executable.""")

    enscript_modes = ListOption('mimeviewer', 'enscript_modes',
        'text/x-dylan:dylan:4', doc=
        """List of additional MIME types known by Enscript.
        For each, a tuple `mimetype:mode:quality` has to be
        specified, where `mimetype` is the MIME type,
        `mode` is the corresponding Enscript mode to be used
        for the conversion and `quality` is the quality ratio
        associated to this conversion.
        That can also be used to override the default
        quality ratio used by the Enscript render, which is 2
        (''since 0.10'').""")

    def __init__(self):
        # Lazily-built MIME type -> (mode, quality) mapping, filled in
        # by get_quality_ratio().
        self._types = None

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        """Return the quality ratio for `mimetype` (0 if unsupported)."""
        # Extend default MIME type to mode mappings with configured ones
        if not self._types:
            self._types = {}
            self._types.update(types)
            self._types.update(
                Mimeview(self.env).configured_modes_mapping('enscript'))
        return self._types.get(mimetype, (None, 0))[1]

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Run enscript over `content` and return a list of Markup lines.

        Disables this component and raises an exception when the
        enscript executable cannot be run successfully.
        """
        cmdline = self.path
        mimetype = mimetype.split(';', 1)[0]  # strip off charset
        mode = self._types[mimetype][0]
        cmdline += ' --color -h -q --language=html -p - -E%s' % mode
        # Lazy %-style arguments: the message is only built when debug
        # logging is actually enabled.
        self.log.debug("Enscript command line: %s", cmdline)
        np = NaivePopen(cmdline, content.encode('utf-8'), capturestderr=1)
        if np.errorlevel or np.err:
            self.env.disable_component(self)
            err = "Running enscript failed with (%s, %s), disabling " \
                  "EnscriptRenderer (command: '%s')" \
                  % (np.errorlevel, np.err.strip(), cmdline)
            raise Exception(err)
        odata = np.out
        # Strip the header and footer enscript wraps around the
        # highlighted code, keeping what is between <PRE> and </PRE>.
        i = odata.find('<PRE>')
        # Was `beg = i > 0 and i + 6`, relying on False == 0 when the
        # marker is absent; the conditional expression is equivalent
        # and explicit ('<PRE>' plus one following character).
        beg = i + 6 if i > 0 else 0
        i = odata.rfind('</PRE>')
        end = i if i > 0 else len(odata)
        odata = EnscriptDeuglifier().format(odata[beg:end].decode('utf-8'))
        return [Markup(line) for line in odata.splitlines()]
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/mimeview/php.py | trac/tracopt/mimeview/php.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005 Christian Boos <cboos@bct-technology.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <cboos@bct-technology.com>
# Christopher Lenz <cmlenz@gmx.de>
import os
import re
from genshi.core import Markup
from trac.core import *
from trac.config import Option
from trac.mimeview.api import IHTMLPreviewRenderer, content_to_unicode
from trac.util import NaivePopen
from trac.util.html import Deuglifier
from trac.util.translation import _
__all__ = ['PHPRenderer']
# MIME types handled by the PHP highlighter.
# NOTE(review): 'application/x-httpd-php1' looks suspicious -- the
# SilverCity renderer in this same tree maps
# 'application/x-httpd-php3'; confirm whether 'php1' is a typo before
# relying on it.
php_types = ('text/x-php', 'application/x-httpd-php',
             'application/x-httpd-php4', 'application/x-httpd-php1')
class PhpDeuglifier(Deuglifier):
    """Convert the markup emitted by PHP's source highlighter into the
    CSS-class based markup used by Trac."""

    def format(self, indata):
        # The PHP highlighter emits its closing </font>/</span> tags on
        # the line *after* the one they apply to, which made Trac open
        # and immediately close a span at the beginning of each line.
        # Bubble the closing tag back in front of any run of <br /> tags.
        eol_fix = re.compile(r"((?:<br />)+)(</(?:font|span)>)")
        fixed = eol_fix.sub(lambda m: m.group(2) + m.group(1), indata)
        # Let the base class do the dirty work of applying CSS classes.
        return Deuglifier.format(self, fixed)

    @classmethod
    def rules(cls):
        # PHP 4 wraps tokens in <font color="...">, PHP 5 in
        # <span style="color: ...">; the rules match either form.
        css_colors = dict(comment='FF8000', lang='0000BB',
                          keyword='007700', string='DD0000')
        rules = [r'(?P<%s><(?:font color="|span style="color: )#%s">)' % c
                 for c in css_colors.items()]
        rules.append(r'(?P<font><font.*?>)')
        rules.append(r'(?P<endfont></font>)')
        return rules
class PHPRenderer(Component):
    """Syntax highlighter using the PHP executable."""

    implements(IHTMLPreviewRenderer)

    path = Option('mimeviewer', 'php_path', 'php',
        """Path to the PHP executable (''since 0.9'').""")

    returns_source = True

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        """Return 5 for PHP MIME types, 0 otherwise."""
        if mimetype in php_types:
            return 5
        return 0

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Highlight `content` with `php -sn` and return Markup lines.

        Raises `TracError` when the PHP CGI binary is detected (only
        the CLI flavour produces usable output) and a plain `Exception`
        when running PHP fails.
        """
        # -n to ignore php.ini so we're using default colors
        cmdline = '%s -sn' % self.path
        # Lazy %-style arguments: only build the message when debug
        # logging is enabled.
        self.log.debug("PHP command line: %s", cmdline)
        content = content_to_unicode(self.env, content, mimetype)
        content = content.encode('utf-8')
        np = NaivePopen(cmdline, content, capturestderr=1)
        if (os.name != 'nt' and np.errorlevel) or np.err:
            msg = 'Running (%s) failed: %s, %s.' % (cmdline,
                                                    np.errorlevel,
                                                    np.err)
            raise Exception(msg)
        # Drop the first and last lines of PHP's output (its enclosing
        # markup), then join the rest.
        odata = ''.join(np.out.splitlines()[1:-1])
        if odata.startswith('X-Powered-By:') or \
                odata.startswith('Content-type:'):
            raise TracError(_('You appear to be using the PHP CGI '
                              'binary. Trac requires the CLI version '
                              'for syntax highlighting.'))
        epilogues = ["</span>", "</font>"]
        for e in epilogues:
            if odata.endswith(e):
                odata = odata[:-len(e)]
                break
        html = PhpDeuglifier().format(odata.decode('utf-8'))
        # PHP generates _way_ too many non-breaking spaces...
        # We don't need them anyway, so replace them by normal spaces
        return [Markup(line.replace('&nbsp;', ' '))
                for line in html.split('<br />')]
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/mimeview/silvercity.py | trac/tracopt/mimeview/silvercity.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
"""Syntax highlighting module, based on the SilverCity module.
Get it at: http://silvercity.sourceforge.net/
"""
import re
from StringIO import StringIO
from genshi.core import Markup
from trac.core import *
from trac.config import ListOption
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.util import get_pkginfo
try:
import SilverCity
have_silvercity = True
except ImportError:
have_silvercity = False
__all__ = ['SilverCityRenderer']
# Map MIME type -> (SilverCity lexer name, quality ratio[, lexer
# properties]).  Extended at runtime through the `[mimeviewer]
# silvercity_modes` option (see SilverCityRenderer).
types = {
    'text/css': ('CSS', 3),
    'text/html': ('HyperText', 3, {'asp.default.language':1}),
    'application/xml': ('XML', 3),
    'application/xhtml+xml': ('HyperText', 3, {'asp.default.language':1}),
    'application/rss+xml': ('HyperText', 3, {'asp.default.language':1}),
    'application/x-yaml': ('YAML', 3),
    'text/x-yaml': ('YAML', 3),
    'application/x-javascript': ('CPP', 3),  # Kludgy.
    'text/x-asp': ('HyperText', 3, {'asp.default.language':2}),
    'text/x-c++hdr': ('CPP', 3),
    'text/x-c++src': ('CPP', 3),
    'text/x-chdr': ('CPP', 3),
    'text/x-csrc': ('CPP', 3),
    'text/x-perl': ('Perl', 3),
    'text/x-php': ('HyperText', 3, {'asp.default.language': 4}),
    'application/x-httpd-php': ('HyperText', 3, {'asp.default.language': 4}),
    'application/x-httpd-php4': ('HyperText', 3, {'asp.default.language': 4}),
    'application/x-httpd-php3': ('HyperText', 3, {'asp.default.language': 4}),
    'text/x-java': ('Java', 3),
    'text/x-javascript': ('CPP', 3),  # Kludgy.
    'text/x-psp': ('HyperText', 3, {'asp.default.language': 3}),
    'text/x-python': ('Python', 3),
    'text/x-ruby': ('Ruby', 3),
    'text/x-sql': ('SQL', 3),
    'text/x-verilog': ('Verilog', 3),
    'text/xml': ('XML', 3),
    'text/xslt': ('XSLT', 3),
    'image/svg+xml': ('XML', 3)
}

# Matches a trailing CR so CRLF line endings can be normalized to LF
# before handing content to SilverCity (see render()).
CRLF_RE = re.compile('\r$', re.MULTILINE)
class SilverCityRenderer(Component):
    """Syntax highlighting based on SilverCity."""

    implements(ISystemInfoProvider, IHTMLPreviewRenderer)

    silvercity_modes = ListOption('mimeviewer', 'silvercity_modes',
        '', doc=
        """List of additional MIME types known by SilverCity.
        For each, a tuple `mimetype:mode:quality` has to be
        specified, where `mimetype` is the MIME type,
        `mode` is the corresponding SilverCity mode to be used
        for the conversion and `quality` is the quality ratio
        associated to this conversion.
        That can also be used to override the default
        quality ratio used by the SilverCity render, which is 3
        (''since 0.10'').""")

    expand_tabs = True
    returns_source = True

    def __init__(self):
        # Lazily-built MIME type -> (lexer, quality) mapping, filled in
        # by get_quality_ratio().
        self._types = None

    # ISystemInfoProvider methods

    def get_system_info(self):
        """Yield the installed SilverCity version, when available."""
        if have_silvercity:
            yield 'SilverCity', get_pkginfo(SilverCity).get('version', '?')
            # TODO: the above works only if setuptools was used to build
            # SilverCity, which is not yet the case by default for 0.9.7.
            # I've not been able to find an alternative way to get version.

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        """Return the quality ratio for `mimetype` (0 if unsupported)."""
        # Extend default MIME type to mode mappings with configured ones
        if not have_silvercity:
            return 0
        if not self._types:
            self._types = {}
            self._types.update(types)
            self._types.update(
                Mimeview(self.env).configured_modes_mapping('silvercity'))
        return self._types.get(mimetype, (None, 0))[1]

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Return `content` highlighted by SilverCity as Markup lines."""
        try:
            mimetype = mimetype.split(';', 1)[0]
            typelang = self._types[mimetype]
            lang = typelang[0]
            module = getattr(SilverCity, lang)
            generator = getattr(module, lang + "HTMLGenerator")
            try:
                allprops = typelang[2]
                # NOTE(review): `propset` is never used after being
                # filled -- presumably PropertySet() registers itself
                # globally with SilverCity; confirm before refactoring.
                propset = SilverCity.PropertySet()
                for p in allprops.keys():
                    propset[p] = allprops[p]
            except IndexError:
                # No extra lexer properties for this MIME type.
                pass
        except (KeyError, AttributeError):
            err = "No SilverCity lexer found for mime-type '%s'." % mimetype
            # Call form of raise: the old `raise Exception, err`
            # statement form is Python 2 only (enscript.py in this tree
            # already uses the call form).
            raise Exception(err)
        # SilverCity does not like unicode strings
        content = content.encode('utf-8')
        # SilverCity generates extra empty line against some types of
        # the line such as comment or #include with CRLF. So we
        # standardize to LF end-of-line style before call.
        content = CRLF_RE.sub('', content)
        buf = StringIO()
        generator().generate_html(buf, content)
        br_re = re.compile(r'<br\s*/?>$', re.MULTILINE)
        span_default_re = re.compile(r'<span class="\w+_default">(.*?)</span>',
                                     re.DOTALL)
        html = span_default_re.sub(r'\1', br_re.sub('', buf.getvalue()))
        # Convert the output back to a unicode string
        html = html.decode('utf-8')
        # SilverCity generates _way_ too many non-breaking spaces...
        # We don't need them anyway, so replace them by normal spaces
        return [Markup(line)
                for line in html.replace('&nbsp;', ' ').splitlines()]
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/mimeview/__init__.py | trac/tracopt/mimeview/__init__.py | python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false | |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/mimeview/tests/php.py | trac/tracopt/mimeview/tests/php.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from StringIO import StringIO
import unittest
from trac.mimeview.api import Mimeview
from trac.test import EnvironmentStub, locate
from tracopt.mimeview.php import PhpDeuglifier, PHPRenderer
class PhpDeuglifierTestCase(unittest.TestCase):
    """Unit tests for PhpDeuglifier's markup rewriting."""

    # color code -> expected CSS class, shared by the span/font tests
    _COLORS = [('FF8000', 'comment'), ('0000BB', 'lang'),
               ('007700', 'keyword'), ('DD0000', 'string')]

    def test_nomarkup(self):
        self.assertEqual('asd', PhpDeuglifier().format('asd'))

    def test_rewrite_span(self):
        # PHP 5 style: <span style="color: #XXXXXX">
        for color, css_class in self._COLORS:
            self.assertEqual(
                '<span class="code-%s">asd</span>' % css_class,
                PhpDeuglifier().format(
                    '<span style="color: #%s">asd</span>' % color))

    def test_rewrite_font(self):
        # PHP 4 style: <font color="#XXXXXX">
        for color, css_class in self._COLORS:
            self.assertEqual(
                '<span class="code-%s">asd</span>' % css_class,
                PhpDeuglifier().format(
                    '<font color="#%s">asd</font>' % color))

    def test_reorder_br(self):
        """
        Regression test for #3326 point 2 (close tags after line break)
        """
        for brs in ('<br />', '<br /><br />'):
            self.assertEqual(
                '<span class="code-lang"></span>' + brs,
                PhpDeuglifier().format(
                    '<span style="color: #0000BB">%s</span>' % brs))
class PhpRendererTestCase(unittest.TestCase):
    """Render small PHP snippets through PHPRenderer and inspect the
    resulting markup (requires the php CLI binary, see suite())."""

    def _test_render(self, stuff, type="string"):
        # Render `stuff` through a freshly configured PHPRenderer and
        # return the resulting list of markup lines.
        env = EnvironmentStub(enable=[PHPRenderer])
        m = Mimeview(env)
        r = m.renderers[0]
        if type == "string":
            s = stuff
        elif type == "file":
            s = StringIO(stuff)
        else:
            # Fixed: `NotImplementedException` is not a Python builtin;
            # reaching this branch used to raise NameError instead of
            # the intended error.
            raise NotImplementedError(
                "Pass either type=file or type=string")
        result = list(r.render(None, None, s))
        return result

    def test_boring_string(self):
        """
        Simple regression test for #3624 (php chops off the last line)
        """
        result = self._test_render('asd')
        self.assertEqual('asd', result[0])
        self.assertEqual(1, len(result))

    def test_boring_filelike(self):
        """
        Regression test for #3261 (treats content as string) # FIXME see #3332
        """
        result = self._test_render('asd', 'file')
        self.assertEqual('asd', result[0])
        self.assertEqual(1, len(result))

    def test_simple_string(self):
        result = self._test_render('<?php\n?>')
        self.assertEqual('<span class="code-lang">&lt;?php', result[0])
        self.assertEqual('?&gt;</span>', result[1])
        self.assertEqual(2, len(result))

    def test_simple_unicode(self):
        result = self._test_render(u'<?php echo "\u00e9"; ?>')
        self.assertEqual(u'<span class="code-lang">&lt;?php </span>'
                         u'<span class="code-keyword">echo </span>'
                         u'<span class="code-string">"é"</span>'
                         u'<span class="code-keyword">; </span>'
                         u'<span class="code-lang">?&gt;</span>', result[0])
        self.assertEqual(1, len(result))

    def test_way_too_many_nbsp(self):
        """
        Regression test for a tiny part of #1676
        """
        result = self._test_render('<?php\n      ?>')
        self.assertEqual('<span class="code-lang">&lt;?php', result[0])
        self.assertEqual('      ?&gt;</span>', result[1])
        self.assertEqual(2, len(result))

    def test_deuglified_reorder_br(self):
        """
        If the reordering of <br /> and the subsequent orphan </span>
        isn't working, the </span> appears at the beginning of the third
        line instead of the end of the second.
        """
        result = self._test_render('<?php\n$x="asd";\n?>')
        self.assertEqual('<span class="code-lang">&lt;?php', result[0])
        self.assertEqual('$x</span><span class="code-keyword">=</span>'
                         '<span class="code-string">"asd"</span>'
                         '<span class="code-keyword">;</span>', result[1])
        self.assertEqual('<span class="code-lang">?&gt;</span>', result[2])
        self.assertEqual(3, len(result))
def suite():
    """Build the test suite; skip everything when the php binary is
    not available on the path."""
    tests = unittest.TestSuite()
    if locate("php"):
        tests.addTest(unittest.makeSuite(PhpDeuglifierTestCase, 'test'))
        tests.addTest(unittest.makeSuite(PhpRendererTestCase, 'test'))
    else:
        print("SKIP: tracopt/mimeview/tests/php.py (php cli binary, 'php', "
              "not found)")
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/tracopt/mimeview/tests/__init__.py | trac/tracopt/mimeview/tests/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from tracopt.mimeview.tests import php
def suite():
    """Aggregate the mimeview option test suites."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(php.suite())
    return all_tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/HelloWorld.py | trac/sample-plugins/HelloWorld.py | """Example macro."""
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/HelloWorld.py $"
#
# The following shows the code for macro, old-style.
#
# The `execute` function serves no purpose other than to illustrate
# the example, it will not be used anymore.
#
# ---- (ignore in your own macro) ----
# --
from trac.util import escape
def execute(hdf, txt, env):
    """Old-style macro entry point (kept for illustration only)."""
    # `hdf` is only set when the macro is called from a wiki page.
    if hdf:
        hdf['wiki.macro.greeting'] = 'Hello World'
    # `txt` is None when the macro is called without parenthesis; and
    # since it comes from the user, escape() guards against injected
    # malicious HTML/Javascript.
    args = txt or 'No arguments'
    return 'Hello World, args = ' + escape(args)
# --
# ---- (ignore in your own macro) ----
#
# The following is the converted new-style macro
#
# ---- (reuse for your own macro) ----
# --
from trac.wiki.macros import WikiMacroBase
class HelloWorldMacro(WikiMacroBase):
    # NOTE(review): `cleandoc_` is not imported by any of this file's
    # visible import lines -- presumably it comes from
    # trac.util.translation; confirm, otherwise this raises NameError
    # at import time.
    _description = cleandoc_(
    """Simple HelloWorld macro.
    Note that the name of the class is meaningful:
    - it must end with "Macro"
    - what comes before "Macro" ends up being the macro name
    The documentation of the class (i.e. what you're reading)
    will become the documentation of the macro, as shown by
    the !MacroList macro (usually used in the TracWikiMacros page).
    """)

    def expand_macro(self, formatter, name, args):
        """Return some output that will be displayed in the Wiki content.
        `name` is the actual name of the macro (no surprise, here it'll be
        `'HelloWorld'`),
        `args` is the text enclosed in parenthesis at the call of the macro.
        Note that if there are ''no'' parenthesis (like in, e.g.
        [[HelloWorld]]), then `args` is `None`.
        """
        # `unicode` is Python-2 only; it coerces `args` (possibly None)
        # to text.
        return 'Hello World, args = ' + unicode(args)
        # Note that there's no need to HTML escape the returned data,
        # as the template engine (Genshi) will do it for us.
# --
# ---- (reuse for your own macro) ----
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/Timestamp.py | trac/sample-plugins/Timestamp.py | """Inserts the current time (in seconds) into the wiki page."""
revision = "$Rev: 10617 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/Timestamp.py $"
#
# The following shows the code for macro, old-style.
#
# The `execute` function serves no purpose other than to illustrate
# the example, it will not be used anymore.
#
# ---- (ignore in your own macro) ----
# --
import time # Trac before version 0.11 was using `time` module
def execute(hdf, txt, env):
    """Legacy macro entry point: return the current local time in bold.

    Parameters are ignored; they are only part of the old-style macro
    calling convention.
    """
    now = time.localtime()
    formatted = time.strftime('%c', now)
    return "<b>%s</b>" % formatted
# --
# ---- (ignore in your own macro) ----
#
# The following is the converted new-style macro
#
# ---- (reuse for your own macro) ----
# --
from datetime import datetime
# Note: since Trac 0.11, datetime objects are used internally
from genshi.builder import tag
from trac.util.datefmt import format_datetime, utc
from trac.wiki.macros import WikiMacroBase
class TimestampMacro(WikiMacroBase):
    _description = "Inserts the current time (in seconds) into the wiki page."

    def expand_macro(self, formatter, name, args):
        """Return a Genshi `<b>` element holding the formatted current time."""
        now = datetime.now(utc)
        formatted = format_datetime(now, '%c')
        return tag.b(formatted)
# --
# ---- (reuse for your own macro) ----
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/revision_links.py | trac/sample-plugins/revision_links.py | """Sample Wiki syntax extension plugin."""
from genshi.builder import tag
from trac.core import *
from trac.util.text import shorten_line
from trac.versioncontrol.api import NoSuchChangeset, RepositoryManager
from trac.versioncontrol.web_ui import ChangesetModule
from trac.wiki.api import IWikiSyntaxProvider
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/revision_links.py $"
class RevisionLinks(Component):
    """Adds a few more ways to refer to changesets."""

    implements(IWikiSyntaxProvider)

    # Regex alternatives matched before a changeset id, allowing e.g.
    # "rev 123", "Revision 123" or "changeset 123" in wiki text.
    KEYWORDS = ['[Rr]ev(?:ision)?', '[Cc]hangeset']

    # IWikiSyntaxProvider methods

    def get_wiki_syntax(self):
        """Yield (regex, formatter) pairs for the extra changeset syntax."""
        def revlink(f, match, fullmatch):
            # `match` looks like "rev 123" or "rev 123 in reponame".
            elts = match.split()
            rev = elts[1] # ignore keyword
            reponame = ''
            if len(elts) > 2: # reponame specified
                reponame = elts[-1]
            return self._format_revision_link(f, 'revision', reponame, rev, rev,
                                              fullmatch)
        yield (r"!?(?:%s)\s+%s(?:\s+in\s+\w+)?" %
               ("|".join(self.KEYWORDS), ChangesetModule.CHANGESET_ID), revlink)

    def get_link_resolvers(self):
        """Yield the resolver for explicit `revision:...` links."""
        def resolverev(f, ns, rev, label, fullmatch):
            return self._format_revision_link(f, ns, '', rev, label, fullmatch)
        yield ('revision', resolverev)

    def _format_revision_link(self, formatter, ns, reponame, rev, label,
                              fullmatch=None):
        """Render a link to changeset `rev` in repository `reponame`,
        falling back to a "missing changeset" link when it cannot be
        resolved."""
        # split_link separates query-string and fragment parts from rev.
        rev, params, fragment = formatter.split_link(rev)
        try:
            repos = RepositoryManager(self.env).get_repository(reponame)
            if repos:
                changeset = repos.get_changeset(rev)
                return tag.a(label, class_="changeset",
                             title=shorten_line(changeset.message),
                             href=(formatter.href.changeset(rev) +
                                   params + fragment))
        except NoSuchChangeset:
            pass
        # Unknown changeset: still emit a link, styled as missing and
        # marked rel="nofollow".
        return tag.a(label, class_="missing changeset", rel="nofollow",
                     href=formatter.href.changeset(rev))
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/milestone_to_version.py | trac/sample-plugins/milestone_to_version.py | import re
from trac.config import Option
from trac.core import *
from trac.resource import ResourceNotFound
from trac.ticket.api import IMilestoneChangeListener
from trac.ticket.model import Version
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/milestone_to_version.py $"
class MilestoneToVersion(Component):
    """Automatically create a version when a milestone is completed.
    Sample plugin demonstrating the IMilestoneChangeListener interface.
    Creates a version from a just-completed milestone based on whether the
    milestone's name matches a specified pattern.
    """

    implements(IMilestoneChangeListener)

    pattern = Option('milestone_to_version', 'pattern',
        r'(?i)(?:v(?:er)?\.?|version)?\s*(?P<version>\d.*)',
        """A regular expression to match the names of milestones that should be
        made into versions when they are completed. The pattern must include
        one named group called 'version' that matches the version number
        itself.""")

    def milestone_created(self, milestone):
        """No action on milestone creation."""
        pass

    def milestone_changed(self, milestone, old_values):
        """Create or update a version when a milestone becomes completed.

        Only reacts when `completed` transitioned from unset to set, and
        the milestone name matches `pattern` with a non-empty 'version'
        group.
        """
        if not milestone.is_completed or 'completed' not in old_values \
                or old_values['completed'] is not None:
            return
        m = re.match(self.pattern, milestone.name)
        if not m:
            return
        version_name = m.groupdict().get('version')
        if not version_name:
            return
        try:
            version = Version(self.env, version_name)
            if not version.time:
                # Existing undated version: stamp it with the milestone's
                # completion time.
                version.time = milestone.completed
                version.update()
                self.log.info('Existing version "%s" updated with completion '
                              'time from milestone "%s"',
                              version.name, milestone.name)
            else:
                self.log.info('Version "%s" already exists. No new version '
                              'created from milestone "%s"',
                              version.name, milestone.name)
        except ResourceNotFound:
            version = Version(self.env)
            version.name = version_name
            version.time = milestone.completed
            version.insert()
            # Fixed typo in log message: "milstone" -> "milestone".
            self.log.info('New version "%s" created from completed milestone '
                          '"%s".', version.name, milestone.name)

    def milestone_deleted(self, milestone):
        """No action on milestone deletion."""
        pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/permissions/public_wiki_policy.py | trac/sample-plugins/permissions/public_wiki_policy.py | from fnmatch import fnmatchcase
from trac.config import Option
from trac.core import *
from trac.perm import IPermissionPolicy
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/permissions/public_wiki_policy.py $"
class PublicWikiPolicy(Component):
    """Allow public access to some wiki pages.
    This is a sample permission policy plugin illustrating how to check
    permission on realms.
    Don't forget to integrate that plugin in the appropriate place in the
    list of permission policies:
    {{{
    [trac]
    permission_policies = PublicWikiPolicy, DefaultPermissionPolicy
    }}}
    Then you can configure which pages you want to make public:
    {{{
    [public_wiki]
    view = Public*
    modify = PublicSandbox/*
    }}}
    """

    implements(IPermissionPolicy)

    view = Option('public_wiki', 'view', 'Public*',
        """Case-sensitive glob pattern used for granting view permission on
        all Wiki pages matching it.""")

    modify = Option('public_wiki', 'modify', 'Public*',
        """Case-sensitive glob pattern used for granting modify permissions
        on all Wiki pages matching it.""")

    def check_permission(self, action, username, resource, perm):
        """Grant WIKI_* actions on wiki pages matching the configured
        patterns; return None (no opinion) in every other case."""
        if not resource:
            # Coarse-grained legacy check: no resource given, the realm
            # is encoded in the action name itself. This policy ''may''
            # grant permissions on some wiki pages.
            if action.startswith('WIKI_'):
                return True
            return None
        if resource.realm != 'wiki':
            return None
        if not resource.id:
            # Checking the wiki realm as a whole: this policy ''may''
            # grant permissions on some wiki pages.
            return True
        # Fine-grained check on an actual wiki page.
        pattern = self.view if action == 'WIKI_VIEW' else self.modify
        if fnmatchcase(resource.id, pattern):
            return True
        return None
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/permissions/debug_perm.py | trac/sample-plugins/permissions/debug_perm.py | from trac.core import *
from trac.perm import IPermissionPolicy, PermissionCache
from trac.resource import Resource
revision = "$Rev$"
url = "$URL$"
class DebugPolicy(Component):
    """Verify the well-formedness of the permission checks.
    **This plugin is only useful for Trac Development.**
    Once this plugin is enabled, you'll have to insert it at the appropriate
    place in your list of permission policies, e.g.
    {{{
    [trac]
    permission_policies = DebugPolicy, SecurityTicketsPolicy, AuthzPolicy,
                          DefaultPermissionPolicy, LegacyAttachmentPolicy
    }}}
    """

    implements(IPermissionPolicy)

    # IPermissionPolicy methods

    def check_permission(self, action, username, resource, perm):
        """Log every permission check and assert the argument types.

        Always returns None, deferring the actual decision to the next
        policy in the chain.
        """
        if resource:
            # A truthy resource must be an actual Resource instance.
            assert isinstance(resource, Resource)
        assert isinstance(perm, PermissionCache)
        self.log.info("does '%s' have %s on %r?", username, action, resource)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/permissions/vulnerability_tickets.py | trac/sample-plugins/permissions/vulnerability_tickets.py | from trac.core import *
from trac.perm import IPermissionPolicy, IPermissionRequestor
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/permissions/vulnerability_tickets.py $"
class SecurityTicketsPolicy(Component):
    """Prevent public access to security sensitive tickets.
    Add the VULNERABILITY_VIEW permission as a pre-requisite for any
    other permission check done on tickets that have the words
    "security" or "vulnerability" in the summary or keywords fields.
    Once this plugin is enabled, you'll have to insert it at the appropriate
    place in your list of permission policies, e.g.
    {{{
    [trac]
    permission_policies = SecurityTicketsPolicy, AuthzPolicy,
                          DefaultPermissionPolicy, LegacyAttachmentPolicy
    }}}
    """

    implements(IPermissionPolicy, IPermissionRequestor)

    # IPermissionPolicy methods

    def check_permission(self, action, username, resource, perm):
        """Deny any action on a sensitive ticket unless the user also has
        VULNERABILITY_VIEW; otherwise return None (no opinion)."""
        # We add the 'VULNERABILITY_VIEW' pre-requisite for any action
        # other than 'VULNERABILITY_VIEW' itself, as this would lead
        # to recursion.
        if action == 'VULNERABILITY_VIEW':
            return
        # Check whether we're dealing with a ticket resource, walking up
        # the parent chain (e.g. from an attachment to its ticket).
        while resource:
            if resource.realm == 'ticket':
                break
            resource = resource.parent
        if resource and resource.realm == 'ticket' and resource.id is not None:
            for keywords, summary in self.env.db_query(
                    "SELECT keywords, summary FROM ticket WHERE id=%s",
                    (resource.id,)):
                # Concatenate the non-empty fields and search
                # case-insensitively for the sensitive words.
                fields = ''.join(f for f in (keywords, summary) if f).lower()
                if 'security' in fields or 'vulnerability' in fields:
                    if 'VULNERABILITY_VIEW' not in perm:
                        return False

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Declare the VULNERABILITY_VIEW permission."""
        yield 'VULNERABILITY_VIEW'
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/workflow/StatusFixer.py | trac/sample-plugins/workflow/StatusFixer.py | from genshi.builder import tag
from trac.core import Component, implements
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.perm import IPermissionRequestor
revision = "$Rev: 11075 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/workflow/StatusFixer.py $"
class StatusFixerActionController(Component):
    """Provides the admin with a way to correct a ticket's status.
    This plugin is especially useful when you made changes to your workflow,
    and some ticket status are no longer valid. The tickets that are in those
    status can then be set to some valid state.
    Don't forget to add `StatusFixerActionController` to the workflow
    option in the `[ticket]` section in TracIni.
    If there is no other workflow option, the line will look like this:
    {{{
    workflow = ConfigurableTicketWorkflow,StatusFixerActionController
    }}}
    """

    implements(ITicketActionController, IPermissionRequestor)

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Declare the TICKET_STATUSFIX permission."""
        return ['TICKET_STATUSFIX']

    # ITicketActionController methods

    def get_ticket_actions(self, req, ticket):
        """Offer the 'force_status' action to users with TICKET_STATUSFIX."""
        if 'TICKET_STATUSFIX' in req.perm(ticket.resource):
            return [(0, 'force_status')]
        return []

    def get_all_status(self):
        """Return all the status that are present in the database,
        so that queries for status no longer in use can be made.
        """
        return [status for status, in
                self.env.db_query("SELECT DISTINCT status FROM ticket")]

    def render_ticket_action_control(self, req, ticket, action):
        """Render a dropdown of every status found in the database."""
        # Need to use the list of all status so you can't manually set
        # something to an invalid state.
        chosen = req.args.get('force_status_value', 'new')
        options = [tag.option(status,
                              selected=(status == chosen and 'selected' or
                                        None))
                   for status in TicketSystem(self.env).get_all_status()]
        control = tag.select(options, id='force_status_value',
                             name='force_status_value')
        return ("force status to:", control,
                "The next status will be the selected one")

    def get_ticket_changes(self, req, ticket, action):
        """Set the status to whatever the admin selected."""
        return {'status': req.args.get('force_status_value')}

    def apply_action_side_effects(self, req, ticket, action):
        """No side effects beyond the status change itself."""
        pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/workflow/VoteOperation.py | trac/sample-plugins/workflow/VoteOperation.py | from genshi.builder import tag
from trac.core import implements,Component
from trac.ticket.api import ITicketActionController
from trac.ticket.default_workflow import ConfigurableTicketWorkflow
from trac.ticket.model import Priority, Ticket
#from trac.perm import IPermissionRequestor # (TODO)
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/workflow/VoteOperation.py $"
class VoteOperation(Component):
    """Provides a simplistic vote feature.
    This is a sample action controller illustrating how to create additional
    ''operations''.
    Don't forget to add `VoteOperation` to the workflow
    option in the `[ticket]` section in TracIni.
    If there is no other workflow option, the line will look like this:
    {{{
    workflow = ConfigurableTicketWorkflow,VoteOperation
    }}}
    """

    implements(ITicketActionController)

    def get_ticket_actions(self, req, ticket):
        # Delegate to the configured workflow: any action whose
        # `.operations` include 'vote' is handled by this controller.
        controller = ConfigurableTicketWorkflow(self.env)
        return controller.get_actions_by_operation_for_req(req, ticket, 'vote')

    def get_all_status(self):
        # This controller introduces no ticket status of its own.
        return []

    def render_ticket_action_control(self, req, ticket, action):
        """Render a for/against dropdown for the vote action."""
        id = 'vote_%s_result' % (action, )
        selected_value = req.args.get(id, 'for')
        options = ['for', 'against']
        return ("vote",
                tag.select([tag.option(x, selected=(x == selected_value or
                                                    None))
                            for x in options], name=id, id=id),
                "Vote on the issue, raising or lowering its priority")

    def get_ticket_changes(self, req, ticket, action):
        """Translate the vote into a priority field change.

        A 'for' vote decrements the priority's numeric value and any other
        vote increments it, clamped to [1, max configured value]
        (per the hint above this raises resp. lowers the priority --
        assumes smaller `value` means higher priority; TODO confirm
        against the priority enum ordering).
        """
        id = 'vote_%s_result' % (action, )
        selected = req.args.get(id, 'for')
        priorities = list(Priority.select(self.env))
        # Re-read the ticket to base the change on the stored priority,
        # not on any concurrently edited value.
        orig_ticket = Ticket(self.env, ticket.id)
        current_priority = int(Priority(self.env, name=
                                        orig_ticket['priority']).value)
        if selected == 'for':
            # priorities are 1-based, not 0-based
            new_value = max(1, current_priority - 1)
        else:
            maxval = max([int(p.value) for p in priorities])
            new_value = min(maxval, current_priority + 1)
        return {'priority': [p.name for p in priorities
                             if int(p.value) == new_value][0]}

    def apply_action_side_effects(self, req, ticket, action):
        # Voting has no side effects beyond the field change above.
        pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/workflow/DeleteTicket.py | trac/sample-plugins/workflow/DeleteTicket.py | from genshi.builder import tag
from trac.core import implements,Component
from trac.ticket.api import ITicketActionController
from trac.perm import IPermissionRequestor
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/workflow/DeleteTicket.py $"
class DeleteTicketActionController(Component):
    """Provides the admin with a way to delete a ticket.
    Illustrates how to create an action controller with side-effects.
    Don't forget to add `DeleteTicketActionController` to the workflow
    option in the `[ticket]` section in TracIni.
    If there is no other workflow option, the line will look like this:
    {{{
    workflow = ConfigurableTicketWorkflow,DeleteTicketActionController
    }}}
    """

    implements(ITicketActionController, IPermissionRequestor)

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Declare the TICKET_DELETE permission."""
        return ['TICKET_DELETE']

    # ITicketActionController methods

    def get_ticket_actions(self, req, ticket):
        """Offer the 'delete' action to users holding TICKET_DELETE."""
        if 'TICKET_DELETE' in req.perm(ticket.resource):
            return [(0, 'delete')]
        return []

    def get_all_status(self):
        """This controller introduces no ticket status of its own."""
        return []

    def render_ticket_action_control(self, req, ticket, action):
        """No extra control widget: label, empty control, and a warning."""
        return ("delete ticket", '', "This ticket will be deleted.")

    def get_ticket_changes(self, req, ticket, action):
        """Deleting a ticket changes no ticket fields."""
        return {}

    def apply_action_side_effects(self, req, ticket, action):
        """Perform the actual deletion once the action is applied."""
        # Be paranoid here, as this should only be called when the
        # action is delete...
        if action == 'delete':
            ticket.delete()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/workflow/CodeReview.py | trac/sample-plugins/workflow/CodeReview.py | from genshi.builder import tag
from trac.core import implements,Component
from trac.ticket.api import ITicketActionController
from trac.ticket.default_workflow import ConfigurableTicketWorkflow
from trac.perm import IPermissionRequestor
from trac.config import Option, ListOption
revision = "$Rev: 11490 $"
url = "$URL: http://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/workflow/CodeReview.py $"
class CodeReviewActionController(Component):
    """Support for simple code reviews.
    The action that supports the `code_review` operation will present
    an extra choice for the review decision. Depending on that decision,
    a specific state will be selected.
    Example (from the enterprise-review-workflow.ini):
    {{{
    review = in_review -> *
    review.name = review as
    review.operations = code_review
    review.code_review =
      approve -> in_QA,
      approve as noted -> post_review,
      request changes -> in_work
    }}}
    Don't forget to add the `CodeReviewActionController` to the workflow
    option in the `[ticket]` section in TracIni.
    If there is no other workflow option, the line will look like this:
    {{{
    workflow = ConfigurableTicketWorkflow,CodeReviewActionController
    }}}
    """

    implements(ITicketActionController, IPermissionRequestor)

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Declare the TICKET_REVIEW permission."""
        return ['TICKET_REVIEW']

    # ITicketActionController methods

    def get_ticket_actions(self, req, ticket):
        """Return the workflow actions configured with the `code_review`
        operation, for eligible reviewers."""
        # The review action is available in those status where it has been
        # configured, for those users who have the TICKET_REVIEW permission, as
        # long as they are not the owner of the ticket (you can't review your
        # own work!).
        actions_we_handle = []
        if req.authname != ticket['owner'] and \
                'TICKET_REVIEW' in req.perm(ticket.resource):
            controller = ConfigurableTicketWorkflow(self.env)
            actions_we_handle = controller.get_actions_by_operation_for_req(
                req, ticket, 'code_review')
        self.log.debug('code review handles actions: %r' % actions_we_handle)
        return actions_we_handle

    def get_all_status(self):
        """Return every status reachable through a configured review
        decision."""
        all_status = set()
        controller = ConfigurableTicketWorkflow(self.env)
        ouractions = controller.get_actions_by_operation('code_review')
        for weight, action in ouractions:
            status = [status for option, status in
                      self._get_review_options(action)]
            all_status.update(status)
        return all_status

    def render_ticket_action_control(self, req, ticket, action):
        """Render the review-decision dropdown plus a hint describing the
        resulting next status."""
        id, grade = self._get_grade(req, action)
        review_options = self._get_review_options(action)
        actions = ConfigurableTicketWorkflow(self.env).actions
        # Default to the previously submitted grade, else the first option.
        selected_value = grade or review_options[0][0]
        label = actions[action]['name']
        control = tag(["as: ",
                       tag.select([tag.option(option, selected=
                                              (option == selected_value or
                                               None))
                                   for option, status in review_options],
                                  name=id, id=id)])
        if grade:
            # A grade was already submitted: the next status is determined.
            new_status = self._get_new_status(req, ticket, action,
                                              review_options)
            hint = "Next status will be '%s'" % new_status
        else:
            hint = "Next status will be one of " + \
                   ', '.join(["'%s'" % status
                              for option, status in review_options])
        return (label, control, hint)

    def get_ticket_changes(self, req, ticket, action):
        """Map the chosen review grade to the configured target status."""
        new_status = self._get_new_status(req, ticket, action)
        return {'status': new_status or 'new'}

    def apply_action_side_effects(self, req, ticket, action):
        """Reviews have no side effects beyond the status change."""
        pass

    # Internal methods

    def _get_grade(self, req, action):
        """Return (request parameter name, submitted grade or None)."""
        id = action + '_code_review_result'
        return id, req.args.get(id)

    def _get_review_options(self, action):
        """Parse the `<action>.code_review` config entries
        ("option -> status") into a list of [option, status] pairs."""
        return [[x.strip() for x in raw_option.split('->')]
                for raw_option in self.config.getlist('ticket-workflow',
                                                      action + '.code_review')]

    def _get_new_status(self, req, ticket, action, review_options=None):
        """Return the status configured for the submitted grade, or None
        when the grade matches no configured option."""
        id, grade = self._get_grade(req, action)
        if not review_options:
            review_options = self._get_review_options(action)
        for option, status in review_options:
            if grade == option:
                return status
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/sample-plugins/workflow/MilestoneOperation.py | trac/sample-plugins/workflow/MilestoneOperation.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Franz Mayer <franz.mayer@gefasoft.de>
#
# "THE BEER-WARE LICENSE" (Revision 42):
# <franz.mayer@gefasoft.de> wrote this file. As long as you retain this
# notice you can do whatever you want with this stuff. If we meet some day,
# and you think this stuff is worth it, you can buy me a beer in return.
# Franz Mayer
#
# Author: Franz Mayer <franz.mayer@gefasoft.de>
from genshi.builder import tag
from trac.core import Component, implements
from trac.resource import ResourceNotFound
from trac.ticket.api import ITicketActionController
from trac.ticket.default_workflow import ConfigurableTicketWorkflow
from trac.ticket.model import Milestone
from trac.util.translation import _
from trac.web.chrome import add_warning
revision = "$Rev$"
url = "$URL$"
class MilestoneOperation(Component):
    """Sets milestone for specific status.
    === Example ===
    {{{
    [ticket-workflow]
    resolve.operations = set_resolution,set_milestone
    resolve.milestone = invalid,wontfix,duplicate,worksforme->rejected
    }}}
    When setting status to `duplicate` the milestone will automatically change
    to `rejected`.
    '''Note:''' if user has changed milestone manually, this workflow operation
    has ''no effect''!
    === Configuration ===
    Don't forget to add `MilestoneOperation` to the workflow option
    in `[ticket]` section. If there is no workflow option, the line will look
    like this:
    {{{
    [ticket]
    workflow = ConfigurableTicketWorkflow,MilestoneOperation
    }}}
    """

    implements(ITicketActionController)

    def get_ticket_actions(self, req, ticket):
        """Return the workflow actions configured with the `set_milestone`
        operation, for authenticated users with TICKET_MODIFY."""
        actions_we_handle = []
        if req.authname != 'anonymous' and \
                'TICKET_MODIFY' in req.perm(ticket.resource):
            controller = ConfigurableTicketWorkflow(self.env)
            actions_we_handle = controller.get_actions_by_operation_for_req(
                req, ticket, 'set_milestone')
        self.log.debug('set_milestone handles actions: %r' % actions_we_handle)
        return actions_we_handle

    def get_all_status(self):
        # This operation introduces no ticket status of its own.
        return []

    def render_ticket_action_control(self, req, ticket, action):
        """Render only a hint describing which resolutions map to which
        milestone; the operation needs no input control."""
        actions = ConfigurableTicketWorkflow(self.env).actions
        label = actions[action]['name']
        res_ms = self.__get_resolution_milestone_dict(ticket, action)
        resolutions = ''
        milestone = None
        # Build a quoted, comma-separated list of the resolutions; all
        # resolutions of one action map to the same milestone.
        for i, resolution in enumerate(res_ms):
            if i > 0:
                resolutions = "%s, '%s'" % (resolutions, resolution)
            else:
                resolutions = "'%s'" % resolution
                milestone = res_ms[resolution]
        hint = _("For resolution %(resolutions)s the milestone will be "
                 "set to '%(milestone)s'.",
                 resolutions=resolutions, milestone=milestone)
        return (label, None, hint)

    def get_ticket_changes(self, req, ticket, action):
        """Return a milestone change when the chosen resolution has a
        configured mapping and no milestone was set manually."""
        if action == 'resolve' and \
                req.args and 'action_resolve_resolve_resolution' in req.args:
            old_milestone = ticket._old.get('milestone') or None
            user_milestone = ticket['milestone'] or None
            # If there's no user defined milestone, we try to set it
            # using the defined resolution -> milestone mapping.
            if old_milestone is None:
                new_status = req.args['action_resolve_resolve_resolution']
                new_milestone = self.__get_new_milestone(ticket, action,
                                                         new_status)
                # ... but we don't reset it to None unless it was None
                if new_milestone is not None or user_milestone is None:
                    try:
                        # Instantiating Milestone only probes that the target
                        # exists (raises ResourceNotFound otherwise).
                        milestone = Milestone(self.env, new_milestone)
                        self.log.info('changed milestone from %s to %s' %
                                      (old_milestone, new_milestone) )
                        return {'milestone': new_milestone}
                    except ResourceNotFound:
                        add_warning(req, _("Milestone %(name)s does not exist.",
                                           name=new_milestone))
        return {}

    def apply_action_side_effects(self, req, ticket, action):
        # No side effects: the milestone change goes through the normal
        # ticket save.
        pass

    def __get_new_milestone(self, ticket, action, new_status):
        """Determines the new status"""
        # Returns the milestone mapped to the chosen resolution, or None
        # when no mapping is configured.
        if new_status:
            res_ms = self.__get_resolution_milestone_dict(ticket, action)
            return res_ms.get(new_status)

    def __get_resolution_milestone_dict(self, ticket, action):
        """Parse `<action>.milestone` config ("res1,res2->milestone") into
        a {resolution: milestone} dict."""
        transitions = self.config.get('ticket-workflow',
                                      action + '.milestone').strip()
        transition = [x.strip() for x in transitions.split('->')]
        resolutions = [y.strip() for y in transition[0].split(',')]
        res_milestone = {}
        for res in resolutions:
            res_milestone[res] = transition[1]
        return res_milestone
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/loader.py | trac/trac/loader.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from glob import glob
import imp
import os.path
import pkg_resources
from pkg_resources import working_set, DistributionNotFound, VersionConflict, \
UnknownExtra
import sys
from trac.util import get_doc, get_module_path, get_sources, get_pkginfo
from trac.util.text import exception_to_unicode, to_unicode
__all__ = ['load_components']
def _enable_plugin(env, module):
"""Enable the given plugin module if it wasn't disabled explicitly."""
if env.is_component_enabled(module) is None:
env.enable_component(module)
def load_eggs(entry_point_name):
    """Loader that loads any eggs on the search path and `sys.path`.

    Returns a loader function taking (env, search_path, auto_enable);
    entry points whose distribution lives in the `auto_enable` directory
    are enabled implicitly.
    """
    def _load_eggs(env, search_path, auto_enable=None):
        # Note that the following doesn't seem to support unicode search_path
        distributions, errors = working_set.find_plugins(
            pkg_resources.Environment(search_path)
        )
        for dist in distributions:
            if dist not in working_set:
                env.log.debug('Adding plugin %s from %s', dist, dist.location)
                working_set.add(dist)

        def _log_error(item, e):
            # Log at a severity matching how serious the failure is: a
            # missing dependency is routine (debug), conflicts are errors.
            ue = exception_to_unicode(e)
            if isinstance(e, DistributionNotFound):
                env.log.debug('Skipping "%s": ("%s" not found)', item, ue)
            elif isinstance(e, VersionConflict):
                env.log.error('Skipping "%s": (version conflict "%s")',
                              item, ue)
            elif isinstance(e, UnknownExtra):
                env.log.error('Skipping "%s": (unknown extra "%s")', item, ue)
            else:
                env.log.error('Skipping "%s": %s', item,
                              exception_to_unicode(e, traceback=True))

        for dist, e in errors.iteritems():
            _log_error(dist, e)

        # Load entry points in a deterministic (name-sorted) order.
        for entry in sorted(working_set.iter_entry_points(entry_point_name),
                            key=lambda entry: entry.name):
            env.log.debug('Loading %s from %s', entry.name, entry.dist.location)
            try:
                entry.load(require=True)
            # Python 2 syntax; deliberately broad so one broken plugin
            # cannot prevent the others from loading.
            except Exception, e:
                _log_error(entry, e)
            else:
                if os.path.dirname(entry.dist.location) == auto_enable:
                    _enable_plugin(env, entry.module_name)
    return _load_eggs
def load_py_files():
    """Loader that look for Python source files in the plugins directories,
    which simply get imported, thereby registering them with the component
    manager if they define any components.
    """
    def _load_py_files(env, search_path, auto_enable=None):
        for path in search_path:
            plugin_files = glob(os.path.join(path, '*.py'))
            for plugin_file in plugin_files:
                try:
                    # Module name is the file name without the '.py' suffix.
                    plugin_name = os.path.basename(plugin_file[:-3])
                    env.log.debug('Loading file plugin %s from %s' % \
                                  (plugin_name, plugin_file))
                    # Skip re-import when a module of that name is already
                    # loaded (e.g. found via an earlier search path entry).
                    if plugin_name not in sys.modules:
                        module = imp.load_source(plugin_name, plugin_file)
                    # Plugins dropped directly into the environment's own
                    # plugins directory are enabled implicitly.
                    if path == auto_enable:
                        _enable_plugin(env, plugin_name)
                # Python 2 syntax; deliberately broad so one broken plugin
                # cannot prevent the others from loading.
                except Exception, e:
                    env.log.error('Failed to load plugin from %s: %s',
                                  plugin_file,
                                  exception_to_unicode(e, traceback=True))

    return _load_py_files
def get_plugins_dir(env):
    """Return the path to the `plugins` directory of the environment.

    The path is canonicalized (symlinks resolved, case normalized) so it
    can be compared reliably against other normalized paths.
    """
    raw = os.path.join(env.path, 'plugins')
    return os.path.normcase(os.path.realpath(raw))
def load_components(env, extra_path=None, loaders=(load_eggs('trac.plugins'),
                                                   load_py_files())):
    """Load all plugin components found on the given search path.

    The environment's own `plugins` directory is always searched first,
    followed by any `extra_path` entries; each loader auto-enables
    plugins found in the plugins directory.
    """
    plugins_dir = get_plugins_dir(env)
    search_path = [plugins_dir]
    search_path.extend(extra_path or ())
    for load_plugins in loaders:
        load_plugins(env, search_path, auto_enable=plugins_dir)
def get_plugin_info(env, include_core=False):
    """Return package information about Trac core and installed plugins.

    :param env: the environment, used to locate the plugins directory and
                to query component enablement
    :param include_core: when `False` (the default), the Trac distribution
                         itself is filtered out of the result
    :return: a sorted list of plugin info dicts (name, version, path,
             plugin_filename, readonly, info, modules), Trac first, then by
             lower-cased project name
    """
    path_sources = {}

    def find_distribution(module):
        # Map a module back to the distribution that provides it, caching
        # the per-location source listings in `path_sources`.
        name = module.__name__
        path = get_module_path(module)
        sources = path_sources.get(path)
        if sources is None:
            sources = path_sources[path] = get_sources(path)
        dist = sources.get(name.replace('.', '/') + '.py')
        if dist is None:
            dist = sources.get(name.replace('.', '/') + '/__init__.py')
        if dist is None:
            # This is a plain Python source file, not an egg
            dist = pkg_resources.Distribution(project_name=name,
                                              version='',
                                              location=module.__file__)
        return dist

    plugins_dir = get_plugins_dir(env)
    plugins = {}
    # NOTE(review): imported locally, presumably to avoid a circular import
    # between trac.loader and trac.core -- confirm before hoisting.
    from trac.core import ComponentMeta
    for component in ComponentMeta._components:
        module = sys.modules[component.__module__]
        dist = find_distribution(module)
        plugin_filename = None
        if os.path.realpath(os.path.dirname(dist.location)) == plugins_dir:
            plugin_filename = os.path.basename(dist.location)
        if dist.project_name not in plugins:
            # A plugin is writable only if it lives in the environment's
            # plugins directory and the file itself is writable.
            readonly = True
            if plugin_filename and os.access(dist.location,
                                             os.F_OK + os.W_OK):
                readonly = False
            # retrieve plugin metadata
            info = get_pkginfo(dist)
            if not info:
                # No egg metadata available: fall back to module-level
                # attributes (author, url, license, ...).
                info = {}
                for k in ('author', 'author_email', 'home_page', 'url',
                          'license', 'trac'):
                    v = getattr(module, k, '')
                    if v and isinstance(v, basestring):
                        if k == 'home_page' or k == 'url':
                            k = 'home_page'
                            # Strip SVN keyword-expansion markers.
                            v = v.replace('$', '').replace('URL: ', '')
                        else:
                            v = to_unicode(v)
                        info[k] = v
            else:
                # Info found; set all those fields to "None" that have the
                # value "UNKNOWN" as this is the value for fields that
                # aren't specified in "setup.py"
                for k in info:
                    if info[k] == 'UNKNOWN':
                        info[k] = ''
                    else:
                        # Must be encoded as unicode as otherwise Genshi
                        # may raise a "UnicodeDecodeError".
                        info[k] = to_unicode(info[k])
            # retrieve plugin version info
            version = dist.version
            if not version:
                version = (getattr(module, 'version', '') or
                           getattr(module, 'revision', ''))
                # special handling for "$Rev$" strings
                version = version.replace('$', '').replace('Rev: ', 'r')
            plugins[dist.project_name] = {
                'name': dist.project_name, 'version': version,
                'path': dist.location, 'plugin_filename': plugin_filename,
                'readonly': readonly, 'info': info, 'modules': {},
            }
        modules = plugins[dist.project_name]['modules']
        if module.__name__ not in modules:
            summary, description = get_doc(module)
            plugins[dist.project_name]['modules'][module.__name__] = {
                'summary': summary, 'description': description,
                'components': {},
            }
        full_name = module.__name__ + '.' + component.__name__
        summary, description = get_doc(component)
        c = component
        # Instantiate the component so instance attributes like `required`
        # can be read, unless the class is the environment's own class.
        if c in env and not issubclass(c, env.__class__):
            c = component(env)
        modules[module.__name__]['components'][component.__name__] = {
            'full_name': full_name,
            'summary': summary, 'description': description,
            'enabled': env.is_component_enabled(component),
            'required': getattr(c, 'required', False),
        }
    if not include_core:
        # Python 2: `plugins.keys()` is a list, so popping while iterating
        # over it is safe.
        for name in plugins.keys():
            if name.lower() == 'trac':
                plugins.pop(name)
    return sorted(plugins.itervalues(),
                  key=lambda p: (p['name'].lower() != 'trac',
                                 p['name'].lower()))
def match_plugins_to_frames(plugins, frames):
    """Add a `frame_idx` element to plugin information as returned by
    `get_plugin_info()`, containing the index of the highest frame in the
    list that was located in the plugin.
    """
    # Frames originating from installed eggs carry a "build/" path prefix
    # from the packaging step; collect them once for the egg lookup below.
    egg_frames = [pair for pair in enumerate(frames)
                  if pair[1]['filename'].startswith('build/')]

    def find_egg_frame_index(plugin):
        # Map a frame back to the egg through its recorded source list.
        for dist in pkg_resources.find_distributions(plugin['path'],
                                                     only=True):
            try:
                manifest = dist.get_metadata('SOURCES.txt')
            except KeyError:
                continue    # Metadata not found
            for src in manifest.splitlines():
                if not src.endswith('.py'):
                    continue
                normalized = src.replace('\\', '/')
                for idx, frame in egg_frames:
                    if frame['filename'].endswith(normalized):
                        plugin['frame_idx'] = idx
                        return

    for plugin in plugins:
        base, ext = os.path.splitext(plugin['path'])
        if ext == '.egg' and egg_frames:
            find_egg_frame_index(plugin)
        else:
            # Plain source plugins: first frame located under the plugin's
            # path wins.
            for idx, frame in enumerate(frames):
                if frame['filename'].startswith(base):
                    plugin['frame_idx'] = idx
                    break
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db_default.py | trac/trac/db_default.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <daniel@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
from trac.db import Table, Column, Index
# Database version identifier. Used for automatic upgrades.
# Stored in the `system` table under 'database_version'; bump it whenever
# the schema below changes so the upgrade machinery runs on next start.
db_version = 29
def __mkreports(reports):
    """Utility function used to create report data in same syntax as the
    default data. This extra step is done to simplify editing the default
    reports."""
    # Each entry arrives as (title, description, query); the `report` table
    # rows are (author, title, query, description) with no author.
    return [(None, title, query, description)
            for title, description, query in reports]
##
## Database schema
##
schema = [
    # Common
    # Key/value store for global metadata, including 'database_version'.
    Table('system', key='name')[
        Column('name'),
        Column('value')],
    Table('permission', key=('username', 'action'))[
        Column('username'),
        Column('action')],
    Table('auth_cookie', key=('cookie', 'ipnr', 'name'))[
        Column('cookie'),
        Column('name'),
        Column('ipnr'),
        Column('time', type='int')],
    Table('session', key=('sid', 'authenticated'))[
        Column('sid'),
        Column('authenticated', type='int'),
        Column('last_visit', type='int'),
        Index(['last_visit']),
        Index(['authenticated'])],
    Table('session_attribute', key=('sid', 'authenticated', 'name'))[
        Column('sid'),
        Column('authenticated', type='int'),
        Column('name'),
        Column('value')],
    Table('cache', key='id')[
        Column('id', type='int'),
        Column('generation', type='int'),
        Column('key')],

    # Attachments
    Table('attachment', key=('type', 'id', 'filename'))[
        Column('type'),
        Column('id'),
        Column('filename'),
        Column('size', type='int'),
        Column('time', type='int64'),
        Column('description'),
        Column('author'),
        Column('ipnr')],

    # Wiki system
    Table('wiki', key=('name', 'version'))[
        Column('name'),
        Column('version', type='int'),
        Column('time', type='int64'),
        Column('author'),
        Column('ipnr'),
        Column('text'),
        Column('comment'),
        Column('readonly', type='int'),
        Index(['time'])],

    # Version control cache
    Table('repository', key=('id', 'name'))[
        Column('id', type='int'),
        Column('name'),
        Column('value')],
    Table('revision', key=('repos', 'rev'))[
        Column('repos', type='int'),
        Column('rev', key_size=20),
        Column('time', type='int64'),
        Column('author'),
        Column('message'),
        Index(['repos', 'time'])],
    # One row per file touched by a changeset.
    Table('node_change', key=('repos', 'rev', 'path', 'change_type'))[
        Column('repos', type='int'),
        Column('rev', key_size=20),
        Column('path', key_size=255),
        Column('node_type', size=1),
        Column('change_type', size=1, key_size=2),
        Column('base_path'),
        Column('base_rev'),
        Index(['repos', 'rev'])],

    # Ticket system
    Table('ticket', key='id')[
        Column('id', auto_increment=True),
        Column('type'),
        Column('time', type='int64'),
        Column('changetime', type='int64'),
        Column('component'),
        Column('severity'),
        Column('priority'),
        Column('owner'),
        Column('reporter'),
        Column('cc'),
        Column('version'),
        Column('milestone'),
        Column('status'),
        Column('resolution'),
        Column('summary'),
        Column('description'),
        Column('keywords'),
        Index(['time']),
        Index(['status'])],
    Table('ticket_change', key=('ticket', 'time', 'field'))[
        Column('ticket', type='int'),
        Column('time', type='int64'),
        Column('author'),
        Column('field'),
        Column('oldvalue'),
        Column('newvalue'),
        Index(['ticket']),
        Index(['time'])],
    # Values of custom (site-defined) ticket fields.
    Table('ticket_custom', key=('ticket', 'name'))[
        Column('ticket', type='int'),
        Column('name'),
        Column('value')],
    # Enumerations: priorities, resolutions, severities, ticket types.
    Table('enum', key=('type', 'name'))[
        Column('type'),
        Column('name'),
        Column('value')],
    Table('component', key='name')[
        Column('name'),
        Column('owner'),
        Column('description')],
    Table('milestone', key='name')[
        Column('name'),
        Column('due', type='int64'),
        Column('completed', type='int64'),
        Column('description')],
    Table('version', key='name')[
        Column('name'),
        Column('time', type='int64'),
        Column('description')],

    # Report system
    Table('report', key='id')[
        Column('id', auto_increment=True),
        Column('author'),
        Column('title'),
        Column('query'),
        Column('description')],
]
##
## Default Reports
##
def get_reports(db):
    """Return the default reports as ``(title, description, query)`` tuples.

    The SQL is written for the dialect of the given `db` connection:
    `db.cast` and `db.concat` paper over backend differences. ``$USER`` in
    the queries is substituted at report execution time with the name of
    the logged-in user.
    """
    return (
('Active Tickets',
"""\
 * List all active tickets by priority.
 * Color each row based on priority.
""",
"""\
SELECT p.value AS __color__,
   id AS ticket, summary, component, version, milestone, t.type AS type,
   owner, status,
   time AS created,
   changetime AS _changetime, description AS _description,
   reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  WHERE status <> 'closed'
  ORDER BY """ + db.cast('p.value', 'int') + """, milestone, t.type, time
"""),
#----------------------------------------------------------------------------
('Active Tickets by Version',
"""\
This report shows how to color results by priority,
while grouping results by version.

Last modification time, description and reporter are included as hidden fields
for useful RSS export.
""",
"""\
SELECT p.value AS __color__,
   version AS __group__,
   id AS ticket, summary, component, version, t.type AS type,
   owner, status,
   time AS created,
   changetime AS _changetime, description AS _description,
   reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  WHERE status <> 'closed'
  ORDER BY (version IS NULL),version, """ + db.cast('p.value', 'int') +
""", t.type, time
"""),
#----------------------------------------------------------------------------
('Active Tickets by Milestone',
"""\
This report shows how to color results by priority,
while grouping results by milestone.

Last modification time, description and reporter are included as hidden fields
for useful RSS export.
""",
"""\
SELECT p.value AS __color__,
   %s AS __group__,
   id AS ticket, summary, component, version, t.type AS type,
   owner, status,
   time AS created,
   changetime AS _changetime, description AS _description,
   reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  WHERE status <> 'closed'
  ORDER BY (milestone IS NULL),milestone, %s, t.type, time
""" % (db.concat("'Milestone '", 'milestone'), db.cast('p.value', 'int'))),
#----------------------------------------------------------------------------
('Accepted, Active Tickets by Owner',
"""\
List accepted tickets, group by ticket owner, sorted by priority.
""",
"""\
SELECT p.value AS __color__,
   owner AS __group__,
   id AS ticket, summary, component, milestone, t.type AS type, time AS created,
   changetime AS _changetime, description AS _description,
   reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  WHERE status = 'accepted'
  ORDER BY owner, """ + db.cast('p.value', 'int') + """, t.type, time
"""),
#----------------------------------------------------------------------------
('Accepted, Active Tickets by Owner (Full Description)',
"""\
List tickets accepted, group by ticket owner.
This report demonstrates the use of full-row display.
""",
"""\
SELECT p.value AS __color__,
   owner AS __group__,
   id AS ticket, summary, component, milestone, t.type AS type, time AS created,
   description AS _description_,
   changetime AS _changetime, reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  WHERE status = 'accepted'
  ORDER BY owner, """ + db.cast('p.value', 'int') + """, t.type, time
"""),
#----------------------------------------------------------------------------
('All Tickets By Milestone (Including closed)',
"""\
A more complex example to show how to make advanced reports.
""",
"""\
SELECT p.value AS __color__,
   t.milestone AS __group__,
   (CASE status
      WHEN 'closed' THEN 'color: #777; background: #ddd; border-color: #ccc;'
      ELSE
        (CASE owner WHEN $USER THEN 'font-weight: bold' END)
    END) AS __style__,
   id AS ticket, summary, component, status,
   resolution,version, t.type AS type, priority, owner,
   changetime AS modified,
   time AS _time,reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  ORDER BY (milestone IS NULL), milestone DESC, (status = 'closed'),
        (CASE status WHEN 'closed' THEN changetime ELSE (-1) * %s END) DESC
""" % db.cast('p.value', 'int')),
#----------------------------------------------------------------------------
('My Tickets',
"""\
This report demonstrates the use of the automatically set
USER dynamic variable, replaced with the username of the
logged in user when executed.
""",
"""\
SELECT __color__, __group,
       (CASE
         WHEN __group = 1 THEN 'Accepted'
         WHEN __group = 2 THEN 'Owned'
         WHEN __group = 3 THEN 'Reported'
         ELSE 'Commented' END) AS __group__,
       ticket, summary, component, version, milestone,
       type, priority, created, _changetime, _description,
       _reporter
  FROM (
   SELECT DISTINCT """ + db.cast('p.value', 'int') + """ AS __color__,
      (CASE
         WHEN owner = $USER AND status = 'accepted' THEN 1
         WHEN owner = $USER THEN 2
         WHEN reporter = $USER THEN 3
         ELSE 4 END) AS __group,
      t.id AS ticket, summary, component, version, milestone,
      t.type AS type, priority, t.time AS created,
      t.changetime AS _changetime, description AS _description,
      reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  LEFT JOIN ticket_change tc ON tc.ticket = t.id AND tc.author = $USER
                                AND tc.field = 'comment'
  WHERE t.status <> 'closed'
        AND (owner = $USER OR reporter = $USER OR author = $USER)
  ) AS sub
  ORDER BY __group, __color__, milestone, type, created
"""),
#----------------------------------------------------------------------------
('Active Tickets, Mine first',
"""\
 * List all active tickets by priority.
 * Show all tickets owned by the logged in user in a group first.
""",
"""\
SELECT p.value AS __color__,
   (CASE owner
     WHEN $USER THEN 'My Tickets'
     ELSE 'Active Tickets'
    END) AS __group__,
   id AS ticket, summary, component, version, milestone, t.type AS type,
   owner, status,
   time AS created,
   changetime AS _changetime, description AS _description,
   reporter AS _reporter
  FROM ticket t
  LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
  WHERE status <> 'closed'
  ORDER BY (COALESCE(owner, '') = $USER) DESC, """
  + db.cast('p.value', 'int') + """, milestone, t.type, time
"""))
##
## Default database values
##
# (table, (column1, column2), ((row1col1, row1col2), (row2col1, row2col2)))
def get_data(db):
    """Return the rows inserted into a freshly created environment.

    Format: ``(table, (column, ...), ((value, ...), ...))`` -- one entry
    per table receiving default data. The `db` connection is only needed
    to render the default reports for the current SQL dialect.
    """
    return (('component',
             ('name', 'owner'),
             (('component1', 'somebody'),
              ('component2', 'somebody'))),
            ('milestone',
             ('name', 'due', 'completed'),
             (('milestone1', 0, 0),
              ('milestone2', 0, 0),
              ('milestone3', 0, 0),
              ('milestone4', 0, 0))),
            ('version',
             ('name', 'time'),
             (('1.0', 0),
              ('2.0', 0))),
            ('enum',
             ('type', 'name', 'value'),
             (('resolution', 'fixed', 1),
              ('resolution', 'invalid', 2),
              ('resolution', 'wontfix', 3),
              ('resolution', 'duplicate', 4),
              ('resolution', 'worksforme', 5),
              ('priority', 'blocker', 1),
              ('priority', 'critical', 2),
              ('priority', 'major', 3),
              ('priority', 'minor', 4),
              ('priority', 'trivial', 5),
              ('ticket_type', 'defect', 1),
              ('ticket_type', 'enhancement', 2),
              ('ticket_type', 'task', 3))),
            ('permission',
             ('username', 'action'),
             (('anonymous', 'LOG_VIEW'),
              ('anonymous', 'FILE_VIEW'),
              ('anonymous', 'WIKI_VIEW'),
              ('authenticated', 'WIKI_CREATE'),
              ('authenticated', 'WIKI_MODIFY'),
              ('anonymous', 'SEARCH_VIEW'),
              ('anonymous', 'REPORT_VIEW'),
              ('anonymous', 'REPORT_SQL_VIEW'),
              ('anonymous', 'TICKET_VIEW'),
              ('authenticated', 'TICKET_CREATE'),
              ('authenticated', 'TICKET_MODIFY'),
              ('anonymous', 'BROWSER_VIEW'),
              ('anonymous', 'TIMELINE_VIEW'),
              ('anonymous', 'CHANGESET_VIEW'),
              ('anonymous', 'ROADMAP_VIEW'),
              ('anonymous', 'MILESTONE_VIEW'))),
            ('system',
             ('name', 'value'),
             (('database_version', str(db_version)),
              ('initial_database_version', str(db_version)))),
            ('report',
             ('author', 'title', 'query', 'description'),
             __mkreports(get_reports(db))))
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/attachment.py | trac/trac/attachment.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
from cStringIO import StringIO
from datetime import datetime
import errno
import os.path
import posixpath
import re
import shutil
import sys
import unicodedata
from genshi.builder import tag
from trac.admin import AdminCommandError, IAdminCommandProvider, PrefixList, \
console_datetime_format, get_dir_list
from trac.config import BoolOption, IntOption
from trac.core import *
from trac.mimeview import *
from trac.perm import PermissionError, IPermissionPolicy
from trac.resource import *
from trac.search import search_to_sql, shorten_result
from trac.util import content_disposition, get_reporter_id
from trac.util.compat import sha1
from trac.util.datefmt import format_datetime, from_utimestamp, \
to_datetime, to_utimestamp, utc
from trac.util.text import exception_to_unicode, path_to_unicode, \
pretty_size, print_table, unicode_unquote
from trac.util.translation import _, tag_
from trac.web import HTTPBadRequest, IRequestHandler, RequestDone
from trac.web.chrome import (INavigationContributor, add_ctxtnav, add_link,
add_stylesheet, web_context, add_warning)
from trac.web.href import Href
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import format_to
class InvalidAttachment(TracError):
    """Exception raised when attachment validation fails.

    NOTE(review): the raise site is outside this excerpt; presumably used
    when an `IAttachmentManipulator` rejects an upload -- confirm.
    """
class IAttachmentChangeListener(Interface):
    """Extension point interface for components that require
    notification when attachments are created or deleted."""

    def attachment_added(attachment):
        """Called when an attachment is added."""

    def attachment_deleted(attachment):
        """Called when an attachment is deleted."""

    def attachment_reparented(attachment, old_parent_realm, old_parent_id):
        """Called when an attachment is moved to a different parent
        resource; the old parent is given by `old_parent_realm` and
        `old_parent_id`."""
class IAttachmentManipulator(Interface):
    """Extension point interface for components that need to
    manipulate attachments.

    Unlike change listeners, a manipulator can reject changes being
    committed to the database."""

    def prepare_attachment(req, attachment, fields):
        """Not currently called, but should be provided for future
        compatibility."""

    def validate_attachment(req, attachment):
        """Validate an attachment after upload but before being stored
        in Trac environment.

        Must return a list of ``(field, message)`` tuples, one for
        each problem detected. ``field`` can be any of
        ``description``, ``username``, ``filename``, ``content``, or
        `None` to indicate an overall problem with the
        attachment. Therefore, a return value of ``[]`` means
        everything is OK."""
class ILegacyAttachmentPolicyDelegate(Interface):
    """Interface that can be used by plugins to seamlessly participate
    in the legacy way of checking for attachment permissions.

    This should no longer be necessary once it becomes easier to
    setup fine-grained permissions in the default permission store.
    """

    def check_attachment_permission(action, username, resource, perm):
        """Return the usual `True`/`False`/`None` security policy
        decision appropriate for the requested action on an
        attachment.

        :param action: one of ATTACHMENT_VIEW, ATTACHMENT_CREATE,
                       ATTACHMENT_DELETE
        :param username: the user string
        :param resource: the `~trac.resource.Resource` for the
                         attachment. Note that when
                         ATTACHMENT_CREATE is checked, the
                         resource ``.id`` will be `None`.
        :param perm: the permission cache for that username and resource
        """
class Attachment(object):
    """An attachment of a Trac resource (ticket, wiki page, ...).

    The metadata record lives in the `attachment` database table, while
    the content is stored on disk below ``<env>/files/attachments``, under
    hashed directory and file names derived from the parent id and the
    filename (see `_get_path` / `_get_hashed_filename`).
    """

    def __init__(self, env, parent_realm_or_attachment_resource,
                 parent_id=None, filename=None, db=None):
        # Accept either the attachment `Resource` itself, or the parent
        # realm plus id and filename. `db` is kept for API compatibility.
        if isinstance(parent_realm_or_attachment_resource, Resource):
            self.resource = parent_realm_or_attachment_resource
        else:
            self.resource = Resource(parent_realm_or_attachment_resource,
                                     parent_id).child('attachment', filename)
        self.env = env
        self.parent_realm = self.resource.parent.realm
        self.parent_id = unicode(self.resource.parent.id)
        if self.resource.id:
            # Existing attachment: populate the fields from the database.
            self._fetch(self.resource.id)
        else:
            # New attachment: fields are filled in later by `insert()`.
            self.filename = None
            self.description = None
            self.size = None
            self.date = None
            self.author = None
            self.ipnr = None

    def _set_filename(self, val):
        self.resource.id = val

    # The filename doubles as the id of the attachment resource.
    filename = property(lambda self: self.resource.id, _set_filename)

    def _from_database(self, filename, description, size, time, author, ipnr):
        # Populate the instance from a raw `attachment` table row.
        self.filename = filename
        self.description = description
        self.size = int(size) if size else 0
        self.date = from_utimestamp(time or 0)
        self.author = author
        self.ipnr = ipnr

    def _fetch(self, filename):
        """Load the attachment record, or raise `ResourceNotFound`."""
        for row in self.env.db_query("""
                SELECT filename, description, size, time, author, ipnr
                FROM attachment WHERE type=%s AND id=%s AND filename=%s
                ORDER BY time
                """, (self.parent_realm, unicode(self.parent_id), filename)):
            self._from_database(*row)
            break
        else:
            self.filename = filename
            raise ResourceNotFound(_("Attachment '%(title)s' does not exist.",
                                     title=self.title),
                                   _('Invalid Attachment'))

    # _get_path() and _get_hashed_filename() are class methods so that they
    # can be used in db28.py.

    @classmethod
    def _get_path(cls, env_path, parent_realm, parent_id, filename):
        """Get the path of an attachment.

        WARNING: This method is used by db28.py for moving attachments from
        the old "attachments" directory to the "files" directory. Please check
        all changes so that they don't break the upgrade.
        """
        path = os.path.join(env_path, 'files', 'attachments',
                            parent_realm)
        hash = sha1(parent_id.encode('utf-8')).hexdigest()
        # Shard directories by the first three hex digits of the id hash.
        path = os.path.join(path, hash[0:3], hash)
        if filename:
            path = os.path.join(path, cls._get_hashed_filename(filename))
        return os.path.normpath(path)

    # Matches a purely alphanumeric filename extension.
    _extension_re = re.compile(r'\.[A-Za-z0-9]+\Z')

    @classmethod
    def _get_hashed_filename(cls, filename):
        """Get the hashed filename corresponding to the given filename.

        WARNING: This method is used by db28.py for moving attachments from
        the old "attachments" directory to the "files" directory. Please check
        all changes so that they don't break the upgrade.
        """
        hash = sha1(filename.encode('utf-8')).hexdigest()
        match = cls._extension_re.search(filename)
        # Keep a recognizable extension on the stored file, if safe.
        return hash + match.group(0) if match else hash

    @property
    def path(self):
        # Absolute path of the attachment file on disk.
        return self._get_path(self.env.path, self.parent_realm, self.parent_id,
                              self.filename)

    @property
    def title(self):
        # Human-readable identification, e.g. "ticket:42: foo.txt".
        return '%s:%s: %s' % (self.parent_realm, self.parent_id, self.filename)

    def delete(self, db=None):
        """Delete the attachment, both the record in the database and
        the file itself.

        .. versionchanged :: 1.0
           the `db` parameter is no longer needed
           (will be removed in version 1.1.1)
        """
        assert self.filename, "Cannot delete non-existent attachment"

        with self.env.db_transaction as db:
            db("""
                DELETE FROM attachment WHERE type=%s AND id=%s AND filename=%s
                """, (self.parent_realm, self.parent_id, self.filename))
            path = self.path
            if os.path.isfile(path):
                try:
                    os.unlink(path)
                except OSError, e:
                    # Raising here rolls the DELETE back with the
                    # transaction.
                    self.env.log.error("Failed to delete attachment "
                                       "file %s: %s",
                                       path,
                                       exception_to_unicode(e, traceback=True))
                    raise TracError(_("Could not delete attachment"))

        self.env.log.info("Attachment removed: %s" % self.title)

        for listener in AttachmentModule(self.env).change_listeners:
            listener.attachment_deleted(self)
        ResourceSystem(self.env).resource_deleted(self)

    def reparent(self, new_realm, new_id):
        """Change the parent resource, moving both the database record and
        the stored file; raises `TracError` when the target is invalid or
        already holds a file of the same name."""
        assert self.filename, "Cannot reparent non-existent attachment"
        new_id = unicode(new_id)
        new_path = self._get_path(self.env.path, new_realm, new_id,
                                  self.filename)

        # Make sure the path to the attachment is inside the environment
        # attachments directory
        attachments_dir = os.path.join(os.path.normpath(self.env.path),
                                       'files', 'attachments')
        commonprefix = os.path.commonprefix([attachments_dir, new_path])
        if commonprefix != attachments_dir:
            raise TracError(_('Cannot reparent attachment "%(att)s" as '
                              '%(realm)s:%(id)s is invalid',
                              att=self.filename, realm=new_realm, id=new_id))
        if os.path.exists(new_path):
            raise TracError(_('Cannot reparent attachment "%(att)s" as '
                              'it already exists in %(realm)s:%(id)s',
                              att=self.filename, realm=new_realm, id=new_id))
        with self.env.db_transaction as db:
            db("""UPDATE attachment SET type=%s, id=%s
                  WHERE type=%s AND id=%s AND filename=%s
                  """, (new_realm, new_id, self.parent_realm, self.parent_id,
                        self.filename))
            dirname = os.path.dirname(new_path)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            path = self.path
            if os.path.isfile(path):
                try:
                    os.rename(path, new_path)
                except OSError, e:
                    # Raising rolls the UPDATE back with the transaction.
                    self.env.log.error("Failed to move attachment file %s: %s",
                                       path,
                                       exception_to_unicode(e, traceback=True))
                    raise TracError(_("Could not reparent attachment %(name)s",
                                      name=self.filename))

        old_realm, old_id = self.parent_realm, self.parent_id
        self.parent_realm, self.parent_id = new_realm, new_id
        self.resource = Resource(new_realm, new_id).child('attachment',
                                                          self.filename)

        self.env.log.info("Attachment reparented: %s" % self.title)

        for listener in AttachmentModule(self.env).change_listeners:
            # Older listeners may predate this event; only notify those
            # that implement it.
            if hasattr(listener, 'attachment_reparented'):
                listener.attachment_reparented(self, old_realm, old_id)
        old_values = dict()
        if self.parent_realm != old_realm:
            old_values["parent_realm"] = old_realm
        if self.parent_id != old_id:
            old_values["parent_id"] = old_id
        ResourceSystem(self.env).resource_changed(self, old_values=old_values)

    def insert(self, filename, fileobj, size, t=None, db=None):
        """Create a new Attachment record and save the file content.

        .. versionchanged :: 1.0
           the `db` parameter is no longer needed
           (will be removed in version 1.1.1)
        """
        self.size = int(size) if size else 0
        self.filename = None
        if t is None:
            t = datetime.now(utc)
        elif not isinstance(t, datetime): # Compatibility with 0.11
            t = to_datetime(t, utc)
        self.date = t

        # Make sure the path to the attachment is inside the environment
        # attachments directory
        attachments_dir = os.path.join(os.path.normpath(self.env.path),
                                       'files', 'attachments')
        dir = self.path
        commonprefix = os.path.commonprefix([attachments_dir, dir])
        if commonprefix != attachments_dir:
            raise TracError(_('Cannot create attachment "%(att)s" as '
                              '%(realm)s:%(id)s is invalid',
                              att=filename, realm=self.parent_realm,
                              id=self.parent_id))

        if not os.access(dir, os.F_OK):
            os.makedirs(dir)
        # May adjust `filename` when a hashed-name collision occurs.
        filename, targetfile = self._create_unique_file(dir, filename)
        with targetfile:
            with self.env.db_transaction as db:
                db("INSERT INTO attachment VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
                   (self.parent_realm, self.parent_id, filename, self.size,
                    to_utimestamp(t), self.description, self.author,
                    self.ipnr))
                shutil.copyfileobj(fileobj, targetfile)
                self.resource.id = self.filename = filename

                self.env.log.info("New attachment: %s by %s", self.title,
                                  self.author)

        for listener in AttachmentModule(self.env).change_listeners:
            listener.attachment_added(self)
        ResourceSystem(self.env).resource_created(self)

    @classmethod
    def select(cls, env, parent_realm, parent_id, db=None):
        """Iterator yielding all `Attachment` instances attached to
        resource identified by `parent_realm` and `parent_id`.

        .. versionchanged :: 1.0
           the `db` parameter is no longer needed
           (will be removed in version 1.1.1)
        """
        for row in env.db_query("""
                SELECT filename, description, size, time, author, ipnr
                FROM attachment WHERE type=%s AND id=%s ORDER BY time
                """, (parent_realm, unicode(parent_id))):
            attachment = Attachment(env, parent_realm, parent_id)
            attachment._from_database(*row)
            yield attachment

    @classmethod
    def delete_all(cls, env, parent_realm, parent_id, db=None):
        """Delete all attachments of a given resource.

        .. versionchanged :: 1.0
           the `db` parameter is no longer needed
           (will be removed in version 1.1.1)
        """
        attachment_dir = None
        with env.db_transaction as db:
            for attachment in cls.select(env, parent_realm, parent_id, db):
                attachment_dir = os.path.dirname(attachment.path)
                attachment.delete()
        # Best-effort cleanup of the (now supposedly empty) directory.
        if attachment_dir:
            try:
                os.rmdir(attachment_dir)
            except OSError, e:
                env.log.error("Can't delete attachment directory %s: %s",
                    attachment_dir, exception_to_unicode(e, traceback=True))

    @classmethod
    def reparent_all(cls, env, parent_realm, parent_id, new_realm, new_id):
        """Reparent all attachments of a given resource to another resource."""
        attachment_dir = None
        with env.db_transaction as db:
            # Materialize the selection first, since reparenting mutates
            # the rows the select iterates over.
            for attachment in list(cls.select(env, parent_realm, parent_id,
                                              db)):
                attachment_dir = os.path.dirname(attachment.path)
                attachment.reparent(new_realm, new_id)
        # Best-effort cleanup of the old directory.
        if attachment_dir:
            try:
                os.rmdir(attachment_dir)
            except OSError, e:
                env.log.error("Can't delete attachment directory %s: %s",
                    attachment_dir, exception_to_unicode(e, traceback=True))

    def open(self):
        """Return a binary file object for reading the content, raising
        `ResourceNotFound` when the file is missing on disk."""
        path = self.path
        self.env.log.debug('Trying to open attachment at %s', path)
        try:
            fd = open(path, 'rb')
        except IOError:
            raise ResourceNotFound(_("Attachment '%(filename)s' not found",
                                     filename=self.filename))
        return fd

    def _create_unique_file(self, dir, filename):
        """Open a new file for the attachment, avoiding hash collisions.

        Returns ``(filename, fileobj)``; on collision the filename gets a
        numeric suffix (``name.2.ext``, ...) and the creation is retried.
        """
        parts = os.path.splitext(filename)
        flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
        if hasattr(os, 'O_BINARY'):
            flags += os.O_BINARY
        idx = 1
        while 1:
            path = os.path.join(dir, self._get_hashed_filename(filename))
            try:
                return filename, os.fdopen(os.open(path, flags, 0666), 'w')
            except OSError, e:
                if e.errno != errno.EEXIST:
                    raise
                idx += 1
                # A sanity check
                if idx > 100:
                    raise Exception('Failed to create unique name: ' + path)
                filename = '%s.%d%s' % (parts[0], idx, parts[1])
class AttachmentModule(Component):
implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider,
IResourceManager)
change_listeners = ExtensionPoint(IAttachmentChangeListener)
manipulators = ExtensionPoint(IAttachmentManipulator)
CHUNK_SIZE = 4096
max_size = IntOption('attachment', 'max_size', 262144,
"""Maximum allowed file size (in bytes) for ticket and wiki
attachments.""")
max_zip_size = IntOption('attachment', 'max_zip_size', 2097152,
"""Maximum allowed total size (in bytes) for an attachment list to be
downloadable as a `.zip`. Set this to -1 to disable download as `.zip`.
(''since 1.0'')""")
render_unsafe_content = BoolOption('attachment', 'render_unsafe_content',
'false',
"""Whether attachments should be rendered in the browser, or
only made downloadable.
Pretty much any file may be interpreted as HTML by the browser,
which allows a malicious user to attach a file containing cross-site
scripting attacks.
For public sites where anonymous users can create attachments it is
recommended to leave this option disabled (which is the default).""")
# INavigationContributor methods
    def get_active_navigation_item(self, req):
        # Delegate highlighting to the realm the attachment belongs to
        # (the 'realm' request argument set by `match_request`).
        return req.args.get('realm')
    def get_navigation_items(self, req):
        """This component contributes no navigation entries of its own."""
        return []
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/(raw-|zip-)?attachment/([^/]+)(?:/(.*))?$',
req.path_info)
if match:
format, realm, path = match.groups()
if format:
req.args['format'] = format[:-1]
req.args['realm'] = realm
if path:
req.args['path'] = path
return True
    def process_request(self, req):
        """Dispatch attachment requests: list, view, upload or delete.

        Returns the ``attachment.html`` template with the data produced by
        the action-specific renderer.
        """
        parent_id = None
        parent_realm = req.args.get('realm')
        path = req.args.get('path')
        filename = None
        if not parent_realm or not path:
            raise HTTPBadRequest(_('Bad request'))

        parent_realm = Resource(parent_realm)
        action = req.args.get('action', 'view')
        if action == 'new':
            # For uploads the whole path denotes the parent resource.
            parent_id = path.rstrip('/')
        else:
            # Otherwise the last path segment is the attachment filename
            # (empty when the path ends with a '/').
            last_slash = path.rfind('/')
            if last_slash == -1:
                parent_id, filename = path, ''
            else:
                parent_id, filename = path[:last_slash], path[last_slash + 1:]

        parent = parent_realm(id=parent_id)

        # Link the attachment page to parent resource
        parent_name = get_resource_name(self.env, parent)
        parent_url = get_resource_url(self.env, parent, req.href)
        add_link(req, 'up', parent_url, parent_name)
        add_ctxtnav(req, _('Back to %(parent)s', parent=parent_name),
                    parent_url)

        if not filename: # there's a trailing '/'
            if req.args.get('format') == 'zip':
                self._download_as_zip(req, parent)
            elif action != 'new':
                return self._render_list(req, parent)

        attachment = Attachment(self.env, parent.child('attachment', filename))

        if req.method == 'POST':
            if action == 'new':
                data = self._do_save(req, attachment)
            elif action == 'delete':
                self._do_delete(req, attachment)
                # NOTE(review): a POST with any other action, or a
                # non-redirecting _do_delete, would leave `data` unbound
                # below -- the handlers are expected to redirect (raising
                # RequestDone); confirm before relying on this path.
        elif action == 'delete':
            data = self._render_confirm_delete(req, attachment)
        elif action == 'new':
            data = self._render_form(req, attachment)
        else:
            data = self._render_view(req, attachment)

        add_stylesheet(req, 'common/css/code.css')
        return 'attachment.html', data, None
# IWikiSyntaxProvider methods

def get_wiki_syntax(self):
    """No custom wiki syntax patterns are contributed by this module."""
    return []
def get_link_resolvers(self):
    """Resolve `raw-attachment:` and `attachment:` wiki links, both
    handled by `_format_link`."""
    for prefix in ('raw-attachment', 'attachment'):
        yield (prefix, self._format_link)
# Public methods

def viewable_attachments(self, context):
    """Return the list of viewable attachments in the given context.

    :param context: the `~trac.mimeview.api.RenderingContext`
                    corresponding to the parent
                    `~trac.resource.Resource` for the attachments
    """
    parent = context.resource
    # Keep only the attachments the context's user may actually view
    return [attachment
            for attachment in Attachment.select(self.env, parent.realm,
                                                parent.id)
            if 'ATTACHMENT_VIEW' in context.perm(attachment.resource)]
def attachment_data(self, context):
    """Return a data dictionary describing the list of viewable
    attachments in the current context.
    """
    parent = context.resource
    attachments = self.viewable_attachments(context)
    total_size = sum(attachment.size for attachment in attachments)
    new_att = parent.child('attachment')
    attach_href = get_resource_url(self.env, new_att, context.href)
    # The zip download is only offered while the total size stays
    # below the configured limit
    if total_size <= self.max_zip_size:
        download_href = get_resource_url(self.env, new_att, context.href,
                                         format='zip')
    else:
        download_href = None
    return {'attach_href': attach_href,
            'download_href': download_href,
            'can_create': 'ATTACHMENT_CREATE' in context.perm(new_att),
            'attachments': attachments,
            'parent': parent}
def get_history(self, start, stop, realm):
    """Return an iterable of tuples describing changes to attachments on
    a particular object realm.

    The tuples are in the form (change, realm, id, filename, time,
    description, author). `change` can currently only be `created`.

    FIXME: no iterator
    """
    # Note: the `realm` loop variable (bound to the `type` column) shadows
    # the `realm` parameter; the parameter is only read once, when the
    # query arguments below are evaluated.
    for realm, id, filename, ts, description, author in \
            self.env.db_query("""
            SELECT type, id, filename, time, description, author
            FROM attachment WHERE time > %s AND time < %s AND type = %s
            """, (to_utimestamp(start), to_utimestamp(stop), realm)):
        time = from_utimestamp(ts or 0)
        yield ('created', realm, id, filename, time, description, author)
def get_timeline_events(self, req, resource_realm, start, stop):
    """Return an event generator suitable for ITimelineEventProvider.

    Events are changes to attachments on resources of the given
    `resource_realm.realm`.
    """
    history = self.get_history(start, stop, resource_realm.realm)
    for change, realm, id, filename, time, descr, author in history:
        attachment = resource_realm(id=id).child('attachment', filename)
        # Skip attachments the requesting user may not view
        if 'ATTACHMENT_VIEW' not in req.perm(attachment):
            continue
        yield ('attachment', time, author, (attachment, descr), self)
def render_timeline_event(self, context, field, event):
    """Render one field of a timeline event produced by
    `get_timeline_events` (`url`, `title` or `description`)."""
    attachment, descr = event[3]
    if field == 'url':
        return self.get_resource_url(attachment, context.href)
    if field == 'title':
        name = get_resource_name(self.env, attachment.parent)
        title = get_resource_summary(self.env, attachment.parent)
        return tag_("%(attachment)s attached to %(resource)s",
                    attachment=tag.em(os.path.basename(attachment.id)),
                    resource=tag.em(name, title=title))
    if field == 'description':
        return format_to(self.env, None, context.child(attachment.parent),
                         descr)
def get_search_results(self, req, resource_realm, terms):
    """Return a search result generator suitable for ISearchSource.

    Search results are attachments on resources of the given
    `resource_realm.realm` whose filename, description or author match
    the given terms.
    """
    with self.env.db_query as db:
        # Translate the search terms into an SQL fragment over the
        # filename/description/author columns
        sql_query, args = search_to_sql(
            db, ['filename', 'description', 'author'], terms)
        for id, time, filename, desc, author in db("""
                SELECT id, time, filename, description, author
                FROM attachment WHERE type = %s AND """ + sql_query,
                (resource_realm.realm,) + args):
            attachment = resource_realm(id=id).child('attachment', filename)
            # Filter out hits the requesting user may not view
            if 'ATTACHMENT_VIEW' in req.perm(attachment):
                yield (get_resource_url(self.env, attachment, req.href),
                       get_resource_shortname(self.env, attachment),
                       from_utimestamp(time), author,
                       shorten_result(desc, terms))
# IResourceManager methods

def get_resource_realms(self):
    """Attachments live in a single realm of their own."""
    yield 'attachment'
def get_resource_url(self, resource, href, **kwargs):
    """Return an URL to the attachment itself.

    A `format` keyword argument equal to `'raw'` will be converted
    to the raw-attachment prefix.
    """
    if not resource.parent:
        return None
    format = kwargs.get('format')
    prefix = 'attachment'
    if format in ('raw', 'zip'):
        # The format is carried in the URL prefix instead of the query
        kwargs.pop('format')
        prefix = format + '-attachment'
    parent_href = unicode_unquote(get_resource_url(self.env,
                                                   resource.parent(version=None),
                                                   Href('')))
    if resource.id:
        return href(prefix, parent_href, resource.id, **kwargs)
    # link to list of attachments, which must end with a trailing '/'
    # (see process_request)
    return href(prefix, parent_href, '', **kwargs)
def get_resource_description(self, resource, format=None, **kwargs):
    """Describe either one attachment (`resource.id` set) or the
    attachment list of the parent resource (`resource.id` unset)."""
    if not resource.parent:
        return _("Unparented attachment %(id)s", id=resource.id)
    # Prefix identifying the neighborhood (product/instance) the resource
    # belongs to; presumably empty for the local neighborhood — TODO confirm
    # against ResourceSystem.neighborhood_prefix.
    nbhprefix = ResourceSystem(self.env).neighborhood_prefix(
        resource.neighborhood)
    if format == 'compact':
        return '%s%s (%s)' % (nbhprefix, resource.id,
                              get_resource_name(self.env, resource.parent))
    elif format == 'summary':
        return Attachment(self.env, resource).description
    if resource.id:
        desc = _("Attachment '%(id)s' in %(parent)s", id=resource.id,
                 parent=get_resource_name(self.env, resource.parent))
    else:
        desc = _("Attachments of %(parent)s",
                 parent=get_resource_name(self.env, resource.parent))
    if resource.neighborhood is not None:
        desc = nbhprefix + desc
    return desc
def resource_exists(self, resource):
    """An attachment resource exists when its file exists on disk."""
    try:
        attachment = Attachment(self.env, resource)
    except ResourceNotFound:
        return False
    return os.path.exists(attachment.path)
# Internal methods
def _do_save(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_CREATE')
parent_resource = attachment.resource.parent
if not resource_exists(self.env, parent_resource):
raise ResourceNotFound(
_("%(parent)s doesn't exist, can't create attachment",
parent=get_resource_name(self.env, parent_resource)))
if 'cancel' in req.args:
req.redirect(get_resource_url(self.env, parent_resource, req.href))
upload = req.args['attachment']
if not hasattr(upload, 'filename') or not upload.filename:
raise TracError(_('No file uploaded'))
if hasattr(upload.file, 'fileno'):
size = os.fstat(upload.file.fileno())[6]
else:
upload.file.seek(0, 2) # seek to end of file
size = upload.file.tell()
upload.file.seek(0)
if size == 0:
raise TracError(_("Can't upload empty file"))
# Maximum attachment size (in bytes)
max_size = self.max_size
if max_size >= 0 and size > max_size:
raise TracError(_('Maximum attachment size: %(num)s bytes',
num=max_size), _('Upload failed'))
# We try to normalize the filename to unicode NFC if we can.
# Files uploaded from OS X might be in NFD.
filename = unicodedata.normalize('NFC', unicode(upload.filename,
'utf-8'))
filename = filename.strip()
# Replace backslashes with slashes if filename is Windows full path
if filename.startswith('\\') or re.match(r'[A-Za-z]:\\', filename):
filename = filename.replace('\\', '/')
# We want basename to be delimited by only slashes on all platforms
filename = posixpath.basename(filename)
if not filename:
raise TracError(_('No file uploaded'))
# Now the filename is known, update the attachment resource
attachment.filename = filename
attachment.description = req.args.get('description', '')
attachment.author = get_reporter_id(req, 'author')
attachment.ipnr = req.remote_addr
# Validate attachment
valid = True
for manipulator in self.manipulators:
for field, message in manipulator.validate_attachment(req,
attachment):
valid = False
if field:
add_warning(req,
_('Attachment field %(field)s is invalid: %(message)s',
field=field, message=message))
else:
add_warning(req,
_('Invalid attachment: %(message)s', message=message))
if not valid:
# Display the attach form with pre-existing data
# NOTE: Local file path not known, file field cannot be repopulated
add_warning(req, _('Note: File must be selected again.'))
data = self._render_form(req, attachment)
data['is_replace'] = req.args.get('replace')
return data
if req.args.get('replace'):
try:
old_attachment = Attachment(self.env,
attachment.resource(id=filename))
if not (req.authname and req.authname != 'anonymous' \
and old_attachment.author == req.authname) \
and 'ATTACHMENT_DELETE' \
not in req.perm(attachment.resource):
raise PermissionError(msg=_("You don't have permission to "
"replace the attachment %(name)s. You can only "
"replace your own attachments. Replacing other's "
"attachments requires ATTACHMENT_DELETE permission.",
name=filename))
if (not attachment.description.strip() and
old_attachment.description):
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/core.py | trac/trac/core.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2011 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
__all__ = ['Component', 'ExtensionPoint', 'implements', 'Interface',
'TracError']
def N_(string):
    """Return `string` unchanged.

    Conventional gettext "no-op" marker: it tags strings for
    translation extraction without importing the real helpers from
    `trac.util`.
    """
    return string
class TracError(Exception):
    """Exception base class for errors in Trac."""

    # Default title; translated lazily in __init__ (see the N_ marker)
    title = N_('Trac Error')

    def __init__(self, message, title=None, show_traceback=False):
        """If message is a genshi.builder.tag object, everything up to
        the first <p> will be displayed in the red box, and everything
        after will be displayed below the red box. If title is given,
        it will be displayed as the large header above the error
        message.
        """
        # Deferred import — presumably to avoid an import cycle between
        # trac.core and trac.util.translation; confirm before moving it.
        from trac.util.translation import gettext
        Exception.__init__(self, message)
        self._message = message
        self.title = title or gettext(self.title)
        self.show_traceback = show_traceback

    # Read/write property over the private _message attribute
    message = property(lambda self: self._message,
                       lambda self, v: setattr(self, '_message', v))

    def __unicode__(self):
        return unicode(self.message)
class Interface(object):
    """Marker base class for extension point interfaces.

    Subclasses declare the protocol an implementing component must
    provide; the interface class object itself is used as the key in
    the component registry (see `ExtensionPoint` and `ComponentMeta`).
    """
class ExtensionPoint(property):
    """Marker class for extension points in components."""

    def __init__(self, interface):
        """Create the extension point.

        :param interface: the `Interface` subclass that defines the
                          protocol for the extension point
        """
        # Being a property, accessing the extension point attribute on a
        # component instance invokes `extensions` below.
        property.__init__(self, self.extensions)
        self.interface = interface
        self.__doc__ = ("List of components that implement `~%s.%s`" %
                        (self.interface.__module__, self.interface.__name__))

    def extensions(self, component):
        """Return a list of components that declare to implement the
        extension point interface.
        """
        # Activate each registered class through the component manager and
        # drop those the manager refused (disabled components yield None).
        classes = ComponentMeta._registry.get(self.interface, ())
        components = [component.compmgr[cls] for cls in classes]
        return [c for c in components if c]

    def __repr__(self):
        """Return a textual representation of the extension point."""
        return '<ExtensionPoint %s>' % self.interface.__name__
class ComponentMeta(type):
    """Meta class for components.

    Takes care of component and extension point registration.
    """
    _components = []  # every registered (non-abstract) component class
    _registry = {}    # Interface class -> list of implementing classes

    def __new__(mcs, name, bases, d):
        """Create the component class."""

        def nonrecursive_init(cls):
            """Replaces __init__ of the class with one that checks for
            recursion"""
            original_init = cls.__init__
            compmgrs = set()

            def new_init(self, *args, **kwargs):
                """Only run the original __init__ once per component manager"""
                if self.compmgr not in compmgrs:
                    try:
                        compmgrs.add(self.compmgr)
                        original_init(self, *args, **kwargs)
                    finally:
                        compmgrs.remove(self.compmgr)
            cls.__init__ = new_init
            return cls

        new_class = nonrecursive_init(type.__new__(mcs, name, bases, d))
        if name == 'Component':
            # Don't put the Component base class in the registry
            return new_class
        if d.get('abstract'):
            # Don't put abstract component classes in the registry
            return new_class

        ComponentMeta._components.append(new_class)
        registry = ComponentMeta._registry
        # Walk the MRO so interfaces declared on base classes are also
        # registered for this subclass (the `_implements` lists are
        # written by Component.implements()).
        for cls in new_class.__mro__:
            for interface in cls.__dict__.get('_implements', ()):
                classes = registry.setdefault(interface, [])
                if new_class not in classes:
                    classes.append(new_class)
        return new_class

    def __call__(cls, *args, **kwargs):
        """Return an existing instance of the component if it has
        already been activated, otherwise create a new instance.
        """
        # If this component is also the component manager, just invoke that
        if issubclass(cls, ComponentManager):
            self = cls.__new__(cls)
            self.compmgr = self
            self.__init__(*args, **kwargs)
            return self
        # The normal case where the component is not also the component manager
        compmgr = args[0]
        self = compmgr.components.get(cls)
        # Note that this check is racy, we intentionally don't use a
        # lock in order to keep things simple and avoid the risk of
        # deadlocks, as the impact of having temporarily two (or more)
        # instances for a given `cls` is negligible.
        if self is None:
            self = cls.__new__(cls)
            self.compmgr = compmgr
            compmgr.component_activated(self)
            self.__init__()
            # Only register the instance once it is fully initialized (#9418)
            compmgr.components[cls] = self
        return self
class Component(object):
    """Base class for components.

    Every component can declare what extension points it provides, as
    well as what extension points of other components it extends.
    """
    __metaclass__ = ComponentMeta

    @staticmethod
    def implements(*interfaces):
        """Can be used in the class definition of `Component`
        subclasses to declare the extension points that are extended.
        """
        import sys
        # Write `_implements` into the class body currently being
        # defined; ComponentMeta.__new__ picks it up at class creation.
        frame = sys._getframe(1)
        locals_ = frame.f_locals
        # Some sanity checks
        assert locals_ is not frame.f_globals and '__module__' in locals_, \
               'implements() can only be used in a class definition'
        locals_.setdefault('_implements', []).extend(interfaces)


# Module-level shortcut for use inside Component class bodies
implements = Component.implements
class ComponentManager(object):
    """The component manager keeps a pool of active components."""

    def __init__(self):
        """Initialize the component manager."""
        self.components = {}  # class -> activated instance (or None)
        self.enabled = {}     # class -> cached is_component_enabled() verdict
        if isinstance(self, Component):
            self.components[self.__class__] = self

    def __contains__(self, cls):
        """Return whether the given class is in the list of active
        components."""
        return cls in self.components

    def __getitem__(self, cls):
        """Activate the component instance for the given class, or
        return the existing instance if the component has already been
        activated.
        """
        if not self.is_enabled(cls):
            return None
        component = self.components.get(cls)
        # Leave other manager components out of extension point lists
        # see bh:comment:5:ticket:438 and ticket:11121
        if not component and not issubclass(cls, ComponentManager):
            if cls not in ComponentMeta._components:
                raise TracError('Component "%s" not registered' % cls.__name__)
            try:
                component = cls(self)
            except TypeError, e:
                raise TracError('Unable to instantiate component %r (%s)' %
                                (cls, e))
        return component

    def is_enabled(self, cls):
        """Return whether the given component class is enabled."""
        if cls not in self.enabled:
            # Cache the verdict so is_component_enabled() runs only once
            self.enabled[cls] = self.is_component_enabled(cls)
        return self.enabled[cls]

    def disable_component(self, component):
        """Force a component to be disabled.

        :param component: can be a class or an instance.
        """
        if not isinstance(component, type):
            component = component.__class__
        self.enabled[component] = False
        self.components[component] = None

    def component_activated(self, component):
        """Can be overridden by sub-classes so that special
        initialization for components can be provided.
        """

    def is_component_enabled(self, cls):
        """Can be overridden by sub-classes to veto the activation of
        a component.

        If this method returns `False`, the component was disabled
        explicitly. If it returns `None`, the component was neither
        enabled nor disabled explicitly. In both cases, the component
        with the given class will not be available.
        """
        return True
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/log.py | trac/trac/log.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
import logging
import logging.handlers
import sys
def logger_handler_factory(logtype='syslog', logfile=None, level='WARNING',
                           logid='Trac', format=None):
    """Create and configure a logger together with its handler.

    :param logtype: one of ``'file'``, ``'winlog'``/``'eventlog'``/
                    ``'nteventlog'``, ``'syslog'``/``'unix'`` or
                    ``'stderr'``; any other value installs a handler
                    that discards all records
    :param logfile: path of the log file, used only for ``logtype='file'``
    :param level: log level name (``'DEBUG'``/``'ALL'``, ``'INFO'``,
                  ``'ERROR'``, ``'CRITICAL'``); anything else falls back
                  to ``WARNING``
    :param logid: name of the logger to configure
    :param format: log format string; a Trac-specific default is used
                   when not given
    :return: the ``(logger, handler)`` pair
    """
    logger = logging.getLogger(logid)
    logtype = logtype.lower()
    if logtype == 'file':
        hdlr = logging.FileHandler(logfile)
    elif logtype in ('winlog', 'eventlog', 'nteventlog'):
        # Requires win32 extensions
        hdlr = logging.handlers.NTEventLogHandler(logid,
                                                  logtype='Application')
    elif logtype in ('syslog', 'unix'):
        hdlr = logging.handlers.SysLogHandler('/dev/log')
    elif logtype == 'stderr':
        # Bug fix: this used to be `logtype in ('stderr')`, which tests
        # *substring* membership in the string 'stderr', so values like
        # 'err', 'std' or '' also selected the stderr handler.
        hdlr = logging.StreamHandler(sys.stderr)
    else:
        hdlr = logging.handlers.BufferingHandler(0)
        # Note: this _really_ throws away log events, as a `MemoryHandler`
        # would keep _all_ records in case there's no target handler (a bug?)

    if not format:
        format = 'Trac[%(module)s] %(levelname)s: %(message)s'
        if logtype in ('file', 'stderr'):
            format = '%(asctime)s ' + format
    datefmt = ''
    if logtype == 'stderr':
        datefmt = '%X'

    level = level.upper()
    if level in ('DEBUG', 'ALL'):
        logger.setLevel(logging.DEBUG)
    elif level == 'INFO':
        logger.setLevel(logging.INFO)
    elif level == 'ERROR':
        logger.setLevel(logging.ERROR)
    elif level == 'CRITICAL':
        logger.setLevel(logging.CRITICAL)
    else:
        logger.setLevel(logging.WARNING)

    formatter = logging.Formatter(format, datefmt)
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    # Remember our handler so that we can remove it later
    logger._trac_handler = hdlr
    return logger, hdlr
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/resource.py | trac/trac/resource.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006-2007 Alec Thomas <alec@swapoff.org>
# Copyright (C) 2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <cboos@edgewall.org>
# Alec Thomas <alec@swapoff.org>
from trac.core import *
from trac.util.translation import _
class ResourceNotFound(TracError):
    """Thrown when a non-existent resource is requested.

    Carries the same constructor arguments as `TracError`.
    """
class IResourceManager(Interface):
    """Extension point interface for components that manage one or more
    resource realms."""

    def get_resource_realms():
        """Return resource realms managed by the component.

        :rtype: `basestring` generator
        """

    def get_resource_url(resource, href, **kwargs):
        """Return the canonical URL for displaying the given resource.

        :param resource: a `Resource`
        :param href: an `Href` used for creating the URL

        Note that if there's no special rule associated to this realm for
        creating URLs (i.e. the standard convention of using realm/id
        applies), then it's OK to not define this method.
        """

    def get_resource_description(resource, format='default', context=None,
                                 **kwargs):
        """Return a string representation of the resource, according to the
        `format`.

        :param resource: the `Resource` to describe
        :param format: the kind of description wanted. Typical formats are:
                       `'default'`, `'compact'` or `'summary'`.
        :param context: an optional rendering context to allow rendering rich
                        output (like markup containing links)
        :type context: `ResourceContext`

        Additional keyword arguments can be given as extra information for
        some formats.

        For example, the ticket with the id 123 is represented as:
         - `'#123'` in `'compact'` format,
         - `'Ticket #123'` for the `default` format.
         - `'Ticket #123 (closed defect): This is the summary'` for the
           `'summary'` format

        Note that it is also OK to not define this method if there's no
        special way to represent the resource, in which case the standard
        representations 'realm:id' (in compact mode) or 'Realm id' (in
        default mode) will be used.
        """

    def resource_exists(resource):
        """Check whether the given `resource` exists physically.

        :rtype: bool

        Attempting to retrieve the model object for a non-existing
        resource should raise a `ResourceNotFound` exception.
        (''since 0.11.8'')
        """
class IExternalResourceConnector(Interface):
    """Extension point interface for components able to load the
    component managers backing external (neighborhood) resources."""

    def get_supported_neighborhoods():
        """Return supported manager neighborhoods.

        :rtype: `basestring` generator
        """

    def load_manager(neighborhood):
        """Load the component manager identified by a given neighborhood.

        :param neighborhood: manager identifier (i.e. `Neighborhood`)
        :rtype: `trac.core.ComponentManager`
        """

    def manager_exists(neighborhood):
        """Check whether the component manager identified by
        the given `neighborhood` exists physically.

        :param neighborhood: manager identifier (i.e. `Neighborhood`)
        :rtype: bool

        Attempting to retrieve the manager object for a non-existing
        neighborhood should raise a `ResourceNotFound` exception.
        """
class Neighborhood(object):
    """Neighborhoods are the topmost level in the resources hierarchy.

    They represent resources managed by a component manager, thereby
    identifying the latter. As such, resource neighborhoods serve the
    purpose of specifying absolute references to resources hosted beyond
    the boundaries of a given component manager. As a side effect they are
    the key used to load component managers at run time.
    """

    __slots__ = ('_realm', '_id')

    @property
    def is_null(self):
        # The (None, None) neighborhood stands for "no neighborhood"
        return (self._realm, self._id) == (None, None)

    def __repr__(self):
        if self.is_null:
            return '<Neighborhood (null)>'
        else:
            return '<Neighborhood %s:%s>' % (self._realm, self._id)

    def __eq__(self, other):
        return isinstance(other, Neighborhood) and \
               self._realm == other._realm and \
               self._id == other._id

    def __hash__(self):
        """Hash this neighborhood descriptor."""
        return hash((self._realm, self._id))

    # Neighborhoods answer `None` for all resource-like attributes so
    # they can safely terminate `parent`/`neighborhood` chains.
    @property
    def id(self):
        return None

    @id.setter
    def id(self, value):
        # assignments are silently ignored
        pass

    realm = parent = neighborhood = version = id

    # -- methods for creating other Resource identifiers

    def __new__(cls, neighborhood_or_realm=None, id=False):
        """Create a new Neighborhood object from a specification.

        :param neighborhood_or_realm: this can be either:
           - a `Neighborhood`, which is then used as a base for making a
             copy
           - a `basestring`, used to specify a `realm`
        :param id: the neighborhood identifier

        >>> main = Neighborhood('nbh', 'id')
        >>> repr(main)
        '<Neighborhood nbh:id>'

        >>> Neighborhood(main) is main
        True

        >>> repr(Neighborhood(None))
        '<Neighborhood (null)>'
        """
        realm = neighborhood_or_realm
        if isinstance(neighborhood_or_realm, Neighborhood):
            if id is False:
                return neighborhood_or_realm
            else:  # copy and override
                realm = neighborhood_or_realm._realm
        elif id is False:
            id = None
        neighborhood = super(Neighborhood, cls).__new__(cls)
        neighborhood._realm = realm
        neighborhood._id = id
        return neighborhood

    def __call__(self, realm=False, id=False, version=False, parent=False):
        """Create a new Resource using the current resource as a template.

        Optional keyword arguments can be given to override `id` and
        `version`.

        >>> nbh = Neighborhood('nbh', 'id')
        >>> repr(nbh)
        '<Neighborhood nbh:id>'
        >>> main = nbh('wiki', 'WikiStart')
        >>> repr(main)
        "<Resource u'wiki:WikiStart' in Neighborhood nbh:id>"
        >>> Resource(main) is main
        True
        >>> main3 = Resource(main, version=3)
        >>> repr(main3)
        "<Resource u'wiki:WikiStart@3' in Neighborhood nbh:id>"
        >>> main0 = main3(version=0)
        >>> repr(main0)
        "<Resource u'wiki:WikiStart@0' in Neighborhood nbh:id>"

        In a copy, if `id` is overridden, then the original `version`
        value will not be reused.

        >>> repr(Resource(main3, id="WikiEnd"))
        "<Resource u'wiki:WikiEnd' in Neighborhood nbh:id>"

        >>> repr(nbh(None))
        '<Neighborhood nbh:id>'

        The null neighborhood is used to put absolute resource references
        back into relative form (i.e. `resource.neighborhood = None`)

        >>> nullnbh = Neighborhood(None, None)
        >>> repr(nullnbh)
        '<Neighborhood (null)>'
        >>> repr(nullnbh(main))
        "<Resource u'wiki:WikiStart'>"
        >>> repr(nullnbh(main3))
        "<Resource u'wiki:WikiStart@3'>"
        >>> repr(nullnbh(main0))
        "<Resource u'wiki:WikiStart@0'>"
        """
        if (realm, id, version, parent) in ((False, False, False, False),
                                            (None, False, False, False)):
            return self
        else:
            resource = Resource(realm, id, version, parent)
            # Rebase the whole parent chain onto this neighborhood when
            # the resource was created for a different one
            if resource.neighborhood is not self:
                resource = self._update_parents(resource)
            return resource

    def _update_parents(self, resource):
        # Rebuild `resource` (and every ancestor) tagged with `self`;
        # a null neighborhood over an untagged resource is a no-op.
        if self.is_null and resource.neighborhood is None:
            return resource
        newresource = Resource(resource.realm, resource.id, resource.version,
                               self)
        current = newresource
        parent = resource.parent
        while parent is not None:
            current.parent = Resource(parent.realm, parent.id, parent.version,
                                      self)
            current = current.parent
            parent = parent.parent
        return newresource

    # -- methods for retrieving children Resource identifiers

    def child(self, realm, id=False, version=False):
        """Retrieve a child resource for a secondary `realm`.

        Same as `__call__`, except that this one sets the parent to `self`.

        >>> repr(Neighborhood('realm', 'id').child('attachment', 'file.txt'))
        "<Resource u'attachment:file.txt' in Neighborhood realm:id>"
        """
        return self(realm, id, version)
class Resource(object):
    """Resource identifier.

    This specifies as precisely as possible *which* resource from a Trac
    environment is manipulated.

    A resource is identified by:
     (- a `project` identifier) 0.12?
     - a `realm` (a string like `'wiki'` or `'ticket'`)
     - an `id`, which uniquely identifies a resource within its realm.
       If the `id` information is not set, then the resource represents
       the realm as a whole.
     - an optional `version` information.
       If `version` is `None`, this refers by convention to the latest
       version of the resource.

    Some generic and commonly used rendering methods are associated as well
    to the Resource object. Those properties and methods actually delegate
    the real work to the Resource's manager.
    """

    __slots__ = ('realm', 'id', 'version', 'parent', 'neighborhood')

    def __repr__(self):
        # Render the chain of resources from the topmost parent down to
        # `self`, each one as realm[:id][@version].
        path = []
        r = self
        while r:
            name = r.realm
            if r.id:
                name += ':' + unicode(r.id)  # id can be numerical
            if r.version is not None:
                name += '@' + unicode(r.version)
            path.append(name or '')
            r = r.parent
        path = reversed(path)
        if self.neighborhood is None:
            return '<Resource %r>' % (', '.join(path))
        else:
            return '<Resource %r in Neighborhood %s:%s>' % (', '.join(path),
                    self.neighborhood._realm,
                    self.neighborhood._id)

    def __eq__(self, other):
        return isinstance(other, Resource) and \
               self.realm == other.realm and \
               self.id == other.id and \
               self.version == other.version and \
               self.parent == other.parent and \
               self.neighborhood == other.neighborhood

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive `!=` from `__eq__`, so two
        # equal resources still compared as "not equal" before this was
        # defined.
        return not self.__eq__(other)

    def __hash__(self):
        """Hash this resource descriptor, including its hierarchy."""
        path = ()
        current = self
        while current:
            # Bug fix: this used to accumulate `self.realm/id/version` on
            # every step instead of the fields of `current`, hashing all
            # ancestors as copies of `self`.  Equal resources still hash
            # equally after the fix (cf. the field-by-field `__eq__`).
            path += (current.realm, current.id, current.version)
            current = current.parent
        if self.neighborhood is not None:
            # FIXME: Collisions !!!
            path = (self.neighborhood._realm, self.neighborhood._id) + path
        else:
            path = (None, None) + path
        return hash(path)

    # -- methods for creating other Resource identifiers

    def __new__(cls, resource_or_realm=None, id=False, version=False,
                parent=False):
        """Create a new Resource object from a specification.

        :param resource_or_realm: this can be either:
           - a `Resource`, which is then used as a base for making a copy
           - a `basestring`, used to specify a `realm`
        :param id: the resource identifier
        :param version: the version or `None` for indicating the latest
                        version
        :param parent: the parent `Resource`, or a `Neighborhood` that the
                       new resource should be tagged with instead

        >>> main = Resource('wiki', 'WikiStart')
        >>> repr(main)
        "<Resource u'wiki:WikiStart'>"

        >>> Resource(main) is main
        True

        >>> main3 = Resource(main, version=3)
        >>> repr(main3)
        "<Resource u'wiki:WikiStart@3'>"

        >>> main0 = main3(version=0)
        >>> repr(main0)
        "<Resource u'wiki:WikiStart@0'>"

        In a copy, if `id` is overridden, then the original `version` value
        will not be reused.

        >>> repr(Resource(main3, id="WikiEnd"))
        "<Resource u'wiki:WikiEnd'>"

        >>> repr(Resource(None))
        "<Resource ''>"
        """
        realm = resource_or_realm
        # A Neighborhood passed as `parent` only tags the resource; it is
        # not part of the parent chain.
        if isinstance(parent, Neighborhood):
            neighborhood = parent
            parent = False
        else:
            neighborhood = None
        if isinstance(resource_or_realm, Resource):
            if id is False and version is False and parent is False:
                return resource_or_realm
            else:  # copy and override
                realm = resource_or_realm.realm
            if id is False:
                id = resource_or_realm.id
            if version is False:
                if id == resource_or_realm.id:
                    version = resource_or_realm.version  # could be 0...
                else:
                    version = None
            if parent is False:
                parent = resource_or_realm.parent
            neighborhood = neighborhood or resource_or_realm.neighborhood
        else:
            if id is False:
                id = None
            if version is False:
                version = None
            if parent is False:
                parent = None
            neighborhood = neighborhood or getattr(parent, 'neighborhood',
                                                   None)
        resource = super(Resource, cls).__new__(cls)
        resource.realm = realm
        resource.id = id
        resource.version = version
        resource.parent = parent
        # A null neighborhood is equivalent to no neighborhood at all
        if neighborhood and neighborhood.is_null:
            neighborhood = None
        resource.neighborhood = neighborhood
        return resource

    def __call__(self, realm=False, id=False, version=False, parent=False):
        """Create a new Resource using the current resource as a template.

        Optional keyword arguments can be given to override `id` and
        `version`.
        """
        return Resource(self if realm is False else realm, id, version,
                        parent)

    # -- methods for retrieving children Resource identifiers

    def child(self, realm, id=False, version=False):
        """Retrieve a child resource for a secondary `realm`.

        Same as `__call__`, except that this one sets the parent to `self`.

        >>> repr(Resource(None).child('attachment', 'file.txt'))
        "<Resource u', attachment:file.txt'>"
        """
        return Resource(realm, id, version, self)
class IResourceChangeListener(Interface):
    """Extension point interface for components that require notification
    when resources are created, modified, or deleted.

    The 'resource' parameter is an instance of a resource, e.g. a ticket,
    a milestone, etc.
    'context' is an action context; it may contain author, comment etc.
    The context content depends on the resource type.
    """

    def match_resource(resource):
        """Return whether the listener wants to process the given resource."""

    def resource_created(resource, context):
        """
        Called when a resource is created.
        """

    def resource_changed(resource, old_values, context):
        """Called when a resource is modified.

        `old_values` is a dictionary containing the previous values of the
        resource properties that changed. Properties are specific for resource
        type.
        """

    def resource_deleted(resource, context):
        """Called when a resource is deleted."""

    def resource_version_deleted(resource, context):
        """Called when a version of a resource has been deleted."""
class ResourceSystem(Component):
"""Resource identification and description manager.
This component makes the link between `Resource` identifiers and their
corresponding manager `Component`.
"""
resource_connectors = ExtensionPoint(IExternalResourceConnector)
resource_managers = ExtensionPoint(IResourceManager)
change_listeners = ExtensionPoint(IResourceChangeListener)
def __init__(self):
    # Lazily-built realm lookup tables; populated on first use by
    # get_resource_connector() and get_resource_manager() respectively.
    self._resource_connector_map = None
    self._resource_managers_map = None
# Public methods

def get_resource_manager(self, realm):
    """Return the component responsible for resources in the given `realm`

    :param realm: the realm name
    :return: a `Component` implementing `IResourceManager` or `None`
    """
    if not self._resource_managers_map:
        # Build the realm -> IResourceManager lookup table on first use
        mapping = {}
        for manager in self.resource_managers:
            for managed_realm in manager.get_resource_realms() or []:
                mapping[managed_realm] = manager
        self._resource_managers_map = mapping
    return self._resource_managers_map.get(realm)
def get_known_realms(self):
"""Return a list of all the realm names of resource managers."""
realms = []
for manager in self.resource_managers:
for realm in manager.get_resource_realms() or []:
realms.append(realm)
return realms
def get_resource_connector(self, realm):
"""Return the component responsible for loading component managers
given the neighborhood `realm`
:param realm: the realm name
:return: a `ComponentManager` implementing `IExternalResourceConnector`
or `None`
"""
# build a dict of neighborhood realm keys to target implementations
if not self._resource_connector_map:
map = {}
for connector in self.resource_connectors:
for conn_realm in connector.get_supported_neighborhoods() or []:
map[conn_realm] = connector
self._resource_connector_map = map
return self._resource_connector_map.get(realm)
def get_known_neighborhoods(self):
"""Return a list of all the realm names of neighborhoods."""
realms = []
for connector in self.resource_connectors:
for realm in connector.get_supported_neighborhoods() or []:
realms.append(realm)
return realms
def load_component_manager(self, neighborhood, default=None):
"""Load the component manager identified by a given instance of
`Neighborhood` class.
:throws ResourceNotFound: if there is no connector for neighborhood
"""
if neighborhood is None or neighborhood._realm is None:
if default is not None:
return default
else:
raise ResourceNotFound('Unexpected neighborhood %s' %
(neighborhood,))
c = self.get_resource_connector(neighborhood._realm)
if c is None:
raise ResourceNotFound('Missing connector for neighborhood %s' %
(neighborhood,))
return c.load_manager(neighborhood)
def neighborhood_prefix(self, neighborhood):
return '' if neighborhood is None \
else '[%s:%s] ' % (neighborhood._realm,
neighborhood._id or '')
# -- Utilities to trigger resources event notifications
def resource_created(self, resource, context=None):
for listener in self.change_listeners:
if listener.match_resource(resource):
listener.resource_created(resource, context)
def resource_changed(self, resource, old_values, context=None):
for listener in self.change_listeners:
if listener.match_resource(resource):
listener.resource_changed(resource, old_values, context)
def resource_deleted(self, resource, context=None):
for listener in self.change_listeners:
if listener.match_resource(resource):
listener.resource_deleted(resource, context)
def resource_version_deleted(self, resource, context=None):
for listener in self.change_listeners:
if listener.match_resource(resource):
listener.resource_version_deleted(resource, context)
def manager_for_neighborhood(compmgr, neighborhood):
    """Instantiate a given component manager identified by
    target neighborhood.

    :param compmgr: Source component manager, also used as the fallback
                    when the neighborhood is null.
    :param neighborhood: Target neighborhood
    :throws ResourceNotFound: if there is no connector for neighborhood
    """
    return ResourceSystem(compmgr).load_component_manager(neighborhood,
                                                          compmgr)
# -- Utilities for manipulating resources in a generic way
def get_resource_url(env, resource, href, **kwargs):
    """Retrieve the canonical URL for the given resource.

    The work is delegated to the resource manager for `resource` when it
    provides a `get_resource_url` method; otherwise a plain
    '/realm/identifier' style URL is built.

    :param env: the `Environment` where `IResourceManager` components live
    :param resource: the `Resource` object specifying the Trac resource
    :param href: an `Href` object used for building the URL

    Additional keyword arguments are translated as query parameters in the URL.

    >>> from trac.test import EnvironmentStub
    >>> from trac.web.href import Href
    >>> env = EnvironmentStub()
    >>> href = Href('/trac.cgi')
    >>> main = Resource('generic', 'Main')
    >>> get_resource_url(env, main, href)
    '/trac.cgi/generic/Main'
    >>> get_resource_url(env, main(version=3), href)
    '/trac.cgi/generic/Main?version=3'
    >>> get_resource_url(env, main(version=3), href)
    '/trac.cgi/generic/Main?version=3'
    >>> get_resource_url(env, main(version=3), href, action='diff')
    '/trac.cgi/generic/Main?action=diff&version=3'
    >>> get_resource_url(env, main(version=3), href, action='diff', version=5)
    '/trac.cgi/generic/Main?action=diff&version=5'
    """
    try:
        compmgr = manager_for_neighborhood(env, resource.neighborhood)
    except ResourceNotFound:
        compmgr = None
    if compmgr is not None:
        rsys = ResourceSystem(compmgr)
        if rsys.env is not env:
            # Use absolute href for external resources
            href = rsys.env.abs_href
        manager = rsys.get_resource_manager(resource.realm)
        if manager and hasattr(manager, 'get_resource_url'):
            return manager.get_resource_url(resource, href, **kwargs)
    # Fallback: generic URL; explicit keyword args win over the version.
    params = dict(kwargs)
    params.setdefault('version', resource.version)
    return href(resource.realm, resource.id, **params)
def get_resource_description(env, resource, format='default', **kwargs):
    """Retrieve a standardized description for the given resource.

    The work is delegated to the resource manager for `resource` when it
    provides a `get_resource_description` method; otherwise a simple
    presentation of realm and identifier is returned.

    :param env: the `Environment` where `IResourceManager` components live
    :param resource: the `Resource` object specifying the Trac resource
    :param format: which formats to use for the description

    Additional keyword arguments can be provided and will be propagated
    to resource manager that might make use of them (typically, a `context`
    parameter for creating context dependent output).

    >>> from trac.test import EnvironmentStub
    >>> env = EnvironmentStub()
    >>> main = Resource('generic', 'Main')
    >>> get_resource_description(env, main)
    u'generic:Main'
    >>> get_resource_description(env, main(version=3))
    u'generic:Main'
    >>> get_resource_description(env, main(version=3), format='summary')
    u'generic:Main at version 3'
    """
    try:
        compmgr = manager_for_neighborhood(env, resource.neighborhood)
    except ResourceNotFound:
        # Unknown neighborhood: describe it from the local environment.
        rsys = ResourceSystem(env)
    else:
        rsys = ResourceSystem(compmgr)
        manager = rsys.get_resource_manager(resource.realm)
        if manager and hasattr(manager, 'get_resource_description'):
            return manager.get_resource_description(resource, format,
                                                    **kwargs)
    prefix = rsys.neighborhood_prefix(resource.neighborhood)
    label = u'%s%s:%s' % (prefix, resource.realm, resource.id)
    if format == 'summary':
        label = _('%(name)s at version %(version)s',
                  name=label, version=resource.version)
    return label
def get_resource_name(env, resource):
    # Convenience wrapper: default-format description.
    return get_resource_description(env, resource)

def get_resource_shortname(env, resource):
    # Convenience wrapper: compact description.
    return get_resource_description(env, resource, 'compact')

def get_resource_summary(env, resource):
    # Convenience wrapper: summary description (includes version).
    return get_resource_description(env, resource, 'summary')
def get_relative_resource(resource, path=''):
    """Build a Resource relative to a reference resource.

    :param path: path leading to another resource within the same realm.
    """
    if path in (None, '', '.'):
        return resource
    # An absolute path restarts from the realm root, otherwise start from
    # the reference resource's id.
    start = '' if path.startswith('/') else resource.id
    segments = unicode(start).split('/')
    for part in path.split('/'):
        if part == '..':
            if segments:
                segments.pop()
        elif part not in ('', '.'):
            segments.append(part)
    new_id = '/'.join(segments) if segments else None
    return resource(id=new_id)
def get_relative_url(env, resource, href, path='', **kwargs):
    """Build an URL relative to a resource given as reference.

    :param path: path leading to another resource within the same realm.

    >>> from trac.test import EnvironmentStub
    >>> env = EnvironmentStub()
    >>> from trac.web.href import Href
    >>> href = Href('/trac.cgi')
    >>> main = Resource('wiki', 'Main', version=3)

    Without parameters, return the canonical URL for the resource, like
    `get_resource_url` does.

    >>> get_relative_url(env, main, href)
    '/trac.cgi/wiki/Main?version=3'

    Paths are relative to the given resource:

    >>> get_relative_url(env, main, href, '.')
    '/trac.cgi/wiki/Main?version=3'

    >>> get_relative_url(env, main, href, './Sub')
    '/trac.cgi/wiki/Main/Sub'

    >>> get_relative_url(env, main, href, './Sub/Infra')
    '/trac.cgi/wiki/Main/Sub/Infra'

    >>> get_relative_url(env, main, href, './Sub/')
    '/trac.cgi/wiki/Main/Sub'

    >>> mainsub = main(id='Main/Sub')
    >>> get_relative_url(env, mainsub, href, '..')
    '/trac.cgi/wiki/Main'

    >>> get_relative_url(env, main, href, '../Other')
    '/trac.cgi/wiki/Other'

    References always stay within the current resource realm:

    >>> get_relative_url(env, mainsub, href, '../..')
    '/trac.cgi/wiki'

    >>> get_relative_url(env, mainsub, href, '../../..')
    '/trac.cgi/wiki'

    >>> get_relative_url(env, mainsub, href, '/toplevel')
    '/trac.cgi/wiki/toplevel'

    Extra keyword arguments are forwarded as query parameters:

    >>> get_relative_url(env, main, href, action='diff')
    '/trac.cgi/wiki/Main?action=diff&version=3'
    """
    # Resolve the relative path first, then delegate URL building.
    return get_resource_url(env, get_relative_resource(resource, path),
                            href, **kwargs)
def render_resource_link(env, context, resource, format='default'):
    """Utility for generating a link `Element` to the given resource.

    Some component manager may directly use an extra `context` parameter
    in order to directly generate rich content. Otherwise, the textual output
    is wrapped in a link to the resource.
    """
    from genshi.builder import Element, tag
    rendered = get_resource_description(env, resource, format,
                                        context=context)
    if isinstance(rendered, Element):
        # Already rich content, return as-is.
        return rendered
    url = get_resource_url(env, resource, context.href)
    return tag.a(rendered, href=url)
def resource_exists(env, resource):
    """Checks for resource existence without actually instantiating a model.

    :return: `True` if the resource exists, `False` if it doesn't
             and `None` in case no conclusion could be made (i.e. when
             `IResourceManager.resource_exists` is not implemented).

    >>> from trac.test import EnvironmentStub
    >>> env = EnvironmentStub()
    >>> resource_exists(env, Resource('dummy-realm', 'dummy-id')) is None
    True
    >>> resource_exists(env, Resource('dummy-realm'))
    False
    """
    try:
        # An unreachable neighborhood implies the resource cannot exist.
        ResourceSystem(manager_for_neighborhood(env, resource.neighborhood))
    except ResourceNotFound:
        return False
    manager = ResourceSystem(env).get_resource_manager(resource.realm)
    if manager and hasattr(manager, 'resource_exists'):
        return manager.resource_exists(resource)
    elif resource.id is None:
        return False
    # Falling through returns None: existence cannot be determined here.
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Extra commands for setup.py.
In addition to providing a few extra command classes in `l10n_cmdclass`,
we also modify the standard `distutils.command.build` and
`setuptools.command.install_lib` classes so that the relevant l10n commands
for compiling catalogs are issued upon install.
"""
from __future__ import with_statement
from StringIO import StringIO
from itertools import izip
import os
import re
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from distutils import log
from distutils.cmd import Command
from distutils.command.build import build as _build
from distutils.errors import DistutilsOptionError
from setuptools.command.install_lib import install_lib as _install_lib
try:
from babel.messages.catalog import TranslationError
from babel.messages.extract import extract_javascript
from babel.messages.frontend import extract_messages, init_catalog, \
compile_catalog, update_catalog
from babel.messages.pofile import read_po
from babel.support import Translations
from babel.util import parse_encoding
_GENSHI_MARKUP_SEARCH = re.compile(r'\[[0-9]+:').search
_DEFAULT_KWARGS_MAPS = {
'Option': {'doc': 4},
'BoolOption': {'doc': 4},
'IntOption': {'doc': 4},
'FloatOption': {'doc': 4},
'ListOption': {'doc': 6},
'ChoiceOption': {'doc': 4},
'PathOption': {'doc': 4},
'ExtensionOption': {'doc': 5},
'OrderedExtensionsOption': {'doc': 6},
}
_DEFAULT_CLEANDOC_KEYWORDS = (
'ConfigSection', 'Option', 'BoolOption', 'IntOption', 'FloatOption',
'ListOption', 'ChoiceOption', 'PathOption', 'ExtensionOption',
'OrderedExtensionsOption', 'cleandoc_',
)
    def extract_python(fileobj, keywords, comment_tags, options):
        """Extract messages from Python source code, This is patched
        extract_python from Babel to support keyword argument mapping.

        `kwargs_maps` option: names of keyword arguments will be mapping to
        index of messages array.

        `cleandoc_keywords` option: a list of keywords to clean up the
        extracted messages with `cleandoc`.
        """
        from trac.util.compat import cleandoc

        # Token-stream state machine state:
        #   funcname         -- keyword currently being parsed, or None
        #   call_stack       -- paren nesting depth inside the keyword call
        #                       (-1 = not inside a call)
        #   buf              -- string fragments of the current argument
        #   messages         -- positional message arguments collected so far
        #   messages_kwargs  -- keyword message arguments collected so far
        funcname = lineno = message_lineno = None
        kwargs_maps = func_kwargs_map = None
        call_stack = -1
        buf = []
        messages = []
        messages_kwargs = {}
        translator_comments = []
        in_def = in_translator_comments = False
        comment_tag = None

        encoding = parse_encoding(fileobj) \
                   or options.get('encoding', 'iso-8859-1')
        kwargs_maps = _DEFAULT_KWARGS_MAPS.copy()
        if 'kwargs_maps' in options:
            kwargs_maps.update(options['kwargs_maps'])
        cleandoc_keywords = set(_DEFAULT_CLEANDOC_KEYWORDS)
        if 'cleandoc_keywords' in options:
            cleandoc_keywords.update(options['cleandoc_keywords'])

        tokens = generate_tokens(fileobj.readline)
        tok = value = None
        for _ in tokens:
            prev_tok, prev_value = tok, value
            tok, value, (lineno, _), _, _ = _
            if call_stack == -1 and tok == NAME and value in ('def', 'class'):
                in_def = True
            elif tok == OP and value == '(':
                if in_def:
                    # Avoid false positives for declarations such as:
                    # def gettext(arg='message'):
                    in_def = False
                    continue
                if funcname:
                    message_lineno = lineno
                    call_stack += 1
                    kwarg_name = None
            elif in_def and tok == OP and value == ':':
                # End of a class definition without parens
                in_def = False
                continue
            elif call_stack == -1 and tok == COMMENT:
                # Strip the comment token from the line
                value = value.decode(encoding)[1:].strip()
                if in_translator_comments and \
                        translator_comments[-1][0] == lineno - 1:
                    # We're already inside a translator comment, continue
                    # appending
                    translator_comments.append((lineno, value))
                    continue
                # If execution reaches this point, let's see if comment line
                # starts with one of the comment tags
                for comment_tag in comment_tags:
                    if value.startswith(comment_tag):
                        in_translator_comments = True
                        translator_comments.append((lineno, value))
                        break
            elif funcname and call_stack == 0:
                if tok == OP and value == ')':
                    # End of the keyword call: flush the pending argument.
                    if buf:
                        message = ''.join(buf)
                        if kwarg_name in func_kwargs_map:
                            messages_kwargs[kwarg_name] = message
                        else:
                            messages.append(message)
                        del buf[:]
                    else:
                        messages.append(None)

                    # Merge keyword messages into their mapped positional
                    # slots (indices in kwargs_maps are 1-based).
                    # NOTE(review): `while index >= len(messages)` pads one
                    # element further than `messages[index - 1]` needs,
                    # leaving a trailing None -- looks like an off-by-one,
                    # confirm against upstream Babel/Trac behavior.
                    for name, message in messages_kwargs.iteritems():
                        if name not in func_kwargs_map:
                            continue
                        index = func_kwargs_map[name]
                        while index >= len(messages):
                            messages.append(None)
                        messages[index - 1] = message

                    if funcname in cleandoc_keywords:
                        messages = [m and cleandoc(m) for m in messages]
                    if len(messages) > 1:
                        messages = tuple(messages)
                    else:
                        messages = messages[0]
                    # Comments don't apply unless they immediately preceed the
                    # message
                    if translator_comments and \
                            translator_comments[-1][0] < message_lineno - 1:
                        translator_comments = []

                    yield (message_lineno, funcname, messages,
                           [comment[1] for comment in translator_comments])

                    # Reset all per-call state for the next keyword call.
                    funcname = lineno = message_lineno = None
                    kwarg_name = func_kwargs_map = None
                    call_stack = -1
                    messages = []
                    messages_kwargs = {}
                    translator_comments = []
                    in_translator_comments = False
                elif tok == STRING:
                    # Unwrap quotes in a safe manner, maintaining the string's
                    # encoding
                    # https://sourceforge.net/tracker/?func=detail&atid=355470&
                    # aid=617979&group_id=5470
                    value = eval('# coding=%s\n%s' % (encoding, value),
                                 {'__builtins__':{}}, {})
                    if isinstance(value, str):
                        value = value.decode(encoding)
                    buf.append(value)
                elif tok == OP and value == '=' and prev_tok == NAME:
                    # `name=` introduces a keyword argument.
                    kwarg_name = prev_value
                elif tok == OP and value == ',':
                    # Argument separator: flush the pending argument.
                    if buf:
                        message = ''.join(buf)
                        if kwarg_name in func_kwargs_map:
                            messages_kwargs[kwarg_name] = message
                        else:
                            messages.append(message)
                        del buf[:]
                    else:
                        messages.append(None)
                    kwarg_name = None
                    if translator_comments:
                        # We have translator comments, and since we're on a
                        # comma(,) user is allowed to break into a new line
                        # Let's increase the last comment's lineno in order
                        # for the comment to still be a valid one
                        old_lineno, old_comment = translator_comments.pop()
                        translator_comments.append((old_lineno+1, old_comment))
            elif call_stack > 0 and tok == OP and value == ')':
                call_stack -= 1
            elif funcname and call_stack == -1:
                funcname = func_kwargs_map = kwarg_name = None
            elif tok == NAME and value in keywords:
                funcname = value
                func_kwargs_map = kwargs_maps.get(funcname, {})
                kwarg_name = None
    def extract_javascript_script(fileobj, keywords, comment_tags, options):
        """Extract messages from Javascript embedding in <script> tags.

        Select <script type="javascript/text"> tags and delegate to
        `extract_javascript`.
        """
        from genshi.core import Stream
        from genshi.input import XMLParser
        # Render only the selected <script> bodies into an in-memory buffer,
        # then let Babel's JavaScript extractor parse that buffer.
        out = StringIO()
        stream = Stream(XMLParser(fileobj))
        stream = stream.select('//script[@type="text/javascript"]')
        stream.render(out=out, encoding='utf-8')
        out.seek(0)
        return extract_javascript(out, keywords, comment_tags, options)
class generate_messages_js(Command):
"""Generating message javascripts command for use ``setup.py`` scripts.
"""
description = 'generate message javascript files from binary MO files'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-dir=', 'I',
'path to base directory containing the catalogs'),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'O',
"name of the output directory"),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>.js')"),
('locale=', 'l',
'locale of the catalog to compile'),
]
def initialize_options(self):
self.domain = 'messages'
self.input_dir = None
self.input_file = None
self.output_dir = None
self.output_file = None
self.locale = None
def finalize_options(self):
if not self.input_file and not self.input_dir:
raise DistutilsOptionError('you must specify either the input '
'file or directory')
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify either the '
'output file or directory')
def run(self):
mo_files = []
js_files = []
def js_path(dir, locale):
return os.path.join(dir, locale + '.js')
if not self.input_file:
if self.locale:
mo_files.append((self.locale,
os.path.join(self.input_dir, self.locale,
'LC_MESSAGES',
self.domain + '.mo')))
js_files.append(js_path(self.output_dir, self.locale))
else:
for locale in os.listdir(self.input_dir):
mo_file = os.path.join(self.input_dir, locale,
'LC_MESSAGES',
self.domain + '.mo')
if os.path.exists(mo_file):
mo_files.append((locale, mo_file))
js_files.append(js_path(self.output_dir, locale))
else:
mo_files.append((self.locale, self.input_file))
if self.output_file:
js_files.append(self.output_file)
else:
js_files.append(js_path(self.output_dir, locale))
if not mo_files:
raise DistutilsOptionError('no compiled catalogs found')
if not os.path.isdir(self.output_dir):
os.mkdir(self.output_dir)
for idx, (locale, mo_file) in enumerate(mo_files):
js_file = js_files[idx]
log.info('generating messages javascript %r to %r',
mo_file, js_file)
with open(mo_file, 'rb') as infile:
t = Translations(infile, self.domain)
catalog = t._catalog
with open(js_file, 'w') as outfile:
write_js(outfile, catalog, self.domain, locale)
    class check_catalog(Command):
        """Check message catalog command for use ``setup.py`` scripts."""

        description = 'check message catalog files, like `msgfmt --check`'
        user_options = [
            ('domain=', 'D',
             "domain of PO file (default 'messages')"),
            ('input-dir=', 'I',
             'path to base directory containing the catalogs'),
            ('input-file=', 'i',
             'name of the input file'),
            ('locale=', 'l',
             'locale of the catalog to compile'),
        ]

        def initialize_options(self):
            self.domain = 'messages'
            self.input_dir = None
            self.input_file = None
            self.locale = None

        def finalize_options(self):
            if not self.input_file and not self.input_dir:
                raise DistutilsOptionError('you must specify either the input '
                                           'file or directory')

        def run(self):
            # Warnings are reported per message; the command never fails.
            for filename in self._get_po_files():
                log.info('checking catalog %s', filename)
                f = open(filename)
                try:
                    catalog = read_po(f, domain=self.domain)
                finally:
                    f.close()
                for message in catalog:
                    for error in self._check_message(catalog, message):
                        log.warn('%s:%d: %s', filename, message.lineno, error)

        def _get_po_files(self):
            # Resolution order: explicit file > explicit locale > all locales
            # found under the input directory.
            if self.input_file:
                return [self.input_file]
            if self.locale:
                return [os.path.join(self.input_dir, self.locale,
                                     'LC_MESSAGES', self.domain + '.po')]
            files = []
            for locale in os.listdir(self.input_dir):
                filename = os.path.join(self.input_dir, locale, 'LC_MESSAGES',
                                        self.domain + '.po')
                if os.path.exists(filename):
                    files.append(filename)
            return sorted(files)

        def _check_message(self, catalog, message):
            # Babel's built-in checks plus the Trac-specific Genshi markup
            # balance check.
            errors = [e for e in message.check(catalog)]
            try:
                check_genshi_markup(catalog, message)
            except TranslationError, e:
                errors.append(e)
            return errors
def check_genshi_markup(catalog, message):
"""Verify the genshi markups in the translation."""
msgids = message.id
if not isinstance(msgids, (list, tuple)):
msgids = (msgids,)
msgstrs = message.string
if not isinstance(msgstrs, (list, tuple)):
msgstrs = (msgstrs,)
# check using genshi-markup
if not _GENSHI_MARKUP_SEARCH(msgids[0]):
return
for msgid, msgstr in izip(msgids, msgstrs):
if msgstr:
_validate_genshi_markup(msgid, msgstr)
def _validate_genshi_markup(markup, alternative):
indices_markup = _parse_genshi_markup(markup)
indices_alternative = _parse_genshi_markup(alternative)
indices = indices_markup - indices_alternative
if indices:
raise TranslationError(
'genshi markups are unbalanced %s' % \
' '.join(['[%d:]' % idx for idx in indices]))
    def _parse_genshi_markup(message):
        # Return the set of positive genshi markup indices ("[N:...]")
        # appearing in `message`; index 0 (the whole message) is excluded.
        # Parse failures are reported as TranslationError so callers treat
        # them as catalog problems, not crashes.
        from genshi.filters.i18n import parse_msg
        try:
            return set([idx for idx, text in parse_msg(message)
                            if idx > 0])
        except Exception, e:
            raise TranslationError('cannot parse message (%s: %s)' % \
                                   (e.__class__.__name__, unicode(e)))
    def write_js(fileobj, catalog, domain, locale):
        """Write a gettext catalog as a babel.js `Translations.load(...)`
        javascript snippet to `fileobj`.

        `catalog` is a raw gettext mapping (msgid -> msgstr; plural msgids
        are (singular, index) tuples; the empty msgid holds the PO headers).
        """
        from trac.util.presentation import to_json
        data = {'domain': domain, 'locale': locale}

        messages = {}
        for msgid, msgstr in catalog.iteritems():
            if isinstance(msgid, (list, tuple)):
                # Plural form: group all variants under the singular msgid.
                messages.setdefault(msgid[0], {})
                messages[msgid[0]][msgid[1]] = msgstr
            elif msgid:
                messages[msgid] = msgstr
            else:
                # Empty msgid carries the PO header block; extract the
                # Plural-Forms expression from it.
                for line in msgstr.splitlines():
                    line = line.strip()
                    if not line:
                        continue
                    if ':' not in line:
                        continue
                    name, val = line.split(':', 1)
                    name = name.strip().lower()
                    if name == 'plural-forms':
                        data['plural_expr'] = pluralexpr(val)
                        break
        data['messages'] = messages

        fileobj.write('// Generated messages javascript file '
                      'from compiled MO file\n')
        fileobj.write('babel.Translations.load(')
        fileobj.write(to_json(data).encode('utf-8'))
        fileobj.write(').install();\n')
def pluralexpr(forms):
match = re.search(r'\bplural\s*=\s*([^;]+)', forms)
if not match:
raise ValueError('Failed to parse plural_forms %r' % (forms,))
return match.group(1)
    def get_command_overriders():
        """Return (build, install_lib) command classes that trigger catalog
        compilation before the standard build/install steps."""
        # 'bdist_wininst' runs a 'build', so make the latter
        # run a 'compile_catalog' before 'build_py'
        class build(_build):
            sub_commands = [('compile_catalog', None)] + _build.sub_commands

        # 'bdist_egg' isn't that nice, all it does is an 'install_lib'
        class install_lib(_install_lib): # playing setuptools' own tricks ;-)
            def l10n_run(self):
                # Hook point: subclasses extend this with extra l10n steps.
                self.run_command('compile_catalog')

            def run(self):
                self.l10n_run()
                _install_lib.run(self)

        return build, install_lib
def get_l10n_cmdclass():
build, install_lib = get_command_overriders()
return {
'build': build, 'install_lib': install_lib,
'check_catalog': check_catalog,
}
def get_l10n_js_cmdclass():
build, _install_lib = get_command_overriders()
build.sub_commands.insert(0, ('generate_messages_js', None))
build.sub_commands.insert(0, ('compile_catalog_js', None))
class install_lib(_install_lib):
def l10n_run(self):
self.run_command('compile_catalog_js')
self.run_command('generate_messages_js')
self.run_command('compile_catalog')
return {
'build': build, 'install_lib': install_lib,
'check_catalog': check_catalog,
'extract_messages_js': extract_messages,
'init_catalog_js': init_catalog,
'compile_catalog_js': compile_catalog,
'update_catalog_js': update_catalog,
'generate_messages_js': generate_messages_js,
'check_catalog_js': check_catalog,
}
def get_l10n_trac_cmdclass():
build, _install_lib = get_command_overriders()
build.sub_commands.insert(0, ('generate_messages_js', None))
build.sub_commands.insert(0, ('compile_catalog_js', None))
build.sub_commands.insert(0, ('compile_catalog_tracini', None))
class install_lib(_install_lib):
def l10n_run(self):
self.run_command('compile_catalog_tracini')
self.run_command('compile_catalog_js')
self.run_command('generate_messages_js')
self.run_command('compile_catalog')
return {
'build': build, 'install_lib': install_lib,
'check_catalog': check_catalog,
'extract_messages_js': extract_messages,
'init_catalog_js': init_catalog,
'compile_catalog_js': compile_catalog,
'update_catalog_js': update_catalog,
'generate_messages_js': generate_messages_js,
'check_catalog_js': check_catalog,
'extract_messages_tracini': extract_messages,
'init_catalog_tracini': init_catalog,
'compile_catalog_tracini': compile_catalog,
'update_catalog_tracini': update_catalog,
'check_catalog_tracini': check_catalog,
}
except ImportError:
    # Babel is not installed: provide no-op factories (returning None) so
    # setup.py can still import these names; the l10n commands are simply
    # unavailable.
    def get_l10n_cmdclass():
        return

    def get_l10n_js_cmdclass():
        return

    def get_l10n_trac_cmdclass():
        return
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
from ConfigParser import ConfigParser
from copy import deepcopy
import os.path
from trac.admin import AdminCommandError, IAdminCommandProvider
from trac.core import *
from trac.util import AtomicFile, as_bool
from trac.util.compat import cleandoc
from trac.util.text import printout, to_unicode, CRLF
from trac.util.translation import _, N_
# Public API of this module.
__all__ = ['Configuration', 'ConfigSection', 'Option', 'BoolOption',
           'IntOption', 'FloatOption', 'ListOption', 'ChoiceOption',
           'PathOption', 'ExtensionOption', 'OrderedExtensionsOption',
           'ConfigurationError']

# Retained for backward-compatibility, use as_bool() instead
_TRUE_VALUES = ('yes', 'true', 'enabled', 'on', 'aye', '1', 1, True)

# Sentinel distinguishing "no default given" from an explicit None default.
_use_default = object()

def _to_utf8(basestr):
    # Normalize any str/unicode input to a UTF-8 encoded byte string, the
    # representation used by the underlying ConfigParser.
    return to_unicode(basestr).encode('utf-8')
class ConfigurationError(TracError):
    """Exception raised when a value in the configuration file is not valid."""
    # Error-page title; N_ marks it for translation without translating here.
    title = N_('Configuration Error')
class Configuration(object):
"""Thin layer over `ConfigParser` from the Python standard library.
In addition to providing some convenience methods, the class remembers
the last modification time of the configuration file, and reparses it
when the file has changed.
"""
    def __init__(self, filename, params={}):
        # NOTE(review): the mutable default `params` is accepted but not used
        # in the visible code -- presumably consumed elsewhere; confirm.
        self.filename = filename
        self.parser = ConfigParser()
        # Snapshot of parser sections used to roll back failed save()s.
        self._old_sections = {}
        # Parent (inherited) Configuration objects, searched as fallbacks.
        self.parents = []
        self._lastmtime = 0
        # Cache of lazily-created Section wrappers, keyed by section name.
        self._sections = {}
        self.parse_if_needed(force=True)

    def __contains__(self, name):
        """Return whether the configuration contains a section of the given
        name.
        """
        return name in self.sections()

    def __getitem__(self, name):
        """Return the configuration section with the specified name."""
        # Sections are created lazily and cached so repeated lookups return
        # the same Section object.
        if name not in self._sections:
            self._sections[name] = Section(self, name)
        return self._sections[name]

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.filename)
    def get(self, section, key, default=''):
        """Return the value of the specified option.

        Valid default input is a string. Returns a string.
        """
        return self[section].get(key, default)

    def getbool(self, section, key, default=''):
        """Return the specified option as boolean value.

        If the value of the option is one of "yes", "true", "enabled", "on",
        or "1", this method will return `True`, otherwise `False`.

        Valid default input is a string or a bool. Returns a bool.

        (since Trac 0.9.3, "enabled" added in 0.11)
        """
        return self[section].getbool(key, default)

    def getint(self, section, key, default=''):
        """Return the value of the specified option as integer.

        If the specified option can not be converted to an integer, a
        `ConfigurationError` exception is raised.

        Valid default input is a string or an int. Returns an int.

        (since Trac 0.10)
        """
        return self[section].getint(key, default)

    def getfloat(self, section, key, default=''):
        """Return the value of the specified option as float.

        If the specified option can not be converted to a float, a
        `ConfigurationError` exception is raised.

        Valid default input is a string, float or int. Returns a float.

        (since Trac 0.12)
        """
        return self[section].getfloat(key, default)

    def getlist(self, section, key, default='', sep=',', keep_empty=False):
        """Return a list of values that have been specified as a single
        comma-separated option.

        A different separator can be specified using the `sep` parameter. If
        the `keep_empty` parameter is set to `True`, empty elements are
        included in the list.

        Valid default input is a string or a list. Returns a list.

        (since Trac 0.10)
        """
        return self[section].getlist(key, default, sep, keep_empty)

    def getpath(self, section, key, default=''):
        """Return a configuration value as an absolute path.

        Relative paths are resolved relative to the location of this
        configuration file.

        Valid default input is a string. Returns a normalized path.

        (enabled since Trac 0.11.5)
        """
        return self[section].getpath(key, default)
    def set(self, section, key, value):
        """Change a configuration value.

        These changes are not persistent unless saved with `save()`.
        """
        self[section].set(key, value)

    def defaults(self, compmgr=None):
        """Returns a dictionary of the default configuration values
        (''since 0.10'').

        If `compmgr` is specified, return only options declared in components
        that are enabled in the given `ComponentManager`.
        """
        defaults = {}
        # Registry keys are (section, option-name) pairs.
        for (section, key), option in Option.get_registry(compmgr).items():
            defaults.setdefault(section, {})[key] = option.default
        return defaults

    def options(self, section, compmgr=None):
        """Return a list of `(name, value)` tuples for every option in the
        specified section.

        This includes options that have default values that haven't been
        overridden. If `compmgr` is specified, only return default option
        values for components that are enabled in the given `ComponentManager`.
        """
        return self[section].options(compmgr)

    def remove(self, section, key):
        """Remove the specified option."""
        self[section].remove(key)
def sections(self, compmgr=None, defaults=True):
"""Return a list of section names.
If `compmgr` is specified, only the section names corresponding to
options declared in components that are enabled in the given
`ComponentManager` are returned.
"""
sections = set([to_unicode(s) for s in self.parser.sections()])
for parent in self.parents:
sections.update(parent.sections(compmgr, defaults=False))
if defaults:
sections.update(self.defaults(compmgr))
return sorted(sections)
    def has_option(self, section, option, defaults=True):
        """Returns True if option exists in section in either the project
        trac.ini or one of the parents, or is available through the Option
        registry.
        (since Trac 0.11)
        """
        # The underlying parser stores UTF-8 encoded `str` keys.
        section_str = _to_utf8(section)
        if self.parser.has_section(section_str):
            if _to_utf8(option) in self.parser.options(section_str):
                return True
        # Check inherited files without consulting the registry again; the
        # global Option registry is checked exactly once, below.
        for parent in self.parents:
            if parent.has_option(section, option, defaults=False):
                return True
        return defaults and (section, option) in Option.registry
    def save(self):
        """Write the configuration options to the primary file."""
        if not self.filename:
            return
        # Only save options that differ from the defaults
        sections = []
        for section in self.sections():
            section_str = _to_utf8(section)
            options = []
            for option in self[section]:
                default_str = None
                # The first parent defining the option provides the inherited
                # value this one is compared against.
                for parent in self.parents:
                    if parent.has_option(section, option, defaults=False):
                        default_str = _to_utf8(parent.get(section, option))
                        break
                option_str = _to_utf8(option)
                # `False` acts as a sentinel for "not set in this file".
                current_str = False
                if self.parser.has_option(section_str, option_str):
                    current_str = self.parser.get(section_str, option_str)
                if current_str is not False and current_str != default_str:
                    options.append((option_str, current_str))
            if options:
                sections.append((section_str, sorted(options)))
        # At this point, all the strings in `sections` are UTF-8 encoded `str`
        try:
            with AtomicFile(self.filename, 'w') as fileobj:
                fileobj.write('# -*- coding: utf-8 -*-\n\n')
                for section, options in sections:
                    fileobj.write('[%s]\n' % section)
                    for key_str, val_str in options:
                        # Options reverted to "inherited" are written out
                        # commented, so the inherited value takes effect.
                        if to_unicode(key_str) in self[section].overridden:
                            fileobj.write('# %s = <inherited>\n' % key_str)
                        else:
                            # Continuation lines get a leading space, as
                            # expected by ConfigParser on re-read.
                            val_str = val_str.replace(CRLF, '\n') \
                                             .replace('\n', '\n ')
                            fileobj.write('%s = %s\n' % (key_str, val_str))
                    fileobj.write('\n')
            self._old_sections = deepcopy(self.parser._sections)
        except Exception:
            # Revert all changes to avoid inconsistencies
            self.parser._sections = deepcopy(self._old_sections)
            raise
    def parse_if_needed(self, force=False):
        """(Re-)read the configuration file if it changed on disk.
        With `force`, the file is re-read unconditionally.  Returns `True`
        if this file or any inherited file was re-read.
        """
        if not self.filename or not os.path.isfile(self.filename):
            return False
        changed = False
        modtime = os.path.getmtime(self.filename)
        if force or modtime > self._lastmtime:
            self._sections = {}
            self.parser._sections = {}
            if not self.parser.read(self.filename):
                raise TracError(_("Error reading '%(file)s', make sure it is "
                                  "readable.", file=self.filename))
            self._lastmtime = modtime
            self._old_sections = deepcopy(self.parser._sections)
            changed = True
        if changed:
            # Re-resolve the [inherit] file list; relative paths are taken
            # relative to the directory containing this file.
            self.parents = []
            if self.parser.has_option('inherit', 'file'):
                for filename in self.parser.get('inherit', 'file').split(','):
                    filename = to_unicode(filename.strip())
                    if not os.path.isabs(filename):
                        filename = os.path.join(os.path.dirname(self.filename),
                                                filename)
                    self.parents.append(Configuration(filename))
        else:
            # This file is unchanged, but a parent may still have changed.
            for parent in self.parents:
                changed |= parent.parse_if_needed(force=force)
        if changed:
            # Invalidate memoized Section proxies.
            self._cache = {}
        return changed
def touch(self):
if self.filename and os.path.isfile(self.filename) \
and os.access(self.filename, os.W_OK):
os.utime(self.filename, None)
    def set_defaults(self, compmgr=None):
        """Retrieve all default values and store them explicitly in the
        configuration, so that they can be saved to file.
        Values already set in the configuration are not overridden.
        """
        for section, default_options in self.defaults(compmgr).items():
            for name, value in default_options.items():
                if not self.parser.has_option(_to_utf8(section),
                                              _to_utf8(name)):
                    # An option already provided by a parent is stored as
                    # `None`, which `save()` writes out as "<inherited>".
                    if any(parent[section].contains(name, defaults=False)
                           for parent in self.parents):
                        value = None
                    self.set(section, name, value)
class Section(object):
    """Proxy for a specific configuration section.

    Objects of this class should not be instantiated directly.
    """
    __slots__ = ['config', 'name', 'overridden', '_cache']

    def __init__(self, config, name):
        self.config = config      # owning Configuration instance
        self.name = name          # section name
        self.overridden = {}      # keys reverted to their inherited value
        self._cache = {}          # memoized option lookups

    def contains(self, key, defaults=True):
        """Return `True` if `key` is set in this file, in a parent
        configuration, or (with `defaults`) declared in the `Option`
        registry.
        """
        if self.config.parser.has_option(_to_utf8(self.name), _to_utf8(key)):
            return True
        for parent in self.config.parents:
            if parent[self.name].contains(key, defaults=False):
                return True
        # Membership test instead of the deprecated `dict.has_key`,
        # consistent with `Configuration.has_option`.  Behavior is
        # identical for dicts.
        return defaults and (self.name, key) in Option.registry

    __contains__ = contains

    def iterate(self, compmgr=None, defaults=True):
        """Iterate over the options in this section.

        If `compmgr` is specified, only return default option values for
        components that are enabled in the given `ComponentManager`.
        """
        options = set()
        name_str = _to_utf8(self.name)
        if self.config.parser.has_section(name_str):
            for option_str in self.config.parser.options(name_str):
                option = to_unicode(option_str)
                options.add(option.lower())
                yield option
        # Options only present in parents come next, deduplicated
        # case-insensitively against what was already yielded.
        for parent in self.config.parents:
            for option in parent[self.name].iterate(defaults=False):
                loption = option.lower()
                if loption not in options:
                    options.add(loption)
                    yield option
        if defaults:
            for section, option in Option.get_registry(compmgr).keys():
                if section == self.name and option.lower() not in options:
                    yield option

    __iter__ = iterate

    def __repr__(self):
        return '<%s [%s]>' % (self.__class__.__name__, self.name)

    def get(self, key, default=''):
        """Return the value of the specified option.

        Valid default input is a string. Returns a string.
        """
        cached = self._cache.get(key, _use_default)
        if cached is not _use_default:
            return cached
        name_str = _to_utf8(self.name)
        key_str = _to_utf8(key)
        if self.config.parser.has_option(name_str, key_str):
            value = self.config.parser.get(name_str, key_str)
        else:
            # Not set here: try the parents, then the Option registry.
            for parent in self.config.parents:
                value = parent[self.name].get(key, _use_default)
                if value is not _use_default:
                    break
            else:
                if default is not _use_default:
                    option = Option.registry.get((self.name, key))
                    value = option.default if option else _use_default
                else:
                    value = _use_default
        if value is _use_default:
            return default
        # Normalize: falsy values become u'', strings become unicode.
        if not value:
            value = u''
        elif isinstance(value, basestring):
            value = to_unicode(value)
        self._cache[key] = value
        return value

    def getbool(self, key, default=''):
        """Return the value of the specified option as boolean.

        This method returns `True` if the option value is one of "yes",
        "true", "enabled", "on", or non-zero numbers, ignoring case.
        Otherwise `False` is returned.

        Valid default input is a string or a bool. Returns a bool.
        """
        return as_bool(self.get(key, default))

    def getint(self, key, default=''):
        """Return the value of the specified option as integer.

        If the specified option can not be converted to an integer, a
        `ConfigurationError` exception is raised.

        Valid default input is a string or an int. Returns an int.
        """
        value = self.get(key, default)
        if not value:
            return 0
        try:
            return int(value)
        except ValueError:
            raise ConfigurationError(
                _('[%(section)s] %(entry)s: expected integer, got %(value)s',
                  section=self.name, entry=key, value=repr(value)))

    def getfloat(self, key, default=''):
        """Return the value of the specified option as float.

        If the specified option can not be converted to a float, a
        `ConfigurationError` exception is raised.

        Valid default input is a string, float or int. Returns a float.
        """
        value = self.get(key, default)
        if not value:
            return 0.0
        try:
            return float(value)
        except ValueError:
            raise ConfigurationError(
                _('[%(section)s] %(entry)s: expected float, got %(value)s',
                  section=self.name, entry=key, value=repr(value)))

    def getlist(self, key, default='', sep=',', keep_empty=True):
        """Return a list of values that have been specified as a single
        comma-separated option.

        A different separator can be specified using the `sep` parameter.
        If the `keep_empty` parameter is set to `False`, empty elements are
        omitted from the list.

        Valid default input is a string or a list. Returns a list.
        """
        value = self.get(key, default)
        if not value:
            return []
        if isinstance(value, basestring):
            items = [item.strip() for item in value.split(sep)]
        else:
            items = list(value)
        if not keep_empty:
            items = [item for item in items if item not in (None, '')]
        return items

    def getpath(self, key, default=''):
        """Return the value of the specified option as a path, relative to
        the location of this configuration file.

        Valid default input is a string. Returns a normalized path.
        """
        path = self.get(key, default)
        if not path:
            return default
        if not os.path.isabs(path):
            path = os.path.join(os.path.dirname(self.config.filename), path)
        return os.path.normcase(os.path.realpath(path))

    def options(self, compmgr=None):
        """Return `(key, value)` tuples for every option in the section.

        This includes options that have default values that haven't been
        overridden. If `compmgr` is specified, only return default option
        values for components that are enabled in the given
        `ComponentManager`.
        """
        for key in self.iterate(compmgr):
            yield key, self.get(key)

    def set(self, key, value):
        """Change a configuration value.

        These changes are not persistent unless saved with `save()`.
        """
        self._cache.pop(key, None)
        name_str = _to_utf8(self.name)
        key_str = _to_utf8(key)
        if not self.config.parser.has_section(name_str):
            self.config.parser.add_section(name_str)
        if value is None:
            # `None` marks the option as reverting to the inherited value;
            # `Configuration.save()` writes it out commented.
            self.overridden[key] = True
            value_str = ''
        else:
            value_str = _to_utf8(value)
        return self.config.parser.set(name_str, key_str, value_str)

    def remove(self, key):
        """Delete a key from this section.

        Like for `set()`, the changes won't persist until `save()` gets
        called.
        """
        name_str = _to_utf8(self.name)
        if self.config.parser.has_section(name_str):
            self._cache.pop(key, None)
            self.config.parser.remove_option(_to_utf8(self.name),
                                             _to_utf8(key))
def _get_registry(cls, compmgr=None):
    """Return the descriptor registry.
    If `compmgr` is specified, only return descriptors for components that
    are enabled in the given `ComponentManager`.
    """
    if compmgr is None:
        return cls.registry
    from trac.core import ComponentMeta
    # Map each descriptor instance to the component class declaring it, so
    # descriptors of disabled components can be filtered out below.
    components = {}
    for comp in ComponentMeta._components:
        for attr in comp.__dict__.itervalues():
            if isinstance(attr, cls):
                components[attr] = comp
    # Keep descriptors not declared in any component, or whose component
    # is enabled in `compmgr`.
    return dict(each for each in cls.registry.iteritems()
                if each[1] not in components
                or compmgr.is_enabled(components[each[1]]))
class ConfigSection(object):
    """Descriptor for configuration sections."""
    # Maps section name -> ConfigSection instance for all declared sections.
    registry = {}
    @staticmethod
    def get_registry(compmgr=None):
        """Return the section registry, as a `dict` mapping section names to
        `ConfigSection` objects.
        If `compmgr` is specified, only return sections for components that are
        enabled in the given `ComponentManager`.
        """
        return _get_registry(ConfigSection, compmgr)
    def __init__(self, name, doc, doc_domain='tracini'):
        """Create the configuration section.
        :param name: the section name
        :param doc: documentation of the section
        :param doc_domain: translation domain of the documentation
        """
        self.name = name
        # Registration happens at class-definition time of the declaring
        # component, since this is a class attribute descriptor.
        self.registry[self.name] = self
        self.__doc__ = cleandoc(doc)
        self.doc_domain = doc_domain
    def __get__(self, instance, owner):
        # On an instance with a `config` attribute (typically a Component),
        # return the live Section proxy; on the class, the descriptor itself.
        if instance is None:
            return self
        config = getattr(instance, 'config', None)
        if config and isinstance(config, Configuration):
            return config[self.name]
    def __repr__(self):
        return '<%s [%s]>' % (self.__class__.__name__, self.name)
class Option(object):
    """Descriptor for configuration options."""
    # Maps (section, key) -> Option instance for all declared options.
    registry = {}
    def accessor(self, section, name, default):
        # Overridden by subclasses to apply type conversion on retrieval.
        return section.get(name, default)
    @staticmethod
    def get_registry(compmgr=None):
        """Return the option registry, as a `dict` mapping `(section, key)`
        tuples to `Option` objects.
        If `compmgr` is specified, only return options for components that are
        enabled in the given `ComponentManager`.
        """
        return _get_registry(Option, compmgr)
    def __init__(self, section, name, default=None, doc='',
                 doc_domain='tracini'):
        """Create the configuration option.
        @param section: the name of the configuration section this option
                        belongs to
        @param name: the name of the option
        @param default: the default value for the option
        @param doc: documentation of the option
        """
        self.section = section
        self.name = name
        self.default = default
        # Registration happens at class-definition time of the declaring
        # component, since this is a class attribute descriptor.
        self.registry[(self.section, self.name)] = self
        self.__doc__ = cleandoc(doc)
        self.doc_domain = doc_domain
    def __get__(self, instance, owner):
        if instance is None:
            return self
        config = getattr(instance, 'config', None)
        if config and isinstance(config, Configuration):
            section = config[self.section]
            value = self.accessor(section, self.name, self.default)
            return value
    def __set__(self, instance, value):
        # Options are read-only attributes (Python 2 raise syntax).
        raise AttributeError, 'can\'t set attribute'
    def __repr__(self):
        return '<%s [%s] "%s">' % (self.__class__.__name__, self.section,
                                   self.name)
class BoolOption(Option):
    """Descriptor for boolean configuration options."""
    def accessor(self, section, name, default):
        # Delegates "yes"/"true"/"on"/... parsing to `Section.getbool`.
        return section.getbool(name, default)
class IntOption(Option):
    """Descriptor for integer configuration options."""
    def accessor(self, section, name, default):
        # Raises ConfigurationError via `Section.getint` on bad values.
        return section.getint(name, default)
class FloatOption(Option):
    """Descriptor for float configuration options."""
    def accessor(self, section, name, default):
        # Raises ConfigurationError via `Section.getfloat` on bad values.
        return section.getfloat(name, default)
class ListOption(Option):
    """Descriptor for configuration options that contain multiple values
    separated by a specific character.
    """
    def __init__(self, section, name, default=None, sep=',', keep_empty=False,
                 doc='', doc_domain='tracini'):
        """Create the list option.
        :param sep: separator character between items
        :param keep_empty: if `False` (the default here), empty items are
                           dropped from the resulting list
        """
        Option.__init__(self, section, name, default, doc, doc_domain)
        self.sep = sep
        self.keep_empty = keep_empty
    def accessor(self, section, name, default):
        return section.getlist(name, default, self.sep, self.keep_empty)
class ChoiceOption(Option):
    """Descriptor for configuration options providing a choice among a list
    of items.
    The default value is the first choice in the list.
    """
    def __init__(self, section, name, choices, doc='', doc_domain='tracini'):
        # The first choice doubles as the default value.
        Option.__init__(self, section, name, _to_utf8(choices[0]), doc,
                        doc_domain)
        self.choices = set(_to_utf8(choice).strip() for choice in choices)
    def accessor(self, section, name, default):
        """Return the configured value, raising `ConfigurationError` when
        it is not one of the declared choices."""
        value = section.get(name, default)
        if value not in self.choices:
            raise ConfigurationError(
                _('[%(section)s] %(entry)s: expected one of '
                  '(%(choices)s), got %(value)s',
                  section=section.name, entry=name, value=repr(value),
                  choices=', '.join('"%s"' % c
                                    for c in sorted(self.choices))))
        return value
class PathOption(Option):
    """Descriptor for file system path configuration options.
    Relative paths are resolved to absolute paths using the directory
    containing the configuration file as the reference.
    """
    def accessor(self, section, name, default):
        return section.getpath(name, default)
class ExtensionOption(Option):
    """Descriptor for options naming a single implementation of an
    interface; reading the option yields the enabled component instance."""
    def __init__(self, section, name, interface, default=None, doc='',
                 doc_domain='tracini'):
        Option.__init__(self, section, name, default, doc, doc_domain)
        self.xtnpt = ExtensionPoint(interface)
    def __get__(self, instance, owner):
        if instance is None:
            return self
        value = Option.__get__(self, instance, owner)
        # Resolve the configured class name against the enabled
        # implementations of the extension point.
        for impl in self.xtnpt.extensions(instance):
            if impl.__class__.__name__ == value:
                return impl
        raise AttributeError('Cannot find an implementation of the "%s" '
                             'interface named "%s". Please update the option '
                             '%s.%s in trac.ini.'
                             % (self.xtnpt.interface.__name__, value,
                                self.section, self.name))
class OrderedExtensionsOption(ListOption):
    """A comma separated, ordered, list of components implementing `interface`.
    Can be empty.
    If `include_missing` is true (the default) all components implementing the
    interface are returned, with those specified by the option ordered first."""
    def __init__(self, section, name, interface, default=None,
                 include_missing=True, doc='', doc_domain='tracini'):
        ListOption.__init__(self, section, name, default, doc=doc,
                            doc_domain=doc_domain)
        self.xtnpt = ExtensionPoint(interface)
        self.include_missing = include_missing
    def __get__(self, instance, owner):
        if instance is None:
            return self
        order = ListOption.__get__(self, instance, owner)
        components = []
        for impl in self.xtnpt.extensions(instance):
            if self.include_missing or impl.__class__.__name__ in order:
                components.append(impl)
        def compare(x, y):
            # Listed components sort before unlisted ones, in list order
            # (Python 2 cmp-style comparator).
            x, y = x.__class__.__name__, y.__class__.__name__
            if x not in order:
                return int(y in order)
            if y not in order:
                return -int(x in order)
            return cmp(order.index(x), order.index(y))
        components.sort(compare)
        return components
class ConfigurationAdmin(Component):
    """trac-admin command provider for trac.ini administration."""
    implements(IAdminCommandProvider)
    # IAdminCommandProvider methods
    def get_admin_commands(self):
        """Enumerate the `config *` trac-admin sub-commands."""
        yield ('config get', '<section> <option>',
               'Get the value of the given option in "trac.ini"',
               self._complete_config, self._do_get)
        yield ('config remove', '<section> <option>',
               'Remove the specified option from "trac.ini"',
               self._complete_config, self._do_remove)
        yield ('config set', '<section> <option> <value>',
               'Set the value for the given option in "trac.ini"',
               self._complete_config, self._do_set)
    def _complete_config(self, args):
        # Tab-completion: first argument is a section name, second an
        # option name within the chosen section.
        if len(args) == 1:
            return self.config.sections()
        elif len(args) == 2:
            return [name for (name, value) in self.config[args[0]].options()]
    def _do_get(self, section, option):
        """Print the value of an existing option."""
        if not self.config.has_option(section, option):
            raise AdminCommandError(
                _("Option '%(option)s' doesn't exist in section '%(section)s'",
                  option=option, section=section))
        printout(self.config.get(section, option))
    def _do_set(self, section, option, value):
        """Set an option value and persist it immediately."""
        self.config.set(section, option, value)
        self.config.save()
        # Changing the inherited file requires re-reading everything.
        if section == 'inherit' and option == 'file':
            self.config.parse_if_needed(force=True)  # Full reload
    def _do_remove(self, section, option):
        """Remove an existing option and persist the change."""
        if not self.config.has_option(section, option):
            raise AdminCommandError(
                _("Option '%(option)s' doesn't exist in section '%(section)s'",
                  option=option, section=section))
        self.config.remove(section, option)
        self.config.save()
        if section == 'inherit' and option == 'file':
            self.config.parse_if_needed(force=True)  # Full reload
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/__init__.py | trac/trac/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2012 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from pkg_resources import DistributionNotFound, get_distribution

try:
    # Prefer the installed distribution's metadata for the version string.
    __version__ = get_distribution('Trac').version
except DistributionNotFound:
    # Not installed as a distribution; fall back to the known release.
    __version__ = '1.0.1'

try:
    # Bloodhound installs global hooks when available; this is strictly
    # best-effort, so any failure is ignored.
    from hooks import install_global_hooks
    install_global_hooks()
except Exception:
    # Was a bare `except:`, which would also swallow SystemExit and
    # KeyboardInterrupt; `Exception` keeps the best-effort behavior
    # without masking interpreter exits.
    pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/notification.py | trac/trac/notification.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2005-2006 Emmanuel Blot <emmanuel.blot@free.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import re
import smtplib
from subprocess import Popen, PIPE
import time
from genshi.builder import tag
from trac import __version__
from trac.config import BoolOption, ExtensionOption, IntOption, Option
from trac.core import *
from trac.util.compat import close_fds
from trac.util.text import CRLF, fix_eol
from trac.util.translation import _, deactivate, reactivate
# Maximum length of a header line before folding (RFC 2822 recommends
# limiting lines to 78 characters).
MAXHEADERLEN = 76
EMAIL_LOOKALIKE_PATTERN = (
        # the local part
        r"[a-zA-Z0-9.'+_-]+" '@'
        # the domain name part (RFC:1035)
        '(?:[a-zA-Z0-9_-]+\.)+' # labels (but also allow '_')
        '[a-zA-Z](?:[-a-zA-Z\d]*[a-zA-Z\d])?' # TLD
        )
class IEmailSender(Interface):
    """Extension point interface for components that allow sending e-mail."""
    def send(self, from_addr, recipients, message):
        """Send message to recipients.
        :param from_addr: envelope sender address
        :param recipients: list of envelope recipient addresses
        :param message: the complete message as a string
        """
class NotificationSystem(Component):
    """Declares the [notification] configuration options and dispatches
    outgoing email to the configured `IEmailSender` implementation."""
    email_sender = ExtensionOption('notification', 'email_sender',
                                   IEmailSender, 'SmtpEmailSender',
        """Name of the component implementing `IEmailSender`.
        This component is used by the notification system to send emails.
        Trac currently provides `SmtpEmailSender` for connecting to an SMTP
        server, and `SendmailEmailSender` for running a `sendmail`-compatible
        executable. (''since 0.12'')""")
    smtp_enabled = BoolOption('notification', 'smtp_enabled', 'false',
        """Enable email notification.""")
    smtp_from = Option('notification', 'smtp_from', 'trac@localhost',
        """Sender address to use in notification emails.""")
    smtp_from_name = Option('notification', 'smtp_from_name', '',
        """Sender name to use in notification emails.""")
    smtp_from_author = BoolOption('notification', 'smtp_from_author', 'false',
        """Use the action author as the sender of notification emails.
        (''since 1.0'')""")
    smtp_replyto = Option('notification', 'smtp_replyto', 'trac@localhost',
        """Reply-To address to use in notification emails.""")
    smtp_always_cc = Option('notification', 'smtp_always_cc', '',
        """Email address(es) to always send notifications to,
        addresses can be seen by all recipients (Cc:).""")
    smtp_always_bcc = Option('notification', 'smtp_always_bcc', '',
        """Email address(es) to always send notifications to,
        addresses do not appear publicly (Bcc:). (''since 0.10'')""")
    smtp_default_domain = Option('notification', 'smtp_default_domain', '',
        """Default host/domain to append to address that do not specify
        one.""")
    ignore_domains = Option('notification', 'ignore_domains', '',
        """Comma-separated list of domains that should not be considered
        part of email addresses (for usernames with Kerberos domains).""")
    admit_domains = Option('notification', 'admit_domains', '',
        """Comma-separated list of domains that should be considered as
        valid for email addresses (such as localdomain).""")
    mime_encoding = Option('notification', 'mime_encoding', 'none',
        """Specifies the MIME encoding scheme for emails.
        Valid options are 'base64' for Base64 encoding, 'qp' for
        Quoted-Printable, and 'none' for no encoding, in which case mails will
        be sent as 7bit if the content is all ASCII, or 8bit otherwise.
        (''since 0.10'')""")
    use_public_cc = BoolOption('notification', 'use_public_cc', 'false',
        """Recipients can see email addresses of other CC'ed recipients.
        If this option is disabled (the default), recipients are put on BCC.
        (''since 0.10'')""")
    use_short_addr = BoolOption('notification', 'use_short_addr', 'false',
        """Permit email address without a host/domain (i.e. username only).
        The SMTP server should accept those addresses, and either append
        a FQDN or use local delivery. (''since 0.10'')""")
    smtp_subject_prefix = Option('notification', 'smtp_subject_prefix',
                                 '__default__',
        """Text to prepend to subject line of notification emails.
        If the setting is not defined, then the [$project_name] prefix.
        If no prefix is desired, then specifying an empty option
        will disable it. (''since 0.10.1'')""")
    def send_email(self, from_addr, recipients, message):
        """Send message to recipients via e-mail."""
        # Delegates to whichever IEmailSender implementation is configured.
        self.email_sender.send(from_addr, recipients, message)
class SmtpEmailSender(Component):
    """E-mail sender connecting to an SMTP server."""

    implements(IEmailSender)

    smtp_server = Option('notification', 'smtp_server', 'localhost',
        """SMTP server hostname to use for email notifications.""")

    smtp_port = IntOption('notification', 'smtp_port', 25,
        """SMTP server port to use for email notification.""")

    smtp_user = Option('notification', 'smtp_user', '',
        """Username for SMTP server. (''since 0.9'')""")

    smtp_password = Option('notification', 'smtp_password', '',
        """Password for SMTP server. (''since 0.9'')""")

    use_tls = BoolOption('notification', 'use_tls', 'false',
        """Use SSL/TLS to send notifications over SMTP. (''since 0.10'')""")

    def send(self, from_addr, recipients, message):
        """Send `message` to `recipients` through the configured server.

        :param from_addr: envelope sender address
        :param recipients: list of envelope recipient addresses
        :param message: complete RFC 2822 message as a string
        """
        # Ensure the message complies with RFC2822: use CRLF line endings
        message = fix_eol(message, CRLF)
        self.log.info("Sending notification through SMTP at %s:%d to %s"
                      % (self.smtp_server, self.smtp_port, recipients))
        server = smtplib.SMTP(self.smtp_server, self.smtp_port)
        # server.set_debuglevel(True)
        if self.use_tls:
            server.ehlo()
            # Membership test instead of the deprecated `dict.has_key`
            # (behavior identical).
            if 'starttls' not in server.esmtp_features:
                raise TracError(_("TLS enabled but server does not support "
                                  "TLS"))
            server.starttls()
            server.ehlo()
        if self.smtp_user:
            server.login(self.smtp_user.encode('utf-8'),
                         self.smtp_password.encode('utf-8'))
        start = time.time()
        server.sendmail(from_addr, recipients, message)
        t = time.time() - start
        if t > 5:
            self.log.warning('Slow mail submission (%.2f s), '
                             'check your mail setup' % t)
        if self.use_tls:
            # avoid false failure detection when the server closes
            # the SMTP connection with TLS enabled
            import socket
            try:
                server.quit()
            except socket.sslerror:
                pass
        else:
            server.quit()
class SendmailEmailSender(Component):
    """E-mail sender using a locally-installed sendmail program."""
    implements(IEmailSender)
    sendmail_path = Option('notification', 'sendmail_path', 'sendmail',
        """Path to the sendmail executable.
        The sendmail program must accept the `-i` and `-f` options.
        (''since 0.12'')""")
    def send(self, from_addr, recipients, message):
        """Pipe `message` to sendmail with the given envelope addresses."""
        # Use native line endings in message
        message = fix_eol(message, os.linesep)
        self.log.info("Sending notification through sendmail at %s to %s"
                      % (self.sendmail_path, recipients))
        cmdline = [self.sendmail_path, "-i", "-f", from_addr]
        cmdline.extend(recipients)
        self.log.debug("Sendmail command line: %s" % cmdline)
        # Argument-list invocation (no shell) avoids quoting issues with
        # addresses.
        child = Popen(cmdline, bufsize=-1, stdin=PIPE, stdout=PIPE,
                      stderr=PIPE, close_fds=close_fds)
        out, err = child.communicate(message)
        # sendmail reporting anything on stderr is treated as failure, even
        # with a zero exit status.
        if child.returncode or err:
            raise Exception("Sendmail failed with (%s, %s), command: '%s'"
                            % (child.returncode, err.strip(), cmdline))
class Notify(object):
    """Generic notification class for Trac.
    Subclass this to implement different methods.
    """
    def __init__(self, env):
        self.env = env
        self.config = env.config
        # Load the text template named by the subclass' `template_name`.
        from trac.web.chrome import Chrome
        self.template = Chrome(self.env).load_template(self.template_name,
                                                       method='text')
        # FIXME: actually, we would need a different
        # PermissionCache for each recipient
        self.data = Chrome(self.env).populate_data(None, {'CRLF': CRLF})
    def notify(self, resid):
        """Resolve the recipients of `resid` and run one send cycle."""
        (torcpts, ccrcpts) = self.get_recipients(resid)
        self.begin_send()
        self.send(torcpts, ccrcpts)
        self.finish_send()
    def get_recipients(self, resid):
        """Return a pair of list of subscribers to the resource 'resid'.
        First list represents the direct recipients (To:), second list
        represents the recipients in carbon copy (Cc:).
        """
        raise NotImplementedError
    def begin_send(self):
        """Prepare to send messages.
        Called before sending begins.
        """
    def send(self, torcpts, ccrcpts):
        """Send message to recipients."""
        raise NotImplementedError
    def finish_send(self):
        """Clean up after sending all messages.
        Called after sending all messages.
        """
class NotifyEmail(Notify):
    """Baseclass for notification by email."""
    from_email = 'trac+tickets@localhost'
    subject = ''
    template_name = None
    # Matches a bare username without any domain part.
    nodomaddr_re = re.compile(r'[\w\d_\.\-]+')
    # Separators accepted between addresses in a recipient list.
    addrsep_re = re.compile(r'[;\s,]+')
    def __init__(self, env):
        """Build the address-matching regexps and the known-user maps."""
        Notify.__init__(self, env)
        addrfmt = EMAIL_LOOKALIKE_PATTERN
        admit_domains = self.env.config.get('notification', 'admit_domains')
        if admit_domains:
            # Extend the domain part of the pattern with the explicitly
            # admitted domains (e.g. 'localdomain').
            pos = addrfmt.find('@')
            domains = '|'.join([x.strip() for x in \
                                admit_domains.replace('.','\.').split(',')])
            addrfmt = r'%s@(?:(?:%s)|%s)' % (addrfmt[:pos], addrfmt[pos+1:],
                                             domains)
        self.shortaddr_re = re.compile(r'\s*(%s)\s*$' % addrfmt)
        self.longaddr_re = re.compile(r'^\s*(.*)\s+<\s*(%s)\s*>\s*$' % addrfmt)
        self._init_pref_encoding()
        domains = self.env.config.get('notification', 'ignore_domains', '')
        self._ignore_domains = [x.strip() for x in domains.lower().split(',')]
        # Get the name and email addresses of all known users
        self.name_map = {}
        self.email_map = {}
        for username, name, email in self.env.get_known_users():
            if name:
                self.name_map[username] = name
            if email:
                self.email_map[username] = email
    def _init_pref_encoding(self):
        """Configure the MIME charset from the `mime_encoding` setting."""
        from email.Charset import Charset, QP, BASE64, SHORTEST
        self._charset = Charset()
        self._charset.input_charset = 'utf-8'
        self._charset.output_charset = 'utf-8'
        self._charset.input_codec = 'utf-8'
        self._charset.output_codec = 'utf-8'
        pref = self.env.config.get('notification', 'mime_encoding').lower()
        if pref == 'base64':
            self._charset.header_encoding = BASE64
            self._charset.body_encoding = BASE64
        elif pref in ['qp', 'quoted-printable']:
            self._charset.header_encoding = QP
            self._charset.body_encoding = QP
        elif pref == 'none':
            # SHORTEST lets the email package pick 7bit for all-ASCII text.
            self._charset.header_encoding = SHORTEST
            self._charset.body_encoding = None
        else:
            raise TracError(_('Invalid email encoding setting: %(pref)s',
                              pref=pref))
    def notify(self, resid, subject, author=None):
        """Send the notification for `resid` with the given `subject`.
        The sender identity is derived from `author` when
        `smtp_from_author` is enabled, otherwise from the configuration.
        """
        self.subject = subject
        config = self.config['notification']
        if not config.getbool('smtp_enabled'):
            return
        from_email, from_name = '', ''
        if author and config.getbool('smtp_from_author'):
            from_email = self.get_smtp_address(author)
            if from_email:
                from_name = self.name_map.get(author, '')
                if not from_name:
                    # Fall back to the display name in "Name <email>" form.
                    mo = self.longaddr_re.search(author)
                    if mo:
                        from_name = mo.group(1)
        if not from_email:
            from_email = config.get('smtp_from')
            from_name = config.get('smtp_from_name') or self.env.project_name
        self.replyto_email = config.get('smtp_replyto')
        self.from_email = from_email or self.replyto_email
        self.from_name = from_name
        if not self.from_email and not self.replyto_email:
            raise TracError(tag(
                    tag.p(_('Unable to send email due to identity crisis.')),
                    tag.p(_('Neither %(from_)s nor %(reply_to)s are specified '
                            'in the configuration.',
                            from_=tag.b('notification.from'),
                            reply_to=tag.b('notification.reply_to')))),
                _('SMTP Notification Error'))
        Notify.notify(self, resid)
    def format_header(self, key, name, email=None):
        """Encode a header value, folded to fit within `MAXHEADERLEN`.
        Returns a `Header` for a bare name, or a `"name" <email>` string
        when `email` is given.
        """
        from email.Header import Header
        maxlength = MAXHEADERLEN-(len(key)+2)
        # Do not sent ridiculous short headers
        if maxlength < 10:
            raise TracError(_("Header length is too short"))
        try:
            # Plain ASCII names need no MIME encoding.
            tmp = name.encode('ascii')
            header = Header(tmp, 'ascii', maxlinelen=maxlength)
        except UnicodeEncodeError:
            header = Header(name, self._charset, maxlinelen=maxlength)
        if not email:
            return header
        else:
            return '"%s" <%s>' % (header, email)
    def add_headers(self, msg, headers):
        """Copy the `headers` dict into message `msg`, encoding values."""
        for h in headers:
            msg[h] = self.encode_header(h, headers[h])
    def get_smtp_address(self, address):
        """Try to resolve `address` into a usable SMTP address.
        Known usernames are mapped through their stored email address; bare
        usernames get the default domain appended when one is configured.
        Returns `None` when no valid address can be derived.
        """
        if not address:
            return None
        def is_email(address):
            # A '@' with a non-ignored domain qualifies as an email address.
            pos = address.find('@')
            if pos == -1:
                return False
            if address[pos+1:].lower() in self._ignore_domains:
                return False
            return True
        if address == 'anonymous':
            return None
        if address in self.email_map:
            address = self.email_map[address]
        elif not is_email(address) and NotifyEmail.nodomaddr_re.match(address):
            if self.config.getbool('notification', 'use_short_addr'):
                return address
            domain = self.config.get('notification', 'smtp_default_domain')
            if domain:
                address = "%s@%s" % (address, domain)
            else:
                self.env.log.info("Email address w/o domain: %s" % address)
                return None
        mo = self.shortaddr_re.search(address)
        if mo:
            return mo.group(1)
        mo = self.longaddr_re.search(address)
        if mo:
            return mo.group(2)
        self.env.log.info("Invalid email address: %s" % address)
        return None
    def encode_header(self, key, value):
        """Encode a single header value; `(name, email)` tuples and
        `Name <email>` strings are split and encoded part by part."""
        if isinstance(value, tuple):
            return self.format_header(key, value[0], value[1])
        mo = self.longaddr_re.match(value)
        if mo:
            return self.format_header(key, mo.group(1), mo.group(2))
        return self.format_header(key, value)
    def send(self, torcpts, ccrcpts, mime_headers={}):
        """Render the notification body and hand it to the email sender.
        :param torcpts: direct recipients (To:)
        :param ccrcpts: carbon-copy recipients (Cc:)
        :param mime_headers: extra headers merged into the message
        NOTE(review): the mutable default `mime_headers={}` is only ever
        read (iterated by `add_headers`), never mutated, so it is safe here
        -- but confirm before adding any mutation.
        """
        from email.MIMEText import MIMEText
        from email.Utils import formatdate
        stream = self.template.generate(**self.data)
        # don't translate the e-mail stream
        t = deactivate()
        try:
            body = stream.render('text', encoding='utf-8')
        finally:
            reactivate(t)
        public_cc = self.config.getbool('notification', 'use_public_cc')
        headers = {}
        headers['X-Mailer'] = 'Trac %s, by Edgewall Software' % __version__
        headers['X-Trac-Version'] = __version__
        headers['X-Trac-Project'] = self.env.project_name
        headers['X-URL'] = self.env.project_url
        headers['Precedence'] = 'bulk'
        headers['Auto-Submitted'] = 'auto-generated'
        headers['Subject'] = self.subject
        headers['From'] = (self.from_name, self.from_email) if self.from_name \
                          else self.from_email
        headers['Reply-To'] = self.replyto_email
        def build_addresses(rcpts):
            """Format and remove invalid addresses"""
            return filter(lambda x: x, \
                          [self.get_smtp_address(addr) for addr in rcpts])
        def remove_dup(rcpts, all):
            """Remove duplicates"""
            tmp = []
            for rcpt in rcpts:
                if not rcpt in all:
                    tmp.append(rcpt)
                    all.append(rcpt)
            return (tmp, all)
        toaddrs = build_addresses(torcpts)
        ccaddrs = build_addresses(ccrcpts)
        accparam = self.config.get('notification', 'smtp_always_cc')
        accaddrs = accparam and \
                   build_addresses(accparam.replace(',', ' ').split()) or []
        bccparam = self.config.get('notification', 'smtp_always_bcc')
        bccaddrs = bccparam and \
                   build_addresses(bccparam.replace(',', ' ').split()) or []
        # Deduplicate across To/Cc/always-Cc/always-Bcc, in that priority
        # order; `recipients` accumulates the full envelope recipient list.
        recipients = []
        (toaddrs, recipients) = remove_dup(toaddrs, recipients)
        (ccaddrs, recipients) = remove_dup(ccaddrs, recipients)
        (accaddrs, recipients) = remove_dup(accaddrs, recipients)
        (bccaddrs, recipients) = remove_dup(bccaddrs, recipients)
        # if there is not valid recipient, leave immediately
        if len(recipients) < 1:
            self.env.log.info('no recipient for a ticket notification')
            return
        pcc = accaddrs
        if public_cc:
            pcc += ccaddrs
            if toaddrs:
                headers['To'] = ', '.join(toaddrs)
        if pcc:
            headers['Cc'] = ', '.join(pcc)
        headers['Date'] = formatdate()
        msg = MIMEText(body, 'plain')
        # Message class computes the wrong type from MIMEText constructor,
        # which does not take a Charset object as initializer. Reset the
        # encoding type to force a new, valid evaluation
        del msg['Content-Transfer-Encoding']
        msg.set_charset(self._charset)
        self.add_headers(msg, headers)
        self.add_headers(msg, mime_headers)
        NotificationSystem(self.env).send_email(self.from_email, recipients,
                                                msg.as_string())
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/about.py | trac/trac/about.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import re
from genshi.builder import tag
from trac.core import *
from trac.loader import get_plugin_info
from trac.perm import IPermissionRequestor
from trac.util.translation import _
from trac.web import IRequestHandler
from trac.web.chrome import INavigationContributor
class AboutModule(Component):
    """"About Trac" page provider, showing version information from
    third-party packages, as well as configuration information."""

    required = True

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler)

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'about'

    def get_navigation_items(self, req):
        yield ('metanav', 'about',
               tag.a(_('About Trac'), href=req.href.about()))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['CONFIG_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        return re.match(r'/about(?:_trac)?(?:/.+)?$', req.path_info)

    def process_request(self, req):
        # Each section of the page is only filled in when the user is
        # allowed to see the corresponding resource.
        data = {'systeminfo': None, 'plugins': None, 'config': None}
        if 'CONFIG_VIEW' in req.perm('config', 'systeminfo'):
            data['systeminfo'] = self.env.get_systeminfo()
        if 'CONFIG_VIEW' in req.perm('config', 'plugins'):
            data['plugins'] = get_plugin_info(self.env)
        if 'CONFIG_VIEW' in req.perm('config', 'ini'):
            data['config'] = self._config_sections()
        return 'about.html', data, None

    def _config_sections(self):
        """Collect the configuration, section by section, flagging every
        option whose value differs from its registered default."""
        defaults = self.config.defaults(self.compmgr)
        sections = []
        for section in self.config.sections(self.compmgr):
            section_defaults = defaults.get(section, {})
            options = []
            for name, value in self.config.options(section, self.compmgr):
                default = section_defaults.get(name) or ''
                options.append({
                    'name': name, 'value': value,
                    'modified': unicode(value) != unicode(default)
                })
            sections.append({'name': section,
                             'options': sorted(options,
                                               key=lambda o: o['name'])})
        return sorted(sections, key=lambda s: s['name'])
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/test.py | trac/trac/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import doctest
import os
import unittest
import sys
try:
from babel import Locale
locale_en = Locale.parse('en_US')
except ImportError:
locale_en = None
from trac.config import Configuration
from trac.core import Component, ComponentManager
from trac.env import Environment
from trac.db.api import _parse_db_str, DatabaseManager
from trac.db.sqlite_backend import SQLiteConnection
from trac.db.util import ConnectionWrapper
import trac.db.postgres_backend
import trac.db.mysql_backend
from trac.ticket.default_workflow import load_workflow_config_snippet
from trac.util import translation
def Mock(bases=(), *initargs, **kw):
    """
    Simple factory for dummy classes that can be used as replacement for the
    real implementation in tests.

    Base classes for the mock can be specified using the first parameter, which
    must be either a tuple of class objects or a single class object. If the
    bases parameter is omitted, the base class of the mock will be object.

    So to create a mock that is derived from the builtin dict type, you can do:

    >>> mock = Mock(dict)
    >>> mock['foo'] = 'bar'
    >>> mock['foo']
    'bar'

    Attributes of the class are provided by any additional keyword parameters.

    >>> mock = Mock(foo='bar')
    >>> mock.foo
    'bar'

    Objects produces by this function have the special feature of not requiring
    the 'self' parameter on methods, because you should keep data at the scope
    of the test function. So you can just do:

    >>> mock = Mock(add=lambda x,y: x+y)
    >>> mock.add(1, 1)
    2

    To access attributes from the mock object from inside a lambda function,
    just access the mock itself:

    >>> mock = Mock(dict, do=lambda x: 'going to the %s' % mock[x])
    >>> mock['foo'] = 'bar'
    >>> mock.do('foo')
    'going to the bar'

    Because assignments or other types of statements don't work in lambda
    functions, assigning to a local variable from a mock function requires some
    extra work:

    >>> myvar = [None]
    >>> mock = Mock(set=lambda x: myvar.__setitem__(0, x))
    >>> mock.set(1)
    >>> myvar[0]
    1
    """
    # Normalize a single base class into a one-element tuple.
    if not isinstance(bases, tuple):
        bases = (bases,)
    mock_cls = type('Mock', bases, {})
    mock = mock_cls(*initargs)
    for name in kw:
        setattr(mock, name, kw[name])
    return mock
class MockPerm(object):
    """Fake permission class. Necessary as Mock can not be used with operator
    overloading."""

    # No authenticated user is associated with this permission cache.
    username = ''

    def has_permission(self, action, realm_or_resource=None, id=False,
                       version=False):
        # Every action is granted, regardless of resource scope.
        return True
    __contains__ = has_permission

    def __call__(self, realm_or_resource, id=False, version=False):
        # Scoping to a resource yields the same all-granting object.
        return self

    def require(self, action, realm_or_resource=None, id=False, version=False):
        # Never raises: all permissions are granted.
        pass
    assert_permission = require
class TestSetup(unittest.TestSuite):
    """
    Test suite decorator that allows a fixture to be setup for a complete
    suite of test cases.
    """

    def setUp(self):
        """Sets up the fixture, and sets self.fixture if needed"""
        pass

    def tearDown(self):
        """Tears down the fixture"""
        pass

    def _push_fixture(self):
        # Propagate self.fixture (when set by setUp) to every contained
        # test that knows how to receive it.
        if hasattr(self, 'fixture'):
            for test in self._tests:
                if hasattr(test, 'setFixture'):
                    test.setFixture(self.fixture)

    def run(self, result):
        """Setup the fixture (self.setUp), call .setFixture on all the tests,
        and tear down the fixture (self.tearDown)."""
        self.setUp()
        self._push_fixture()
        unittest.TestSuite.run(self, result)
        self.tearDown()
        return result

    def _wrapped_run(self, *args, **kwargs):
        "Python 2.7 / unittest2 compatibility - there must be a better way..."
        self.setUp()
        self._push_fixture()
        unittest.TestSuite._wrapped_run(self, *args, **kwargs)
        self.tearDown()
class TestCaseSetup(unittest.TestCase):
    """Base test case able to receive a shared fixture from an enclosing
    `TestSetup` suite."""
    def setFixture(self, fixture):
        # Called by the enclosing suite before the tests run.
        self.fixture = fixture
# -- Database utilities
def get_dburi():
    """Return the database URI to run the tests against.

    Taken from the `TRAC_TEST_DB_URI` environment variable, defaulting to
    an in-memory SQLite database. For Postgres, the 'tractest' schema is
    appended when no schema was given.
    """
    dburi = os.environ.get('TRAC_TEST_DB_URI')
    if not dburi:
        return 'sqlite::memory:'
    scheme, db_prop = _parse_db_str(dburi)
    if scheme == 'postgres' and not db_prop.get('params', {}).get('schema'):
        # Assume the schema 'tractest' for Postgres
        sep = '&' if '?' in dburi else '?'
        dburi += sep + 'schema=tractest'
    return dburi
def reset_sqlite_db(env, db_prop):
    """Delete all rows from every table of the SQLite database of `env`,
    keeping the schema itself intact.

    :param env: the Environment whose database is cleared
    :param db_prop: connection-property dict; unused for SQLite, kept for
                    signature symmetry with the other `reset_*_db` helpers
    :return: the list of table-name rows that were cleared
    """
    with env.db_transaction as db:
        tables = db("SELECT name FROM sqlite_master WHERE type='table'")
        for table in tables:
            # each row is a 1-tuple, which %-interpolates as its element
            db("DELETE FROM %s" % table)
        return tables
def reset_postgres_db(env, db_prop):
    """Restart all sequences and clear all rows in the PostgreSQL schema
    used by `env`.

    :return: the list of table-name rows that were cleared, or None when
             the connection has no schema associated.
    """
    with env.db_transaction as db:
        schema = db.schema
        if not schema:
            return None
        # reset sequences
        # information_schema.sequences view is available in PostgreSQL 8.2+
        # however Trac supports PostgreSQL 8.0+, uses
        # pg_get_serial_sequence()
        for seq, in db("""
                SELECT sequence_name FROM (
                  SELECT pg_get_serial_sequence(%s||table_name,
                                                column_name)
                         AS sequence_name
                  FROM information_schema.columns
                  WHERE table_schema=%s) AS tab
                WHERE sequence_name IS NOT NULL""",
                (schema + '.', schema)):
            db("ALTER SEQUENCE %s RESTART WITH 1" % seq)
        # clear tables
        tables = db("""SELECT table_name FROM information_schema.tables
                       WHERE table_schema=%s""", (schema,))
        for table in tables:
            db("DELETE FROM %s" % table)
        # PostgreSQL supports TRUNCATE TABLE as well
        # (see http://www.postgresql.org/docs/8.1/static/sql-truncate.html)
        # but on the small tables used here, DELETE is actually much faster
        return tables
def reset_mysql_db(env, db_prop):
    """Clear all rows from every table of the MySQL database of `env`.

    :return: the list of table-name rows that were cleared, or None when
             no database name could be derived from `db_prop`.
    """
    dbname = os.path.basename(db_prop['path'])
    if not dbname:
        return None
    with env.db_transaction as db:
        tables = db("""SELECT table_name FROM information_schema.tables
                       WHERE table_schema=%s""", (dbname,))
        for table in tables:
            # TRUNCATE TABLE is prefered to DELETE FROM, as we need to
            # reset the auto_increment in MySQL.
            db("TRUNCATE TABLE %s" % table)
        return tables
# -- Environment stub
class EnvironmentStub(Environment):
    """A stub of the trac.env.Environment object for testing.

    Unlike a real Environment, it is configured entirely in memory
    (no `trac.ini` on disk) and shares a single process-wide
    `DatabaseManager` across instances via `global_databasemanager`.
    """

    href = abs_href = None

    # Shared across all stub instances so the (possibly expensive) database
    # setup is done only once per test process.
    global_databasemanager = None

    def __init__(self, default_data=False, enable=None, disable=None,
                 path=None, destroying=False):
        """Construct a new Environment stub object.

        :param default_data: If True, populate the database with some
                             defaults.
        :param enable: A list of component classes or name globs to
                       activate in the stub environment.
        :param disable: A list of component classes or name globs to
                        deactivate (applied after `enable`).
        :param path: environment directory; defaults to the trac package
                     directory.
        :param destroying: when True, skip the initial database reset
                           (used by tests that tear the database down).
        """
        ComponentManager.__init__(self)
        Component.__init__(self)
        self.systeminfo = []

        import trac
        self.path = path
        if self.path is None:
            self.path = os.path.dirname(trac.__file__)
            if not os.path.isabs(self.path):
                self.path = os.path.join(os.getcwd(), self.path)

        # -- configuration
        self.config = Configuration(None)
        # We have to have a ticket-workflow config for ''lots'' of things to
        # work. So insert the basic-workflow config here. There may be a
        # better solution than this.
        load_workflow_config_snippet(self.config, 'basic-workflow.ini')
        self.config.set('logging', 'log_level', 'DEBUG')
        self.config.set('logging', 'log_type', 'stderr')
        # When an explicit `enable` list is given, start from everything
        # disabled; otherwise enable the optional svn support by default.
        if enable is not None:
            self.config.set('components', 'trac.*', 'disabled')
        else:
            self.config.set('components', 'tracopt.versioncontrol.svn.*',
                            'enabled')
        for name_or_class in enable or ():
            config_key = self._component_name(name_or_class)
            self.config.set('components', config_key, 'enabled')
        for name_or_class in disable or ():
            config_key = self._component_name(name_or_class)
            self.config.set('components', config_key, 'disabled')

        # -- logging
        from trac.log import logger_handler_factory
        self.log, self._log_handler = logger_handler_factory('test')

        # -- database
        self.config.set('components', 'trac.db.*', 'enabled')
        self.dburi = get_dburi()

        # Reuse the process-wide database manager when it already exists;
        # only the first stub instance creates (and initializes) it.
        init_global = False
        if self.global_databasemanager:
            self.components[DatabaseManager] = self.global_databasemanager
        else:
            self.config.set('trac', 'database', self.dburi)
            self.global_databasemanager = DatabaseManager(self)
            self.config.set('trac', 'debug_sql', True)
            self.config.set('logging', 'log_type', 'stderr')
            self.config.set('logging', 'log_level', 'DEBUG')
            init_global = not destroying

        if default_data or init_global:
            self.reset_db(default_data)

        from trac.web.href import Href
        self.href = Href('/trac.cgi')
        self.abs_href = Href('http://example.org/trac.cgi')

        self.known_users = []
        translation.activate(locale_en)

    def reset_db(self, default_data=None):
        """Remove all data from Trac tables, keeping the tables themselves.

        :param default_data: after clean-up, initialize with default data
        :return: True upon success
        """
        from trac import db_default
        scheme, db_prop = _parse_db_str(self.dburi)
        tables = []
        remove_sqlite_db = False
        try:
            with self.db_transaction as db:
                db.rollback()  # make sure there's no transaction in progress
                # check the database version
                database_version = db(
                    "SELECT value FROM system WHERE name='database_version'")
            if database_version:
                database_version = int(database_version[0][0])
            if database_version == db_default.db_version:
                # same version, simply clear the tables (faster)
                m = sys.modules[__name__]
                reset_fn = 'reset_%s_db' % scheme
                if hasattr(m, reset_fn):
                    tables = getattr(m, reset_fn)(self, db_prop)
            else:
                # different version or version unknown, drop the tables
                remove_sqlite_db = True
                self.destroy_db(scheme, db_prop)
        except Exception, e:
            # "Database not found ...",
            # "OperationalError: no such table: system" or the like
            pass

        db = None  # as we might shutdown the pool FIXME no longer needed!

        if scheme == 'sqlite' and remove_sqlite_db:
            path = db_prop['path']
            if path != ':memory:':
                if not os.path.isabs(path):
                    path = os.path.join(self.path, path)
                self.global_databasemanager.shutdown()
                os.remove(path)

        if not tables:
            self.global_databasemanager.init_db()
            # we need to make sure the next get_db_cnx() will re-create
            # a new connection aware of the new data model - see #8518.
            if self.dburi != 'sqlite::memory:':
                self.global_databasemanager.shutdown()

        with self.db_transaction as db:
            if default_data:
                for table, cols, vals in db_default.get_data(db):
                    db.executemany("INSERT INTO %s (%s) VALUES (%s)"
                                   % (table, ','.join(cols),
                                      ','.join(['%s' for c in cols])),
                                   vals)
            else:
                db("INSERT INTO system (name, value) VALUES (%s, %s)",
                   ('database_version', str(db_default.db_version)))

    def destroy_db(self, scheme=None, db_prop=None):
        """Drop the environment's database contents entirely.

        Best-effort: errors (e.g. database or schema already gone) are
        swallowed. Always returns False.
        """
        if not (scheme and db_prop):
            scheme, db_prop = _parse_db_str(self.dburi)
        try:
            with self.db_transaction as db:
                if scheme == 'postgres' and db.schema:
                    db('DROP SCHEMA "%s" CASCADE' % db.schema)
                elif scheme == 'mysql':
                    dbname = os.path.basename(db_prop['path'])
                    for table in db("""
                          SELECT table_name FROM information_schema.tables
                          WHERE table_schema=%s""", (dbname,)):
                        db("DROP TABLE IF EXISTS `%s`" % table)
        except Exception:
            # "TracError: Database not found...",
            # psycopg2.ProgrammingError: schema "tractest" does not exist
            pass
        return False

    # overriden
    def is_component_enabled(self, cls):
        # Components defined in test modules (__main__) are always enabled.
        if self._component_name(cls).startswith('__main__.'):
            return True
        return Environment.is_component_enabled(self, cls)

    def get_known_users(self, cnx=None):
        # Tests populate self.known_users directly instead of the database.
        return self.known_users
def locate(fn):
    """Locates a binary on the path.

    Returns the fully-qualified path, or None.
    """
    suffix = '.exe' if os.name == 'nt' else ''
    # The current directory is searched first, then every PATH entry.
    for directory in ['.'] + os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, fn + suffix)
        if os.path.exists(candidate):
            return candidate
    return None
# Module-level switch: functional (slow, end-to-end) tests are included by
# default; passing `--skip-functional-tests` on the command line clears it.
INCLUDE_FUNCTIONAL_TESTS = True
def suite():
    """Assemble the complete Trac test suite.

    Functional tests are appended only when `INCLUDE_FUNCTIONAL_TESTS`
    is set; the module's own doctests (e.g. `Mock`) come last.
    """
    import trac.tests
    import trac.admin.tests
    import trac.db.tests
    import trac.mimeview.tests
    import trac.ticket.tests
    import trac.util.tests
    import trac.versioncontrol.tests
    import trac.versioncontrol.web_ui.tests
    import trac.web.tests
    import trac.wiki.tests
    import tracopt.mimeview.tests
    import tracopt.perm.tests
    import tracopt.versioncontrol.git.tests
    import tracopt.versioncontrol.svn.tests

    all_tests = unittest.TestSuite()
    all_tests.addTest(trac.tests.basicSuite())
    if INCLUDE_FUNCTIONAL_TESTS:
        all_tests.addTest(trac.tests.functionalSuite())
    for subsuite in (trac.admin.tests.suite(),
                     trac.db.tests.suite(),
                     trac.mimeview.tests.suite(),
                     trac.ticket.tests.suite(),
                     trac.util.tests.suite(),
                     trac.versioncontrol.tests.suite(),
                     trac.versioncontrol.web_ui.tests.suite(),
                     trac.web.tests.suite(),
                     trac.wiki.tests.suite(),
                     tracopt.mimeview.tests.suite(),
                     tracopt.perm.tests.suite(),
                     tracopt.versioncontrol.git.tests.suite(),
                     tracopt.versioncontrol.svn.tests.suite()):
        all_tests.addTest(subsuite)
    all_tests.addTest(doctest.DocTestSuite(sys.modules[__name__]))
    return all_tests
if __name__ == '__main__':
    #FIXME: this is a bit inelegant
    # Strip our custom flag before unittest.main() parses sys.argv, since
    # it would reject an option it does not recognize.
    if '--skip-functional-tests' in sys.argv:
        sys.argv.remove('--skip-functional-tests')
        INCLUDE_FUNCTIONAL_TESTS = False
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/cache.py | trac/trac/cache.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from __future__ import with_statement
from .core import Component
from .util import arity
from .util.concurrency import ThreadLocal, threading
__all__ = ['CacheManager', 'cached']
# Reverse map (31-bit id -> original key string), kept for debugging and for
# storing a readable key alongside the id in the `cache` table.
_id_to_key = {}

def key_to_id(s):
    """Return a 31-bit hash of the given property key."""
    # Nearly Python 2's string hashing algorithm, truncated to 31 bits so
    # the result fits a signed database integer.
    if s:
        h = ord(s[0]) << 7
    else:
        h = 0
    for ch in s:
        h = ((1000003 * h) & 0x7fffffff) ^ ord(ch)
    h ^= len(s)
    _id_to_key[h] = s
    return h
class CachedPropertyBase(object):
    """Common machinery for cached property descriptors.

    Holds the data retrieval function and derives the database key string
    identifying the cached value.
    """
    def __init__(self, retriever):
        self.retriever = retriever
        self.__doc__ = retriever.__doc__

    def make_key(self, cls):
        """Return the key '<module>.<class>.<attr>' for this descriptor,
        attributed to the most derived base class that defines it so that
        subclasses share a single cache entry."""
        attr = self.retriever.__name__
        for base in cls.mro():
            if base.__dict__.get(attr) is self:
                cls = base
                break
        return '.'.join((cls.__module__, cls.__name__, attr))
class CachedSingletonProperty(CachedPropertyBase):
    """Cached property descriptor for classes behaving as singletons
    in the scope of one `~trac.env.Environment` instance.

    As there is at most one cache to monitor in the database for this
    kind of property, "static" information is enough to build the key,
    and the resulting id can safely be memoized on the descriptor itself.
    """
    def _id(self, cls):
        # Compute the id lazily and memoize it on the descriptor; safe
        # because the owner class is a per-environment singleton.
        try:
            return self.id
        except AttributeError:
            self.id = key_to_id(self.make_key(cls))
            return self.id

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return CacheManager(instance.env).get(self._id(owner),
                                              self.retriever, instance)

    def __delete__(self, instance):
        CacheManager(instance.env).invalidate(
            self._id(instance.__class__))
class CachedProperty(CachedPropertyBase):
    """Cached property descriptor for classes having potentially
    multiple instances associated to a single `~trac.env.Environment`
    instance.

    The key is augmented with a string unique to each owner instance,
    taken from the attribute named by `key_attr`. Since the resulting id
    differs per instance, it cannot live on the descriptor; instead it is
    written back into that same attribute on first use.
    """
    def __init__(self, retriever, key_attr):
        super(CachedProperty, self).__init__(retriever)
        self.key_attr = key_attr

    def _id(self, instance, cls):
        # On first access the key attribute holds the per-instance key
        # fragment (a str); it is then replaced by the hashed id.
        id = getattr(instance, self.key_attr)
        if isinstance(id, str):
            id = key_to_id(self.make_key(cls) + ':' + id)
            setattr(instance, self.key_attr, id)
        return id

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return CacheManager(instance.env).get(self._id(instance, owner),
                                              self.retriever, instance)

    def __delete__(self, instance):
        CacheManager(instance.env).invalidate(
            self._id(instance, instance.__class__))
def cached(fn_or_attr=None):
    """Method decorator creating a cached attribute from a data
    retrieval method.

    Accessing the cached attribute gives back the cached value. The
    data retrieval method is transparently called by the
    `CacheManager` on first use after the program start or after the
    cache has been invalidated. Invalidating the cache for this value
    is done by ``del``\ eting the attribute.

    Note that the cache validity is maintained using the `cache` table
    in the database. Cache invalidation is performed within a
    transaction block, and can be nested within another transaction
    block.

    When the decorator is used in a class for which instances behave
    as singletons within the scope of a given `~trac.env.Environment`
    (typically `~trac.core.Component` classes), the key used to
    identify the attribute in the database is constructed from the
    names of the containing module, class and retriever method::

        class WikiSystem(Component):
            @cached
            def pages(self):
                return set(name for name, in self.env.db_query(
                               "SELECT DISTINCT name FROM wiki"))

    Otherwise, when the decorator is used in non-"singleton" objects,
    a string specifying the name of an attribute containing a string
    unique to the instance must be passed to the decorator. This value
    will be appended to the key constructed from module, class and
    method name::

        class SomeClass(object):
            def __init__(self, env, name):
                self.env = env
                self.name = name
                self._metadata_id = name

            @cached('_metadata_id')
            def metadata(self):
                ...

    Note that in this case the key attribute is overwritten with a
    hash of the key on first access, so it should not be used for any
    other purpose.

    In either case, this decorator requires that the object on which
    it is used has an ``env`` attribute containing the application
    `~trac.env.Environment`.

    .. versionchanged:: 1.0
       The data retrieval method used to be called with a single
       argument ``db`` containing a reference to a database
       connection. This is the same connection that can be retrieved
       via the normal `~trac.env.Environment.db_query` or
       `~trac.env.Environment.db_transaction`, so this is no longer
       needed, though methods supporting that argument are still
       supported (but will be removed in version 1.1.1).
    """
    if callable(fn_or_attr):
        # Bare @cached usage: the argument is the retriever itself.
        return CachedSingletonProperty(fn_or_attr)
    # @cached('key_attr') usage: return a decorator capturing the
    # name of the per-instance key attribute.
    def decorator(fn):
        return CachedProperty(fn, fn_or_attr)
    return decorator
class CacheManager(Component):
    """Cache manager.

    Maintains a process-wide cache (`_cache`), a per-thread snapshot of it
    (`_local`), and synchronizes both with the `cache` table in the
    database, whose `generation` counters detect invalidations performed
    by other processes.
    """

    required = True

    def __init__(self):
        self._cache = {}                # process-wide: id -> (data, generation)
        self._local = ThreadLocal(meta=None, cache=None)
        self._lock = threading.RLock()  # guards _cache

    # Public interface

    def reset_metadata(self):
        """Reset per-request cache metadata."""
        self._local.meta = self._local.cache = None

    def get(self, id, retriever, instance):
        """Get cached or fresh data for the given id.

        Lookup order: thread-local copy, then process cache, then the
        database (calling `retriever` to rebuild the value).
        """
        # Get cache metadata
        local_meta = self._local.meta
        local_cache = self._local.cache
        if local_meta is None:
            # First cache usage in this request, retrieve cache metadata
            # from the database and make a thread-local copy of the cache
            meta = self.env.db_query("SELECT id, generation FROM cache")
            self._local.meta = local_meta = dict(meta)
            self._local.cache = local_cache = self._cache.copy()
        db_generation = local_meta.get(id, -1)

        # Try the thread-local copy first
        try:
            (data, generation) = local_cache[id]
            if generation == db_generation:
                return data
        except KeyError:
            pass

        with self.env.db_query as db:
            with self._lock:
                # Get data from the process cache
                try:
                    (data, generation) = local_cache[id] = self._cache[id]
                    if generation == db_generation:
                        return data
                except KeyError:
                    generation = None  # Force retrieval from the database

                # Check if the process cache has the newest version, as it may
                # have been updated after the metadata retrieval
                for db_generation, in db(
                        "SELECT generation FROM cache WHERE id=%s", (id,)):
                    break
                else:
                    db_generation = -1
                if db_generation == generation:
                    return data

                # Retrieve data from the database
                if arity(retriever) == 2:
                    # Legacy retriever signature taking a db connection
                    data = retriever(instance, db)
                else:
                    data = retriever(instance)
                local_cache[id] = self._cache[id] = (data, db_generation)
                local_meta[id] = db_generation
                return data

    def invalidate(self, id):
        """Invalidate cached data for the given id."""
        with self.env.db_transaction as db:
            with self._lock:
                # Invalidate in other processes

                # The row corresponding to the cache may not exist in the table
                # yet.
                #  - If the row exists, the UPDATE increments the generation,
                #    the SELECT returns a row and we're done.
                #  - If the row doesn't exist, the UPDATE does nothing, but
                #    starts a transaction. The SELECT then returns nothing,
                #    and we can safely INSERT a new row.
                db("UPDATE cache SET generation=generation+1 WHERE id=%s",
                   (id,))
                if not db("SELECT generation FROM cache WHERE id=%s", (id,)):
                    db("INSERT INTO cache VALUES (%s, %s, %s)",
                       (id, 0, _id_to_key.get(id, '<unknown>')))

                # Invalidate in this process
                self._cache.pop(id, None)

                # Invalidate in this thread
                try:
                    del self._local.cache[id]
                except (KeyError, TypeError):
                    # TypeError: _local.cache is still None (no metadata
                    # loaded yet in this request)
                    pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/hooks.py | trac/trac/hooks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import abc
import os
import pkg_resources
from trac.env import open_environment
from trac.util import exception_to_unicode
from trac.util.concurrency import threading
from trac.web.api import RequestDone
from trac.web.href import Href
from trac.web.main import RequestWithSession
__all__ = ['environment_factory', 'install_global_hooks']
class EnvironmentFactoryBase(object):
    """Abstract base for pluggable environment factories, looked up from
    the ``[trac] environment_factory`` option (see `environment_factory`)."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def open_environment(self, environ, env_path, global_env, use_cache=False):
        """Open the environment at `env_path` for the WSGI request `environ`.

        NOTE(review): concrete semantics (caching behavior, role of
        `global_env`) are defined by subclasses — only the signature is
        fixed here.
        """
        pass
class RequestFactoryBase(object):
    """Abstract base for pluggable request factories, looked up from the
    ``[trac] request_factory`` option (see `request_factory`)."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def create_request(self, env, environ, start_response):
        """Create the request object for environment `env` from the WSGI
        `environ` and `start_response` callback."""
        pass
def load_class(fqn):
    """Import and return the object named by the fully-qualified name
    `fqn` ('package.module.attr'), or None when it cannot be resolved."""
    try:
        pkg, resource = fqn.rsplit('.', 1)
    except ValueError:
        # No dot: there is nothing to import the attribute from.
        return None
    try:
        module = __import__(pkg, fromlist=[resource])
    except ImportError:
        return None
    return getattr(module, resource, None)
_global_hooks_installed = False
_global_hooks_lock = threading.Lock()

def install_global_hooks():
    """Install process-wide hooks exactly once.

    Thread-safe (double-checked locking); best-effort: a missing or broken
    `multiproduct.hooks` module is silently ignored, and the flag is set
    either way so the import is not retried.
    """
    global _global_hooks_installed
    if _global_hooks_installed:
        return
    with _global_hooks_lock:
        if not _global_hooks_installed:
            try:
                # TODO: this is currently hardcoded, maybe it could be made
                # configurable in the future
                import multiproduct.hooks
            except Exception:
                # narrow enough not to swallow SystemExit/KeyboardInterrupt
                pass
            _global_hooks_installed = True
def environment_factory(env):
    """Return the environment factory class configured for `env` via the
    ``[trac] environment_factory`` option, or None if unresolvable."""
    fqn = env.config.get('trac', 'environment_factory')
    return load_class(fqn)
def request_factory(env):
    """Return the request factory class configured for `env` via the
    ``[trac] request_factory`` option, or None if unresolvable."""
    fqn = env.config.get('trac', 'request_factory')
    return load_class(fqn)
class BootstrapHandlerBase(object):
"""Objects responsible for loading the target environment and
request objects used in subsequent dispatching.
"""
def open_environment(self, environ, start_response):
"""Load and initialize target Trac environment involved in request
dispatching.
The following WSGI entries will also be present in `environ` dict:
||= WSGI variable =||= Environment variable =||= Comment =||
|| trac.env_path || TRAC_ENV || See wiki:TracModWSGI ||
|| trac.env_parent_dir || TRAC_ENV_PARENT_DIR || See wiki:TracModWSGI||
|| trac.env_index_template || TRAC_ENV_INDEX_TEMPLATE || See wiki:TracInterfaceCustomization ||
|| trac.template_vars || TRAC_TEMPLATE_VARS || See wiki:TracInterfaceCustomization ||
|| trac.locale || || Target locale ||
|| trac.base_url || TRAC_BASE_URL || Trac base URL hint ||
A new entry named 'trac.env_name' identifying environment SHOULD be
added (e.g. used by tracd to choose authentication realms).
As a side-effect the WSGI environment dict (i.e. `environ`) may be
modified in many different ways to prepare it for subsequent
dispatching.
This method may handle the request (e.g. render environment index page)
in case environment lookup yields void results. In that case it MUST
invoke WSGI `write` callable returned by `start_response` and raise
`trac.web.api.RequestDone` exception.
:param environ: WSGI environment dict
:param start_response: WSGI callback for starting the response
:return: environment object
:throws RequestDone: if the request is fully processed while loading
target environment e.g. environment index page
:throws EnvironmentError: if it is impossible to find a way to locate
target environment e.g. TRAC_ENV and
TRAC_ENV_PARENT_DIR both missing
:throws Exception: any other exception will be processed by the caller
in order to send a generic error message back to
the HTTP client
"""
raise NotImplementedError("Must override method 'open_environment'")
def default_probe_environment(self, environ):
"""By default it will invoke `open_environment` and discard the
resulting environment object. This approach is generic but not
efficient. Should be overridden whenever possible.
"""
# If the expected configuration keys aren't found in the WSGI
# environment, try looking them up in the process environment variables
environ.setdefault('trac.env_path', os.getenv('TRAC_ENV'))
environ.setdefault('trac.env_parent_dir',
os.getenv('TRAC_ENV_PARENT_DIR'))
environ.setdefault('trac.env_index_template',
os.getenv('TRAC_ENV_INDEX_TEMPLATE'))
environ.setdefault('trac.template_vars',
os.getenv('TRAC_TEMPLATE_VARS'))
environ.setdefault('trac.locale', '')
environ.setdefault('trac.base_url',
os.getenv('TRAC_BASE_URL'))
try:
self.open_environment(environ,
lambda status, headers: (lambda data: None))
except Exception:
# Handle all exceptions; else potential HTTP protocol violation
pass
def probe_environment(self, environ):
"""This method is aimed at providing a lightweight version of
`open_environment` by solely applying upon `environ` the side effects
needed to dispatch the request in environment context.
By default it will invoke `open_environment` and discard the
resulting environment object. Specialized versions will have the chance
to implement more efficient strategies in case environment
instantiation may be avoided.
:return: None
"""
self.default_probe_environment(environ)
    def create_request(self, env, environ, start_response):
        """Instantiate the request object used in subsequent request
        dispatching.

        Abstract: concrete bootstrap handlers must override this.

        :param env: target Trac environment returned by `open_environment`
        :param environ: WSGI environment dict
        :param start_response: WSGI callback for starting the response
        :raises NotImplementedError: always, in this base implementation
        """
        raise NotImplementedError("Must override method 'create_request'")
class DefaultBootstrapHandler(BootstrapHandlerBase):
    """Default bootstrap handler.

    - Load environment based on URL path.
    - Instantiate RequestWithSession

    Notice: This class is a straightforward refactoring of factories
    implementation.
    """
    # Global (non-factory) environment opened last by `open_environment`;
    # reused by `create_request` to look up a custom request factory.
    global_env = None

    def open_environment(self, environ, start_response):
        """Locate and open the target Trac environment.

        The environment is identified either by the ``trac.env_path`` key
        of `environ` or, failing that, by the first ``PATH_INFO`` segment
        resolved against ``trac.env_parent_dir`` / ``trac.env_paths``.

        :param environ: WSGI environment dict
        :param start_response: WSGI callback for starting the response
        :return: the environment object
        :throws RequestDone: if the request was fully handled here (an
                             environment index page or a 404 response)
        :throws EnvironmentError: if no environment location is configured
        """
        env_path = environ.get('trac.env_path')
        if env_path:
            environ['trac.env_name'] = os.path.basename(env_path)
        else:
            env_parent_dir = environ.get('trac.env_parent_dir')
            env_paths = environ.get('trac.env_paths')
            if env_parent_dir or env_paths:
                # The first component of the path is the base name of the
                # environment
                path_info = environ.get('PATH_INFO', '').lstrip('/').split('/')
                env_name = path_info.pop(0)
                if not env_name:
                    # No specific environment requested, so render an
                    # environment index page
                    send_project_index(environ, start_response, env_parent_dir,
                                       env_paths)
                    raise RequestDone
                environ['trac.env_name'] = env_name
                errmsg = None
                # To make the matching patterns of request handlers work, we
                # append the environment name to the `SCRIPT_NAME` variable,
                # and keep only the remaining path in the `PATH_INFO`
                # variable.
                script_name = environ.get('SCRIPT_NAME', '')
                try:
                    script_name = unicode(script_name, 'utf-8')
                    # (as Href expects unicode parameters)
                    environ['SCRIPT_NAME'] = Href(script_name)(env_name)
                    environ['PATH_INFO'] = '/' + '/'.join(path_info)
                    if env_parent_dir:
                        env_path = os.path.join(env_parent_dir, env_name)
                    else:
                        env_path = get_environments(environ).get(env_name)
                    if not env_path or not os.path.isdir(env_path):
                        errmsg = 'Environment not found'
                except UnicodeDecodeError:
                    errmsg = 'Invalid URL encoding (was %r)' % script_name
                if errmsg:
                    write = start_response('404 Not Found',
                                           [('Content-Type', 'text/plain'),
                                            ('Content-Length',
                                             str(len(errmsg)))])
                    write(errmsg)
                    raise RequestDone
        if not env_path:
            raise EnvironmentError('The environment options "TRAC_ENV" or '
                                   '"TRAC_ENV_PARENT_DIR" or the mod_python '
                                   'options "TracEnv" or "TracEnvParentDir" '
                                   'are missing. Trac requires one of these '
                                   'options to locate the Trac '
                                   'environment(s).')
        run_once = environ['wsgi.run_once']
        # In run-once (e.g. CGI) mode, caching opened environments is useless.
        # Note: the former ``try: ... except Exception: raise / else:``
        # wrapper around these calls was a no-op and has been removed;
        # exceptions still propagate to the caller unchanged.
        global_env = open_environment(env_path, use_cache=not run_once)
        # A plugin-provided environment factory may wrap or replace the
        # global environment; fall back to the global one otherwise.
        factory = environment_factory(global_env)
        factory_env = factory().open_environment(environ, env_path,
                                                 global_env,
                                                 use_cache=not run_once) \
                      if factory else None
        self.global_env = global_env
        return factory_env if factory_env else global_env

    def create_request(self, env, environ, start_response):
        """Instantiate the request object for `env`.

        A request factory registered for the global environment takes
        precedence; `RequestWithSession` is the fallback.
        """
        factory = None
        try:
            factory = request_factory(self.global_env)
        except Exception:
            # Best effort: failure to obtain a custom request factory
            # silently falls back to the default request class.
            pass
        return factory().create_request(env, environ, start_response) \
               if factory else RequestWithSession(environ, start_response)
# Module-level singleton used as the fallback by `load_bootstrap_handler`
# when no (working) bootstrap entry point is configured.
default_bootstrap_handler = DefaultBootstrapHandler()
def load_bootstrap_handler(bootstrap_ep, log=None):
    """Load handler for environment lookup and instantiation of request objects

    :param bootstrap_ep: entry point specification string
                         (e.g. ``'my.module:handler_instance'``)
    :param log: file-like object used to report errors
    :return: the loaded bootstrap handler, or `default_bootstrap_handler`
             when `bootstrap_ep` is empty or fails to load
    """
    bootstrap = None
    if bootstrap_ep:
        try:
            # Parse the spec as a setuptools entry point; the 'x = ' name
            # prefix is a placeholder required by the EntryPoint syntax.
            ep = pkg_resources.EntryPoint.parse('x = ' + bootstrap_ep)
            bootstrap = ep.load(require=False)
        except Exception, e:
            # Loading failures are not fatal: report them (if a log target
            # was given) and fall back to the default handler below.
            if log:
                log.write("[FAIL] [Trac] entry point '%s'. Reason %s" %
                          (bootstrap_ep, repr(exception_to_unicode(e))))
    if bootstrap is None:
        bootstrap = default_bootstrap_handler
    return bootstrap
# Recursive imports
from trac.web.main import send_project_index, get_environments
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/perm.py | trac/trac/perm.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement

import csv
import os
from time import time

from trac.admin import AdminCommandError, IAdminCommandProvider, get_dir_list
from trac.cache import cached
from trac.config import ExtensionOption, OrderedExtensionsOption
from trac.core import *
from trac.resource import ResourceNotFound, get_resource_name, \
                          manager_for_neighborhood, Neighborhood, Resource
from trac.util import file_or_std
from trac.util.text import path_to_unicode, print_table, printout, \
                           stream_encoding, to_unicode, wrap
from trac.util.translation import _
__all__ = ['IPermissionRequestor', 'IPermissionStore', 'IPermissionPolicy',
'IPermissionGroupProvider', 'PermissionError', 'PermissionSystem']
class PermissionError(StandardError):
    """Insufficient permissions to complete the operation"""

    def __init__ (self, action=None, resource=None, env=None, msg=None):
        """
        :param action: name of the missing permission, if known
        :param resource: the `Resource` the check applied to, if any
        :param env: environment used to render the resource name
        :param msg: custom message, used when no `action` is given
        """
        StandardError.__init__(self)
        self.action = action
        self.resource = resource
        self.env = env
        self.msg = msg

    def __unicode__ (self):
        # Render the most specific message the available attributes allow.
        if self.action:
            if self.resource:
                return _('%(perm)s privileges are required to perform '
                         'this operation on %(resource)s. You don\'t have the '
                         'required permissions.',
                         perm=self.action,
                         resource=get_resource_name(self.env, self.resource))
            else:
                return _('%(perm)s privileges are required to perform this '
                         'operation. You don\'t have the required '
                         'permissions.', perm=self.action)
        elif self.msg:
            return self.msg
        else:
            return _('Insufficient privileges to perform this operation.')
class IPermissionRequestor(Interface):
    """Extension point interface for components that define actions."""

    def get_permission_actions():
        """Return a list of actions defined by this component.

        The items in the list may either be simple strings, or
        `(string, sequence)` tuples. The latter are considered to be "meta
        permissions" that group several simple actions under one name for
        convenience, adding to it if another component already defined that
        name.

        Note: action names are upper-case by convention; lower-case names
        are interpreted as permission groups by `DefaultPermissionStore`.
        """
class IPermissionStore(Interface):
    """Extension point interface for components that provide storage and
    management of permissions."""

    def get_user_permissions(username):
        """Return all permissions for the user with the specified name.

        The permissions are returned as a dictionary where the key is the name
        of the permission, and the value is either `True` for granted
        permissions or `False` for explicitly denied permissions.

        NOTE(review): the bundled `DefaultPermissionStore` actually returns
        a plain list of granted action names; callers in this module accept
        either form."""

    def get_users_with_permissions(permissions):
        """Retrieve a list of users that have any of the specified permissions.

        Users are returned as a list of usernames.
        """

    def get_all_permissions():
        """Return all permissions for all users.

        The permissions are returned as a list of (subject, action)
        formatted tuples."""

    def grant_permission(username, action):
        """Grant a user permission to perform an action."""

    def revoke_permission(username, action):
        """Revokes the permission of the given user to perform an action."""
class IPermissionGroupProvider(Interface):
    """Extension point interface for components that provide information about
    user groups.
    """

    def get_permission_groups(username):
        """Return a list of names of the groups that the user with the
        specified name is a member of.

        May return `None` or an empty sequence; callers guard with
        ``... or []``."""
class IPermissionPolicy(Interface):
    """A security policy provider used for fine grained permission checks."""

    def check_permission(action, username, resource, perm):
        """Check that the action can be performed by username on the resource

        :param action: the name of the permission
        :param username: the username string or 'anonymous' if there's no
                         authenticated user
        :param resource: the resource on which the check applies.
                         Will be `None`, if the check is a global one and
                         not made on a resource in particular
        :param perm: the permission cache for that username and resource,
                     which can be used for doing secondary checks on other
                     permissions. Care must be taken to avoid recursion.

        :return: `True` if action is allowed, `False` if action is denied,
                 or `None` if indifferent. If `None` is returned, the next
                 policy in the chain will be used, and so on.

        Note that when checking a permission on a realm resource (i.e. when
        `.id` is `None`), this usually corresponds to some preliminary check
        done before making a fine-grained check on some resource.
        Therefore the `IPermissionPolicy` should be conservative and return:

         * `True` if the action *can* be allowed for some resources in
           that realm. Later, for specific resource, the policy will be able
           to return `True` (allow), `False` (deny) or `None` (don't decide).

         * `None` if the action *can not* be performed for *some* resources.
           This corresponds to situation where the policy is only interested
           in returning `False` or `None` on specific resources.

         * `False` if the action *can not* be performed for *any* resource in
           that realm (that's a very strong decision as that will usually
           prevent any fine-grained check to even happen).

        Note that performing permission checks on realm resources may seem
        redundant for now as the action name itself contains the realm, but
        this will probably change in the future (e.g. `'VIEW' in ...`).
        """
class DefaultPermissionStore(Component):
    """Default implementation of permission storage and group management.

    This component uses the `permission` table in the database to store both
    permissions and groups.
    """
    implements(IPermissionStore)

    group_providers = ExtensionPoint(IPermissionGroupProvider)

    def get_user_permissions(self, username):
        """Retrieve the permissions for the given user and return them as a
        list of granted action names.

        The permissions are stored in the database as (username, action)
        records. There's simple support for groups by using lowercase names
        for the action column: such a record represents a group and not an
        actual permission, and declares that the user is part of that group.
        """
        subjects = set([username])
        # Seed the subject set with the groups the user belongs to,
        # according to every registered group provider.
        for provider in self.group_providers:
            subjects.update(provider.get_permission_groups(username) or [])
        actions = set()
        perms = self._all_permissions
        # Fixed-point iteration: lowercase rows enlarge the subject set
        # (group membership), which may in turn grant more actions, so keep
        # scanning the table snapshot until neither set grows any more.
        while True:
            num_users = len(subjects)
            num_actions = len(actions)
            for user, action in perms:
                if user in subjects:
                    if action.isupper() and action not in actions:
                        # Upper-case value: an actual permission
                        actions.add(action)
                    if not action.isupper() and action not in subjects:
                        # action is actually the name of the permission
                        # group here
                        subjects.add(action)
            if num_users == len(subjects) and num_actions == len(actions):
                break
        return list(actions)

    def get_users_with_permissions(self, permissions):
        """Retrieve a list of users that have any of the specified permissions

        Users are returned as a list of usernames.
        """
        # get_user_permissions() takes care of the magic 'authenticated'
        # group. The optimized loop we had before didn't. This is very
        # inefficient, but it works.
        result = set()
        users = set([u[0] for u in self.env.get_known_users()])
        for user in users:
            userperms = self.get_user_permissions(user)
            for group in permissions:
                if group in userperms:
                    result.add(user)
        return list(result)

    def get_all_permissions(self):
        """Return all permissions for all users.

        The permissions are returned as a list of (subject, action)
        formatted tuples."""
        return self._all_permissions

    @cached
    def _all_permissions(self):
        # Cached snapshot of the whole `permission` table; invalidated by
        # grant_permission() / revoke_permission() below.
        return [(username, action) for username, action in
                self.env.db_query("SELECT username, action FROM permission")]

    def grant_permission(self, username, action):
        """Grants a user the permission to perform the specified action."""
        self.env.db_transaction("INSERT INTO permission VALUES (%s, %s)",
                                (username, action))
        self.log.info("Granted permission for %s to %s", action, username)
        # Invalidate cached property
        del self._all_permissions

    def revoke_permission(self, username, action):
        """Revokes a users' permission to perform the specified action."""
        self.env.db_transaction(
            "DELETE FROM permission WHERE username=%s AND action=%s",
            (username, action))
        self.log.info("Revoked permission for %s to %s", action, username)
        # Invalidate cached property
        del self._all_permissions
class DefaultPermissionGroupProvider(Component):
    """Permission group provider providing the basic builtin permission groups
    'anonymous' and 'authenticated'."""

    required = True

    implements(IPermissionGroupProvider)

    def get_permission_groups(self, username):
        """Return the builtin groups `username` belongs to.

        Everybody is in 'anonymous'; any named, non-anonymous user is
        additionally in 'authenticated'.
        """
        if username and username != 'anonymous':
            return ['anonymous', 'authenticated']
        return ['anonymous']
class DefaultPermissionPolicy(Component):
    """Default permission policy using the IPermissionStore system."""

    implements(IPermissionPolicy)

    # Number of seconds a cached user permission set is valid for.
    CACHE_EXPIRY = 5
    # How frequently to clear the entire permission cache
    CACHE_REAP_TIME = 60

    def __init__(self):
        # username -> (timestamp, permissions), see check_permission()
        self.permission_cache = {}
        self.last_reap = time()

    # IPermissionPolicy methods

    def check_permission(self, action, username, resource, perm):
        # TODO: Precondition resource.neighborhood is None
        now = time()
        # Periodically drop the whole cache so stale entries for rarely
        # re-checked users don't accumulate forever.
        if now - self.last_reap > self.CACHE_REAP_TIME:
            self.permission_cache = {}
            self.last_reap = time()
        timestamp, permissions = self.permission_cache.get(username, (0, None))
        # Cache miss, or the cached entry expired?
        if now - timestamp > self.CACHE_EXPIRY:
            # Yes, pull fresh permissions from the database.
            permissions = PermissionSystem(self.env). \
                          get_user_permissions(username)
            self.permission_cache[username] = (now, permissions)
        # Grant if held; otherwise return None so the next policy in the
        # chain gets a say (this policy never explicitly denies).
        return action in permissions or None
class PermissionSystem(Component):
    """Permission management sub-system."""

    required = True

    implements(IPermissionRequestor)

    requestors = ExtensionPoint(IPermissionRequestor)

    store = ExtensionOption('trac', 'permission_store', IPermissionStore,
                            'DefaultPermissionStore',
        """Name of the component implementing `IPermissionStore`, which is used
        for managing user and group permissions.""")

    policies = OrderedExtensionsOption('trac', 'permission_policies',
        IPermissionPolicy,
        'DefaultPermissionPolicy, LegacyAttachmentPolicy',
        False,
        """List of components implementing `IPermissionPolicy`, in the order in
        which they will be applied. These components manage fine-grained access
        control to Trac resources.
        Defaults to the DefaultPermissionPolicy (pre-0.11 behavior) and
        LegacyAttachmentPolicy (map ATTACHMENT_* permissions to realm specific
        ones)""")

    # Number of seconds a cached user permission set is valid for.
    CACHE_EXPIRY = 5
    # How frequently to clear the entire permission cache
    CACHE_REAP_TIME = 60

    def __init__(self):
        # permission -> (timestamp, [username, ...]),
        # see get_users_with_permission()
        self.permission_cache = {}
        self.last_reap = time()

    # Public API

    def grant_permission(self, username, action):
        """Grant the user with the given name permission to perform the
        specified action.

        :raises TracError: if `action` is upper-case but not a known action
        """
        if action.isupper() and action not in self.get_actions():
            raise TracError(_('%(name)s is not a valid action.', name=action))
        self.store.grant_permission(username, action)

    def revoke_permission(self, username, action):
        """Revokes the permission of the specified user to perform an
        action."""
        self.store.revoke_permission(username, action)

    def get_actions_dict(self):
        """Get all actions from permission requestors as a `dict`.

        The keys are the action names. The values are the additional actions
        granted by each action. For simple actions, this is an empty list.
        For meta actions, this is the list of actions covered by the action.
        """
        actions = {}
        for requestor in self.requestors:
            for action in requestor.get_permission_actions() or []:
                if isinstance(action, tuple):
                    # Meta action: merge covered actions from every
                    # requestor declaring the same name.
                    actions.setdefault(action[0], []).extend(action[1])
                else:
                    actions.setdefault(action, [])
        return actions

    def get_actions(self, skip=None):
        """Get a list of all actions defined by permission requestors.

        :param skip: requestor whose actions are left out; used by
                     `get_permission_actions` below to avoid recursing
                     into itself
        """
        actions = set()
        for requestor in self.requestors:
            if requestor is skip:
                continue
            for action in requestor.get_permission_actions() or []:
                if isinstance(action, tuple):
                    actions.add(action[0])
                else:
                    actions.add(action)
        return list(actions)

    def get_user_permissions(self, username=None):
        """Return the permissions of the specified user.

        The return value is a dictionary containing all the actions granted
        to the user mapped to `True`. If an action is missing as a key, or
        has `False` as a value, permission is denied."""
        if not username:
            # Return all permissions available in the system
            return dict.fromkeys(self.get_actions(), True)
        # Return all permissions that the given user has
        actions = self.get_actions_dict()
        permissions = {}

        def expand_meta(action):
            # Recursively mark `action` and everything it covers as granted.
            if action not in permissions:
                permissions[action] = True
                for a in actions.get(action, ()):
                    expand_meta(a)

        for perm in self.store.get_user_permissions(username) or []:
            expand_meta(perm)
        return permissions

    def get_all_permissions(self):
        """Return all permissions for all users.

        The permissions are returned as a list of (subject, action)
        formatted tuples."""
        return self.store.get_all_permissions() or []

    def get_users_with_permission(self, permission):
        """Return all users that have the specified permission.

        Users are returned as a list of user names.
        """
        now = time()
        # Periodically flush the whole cache (see CACHE_REAP_TIME).
        if now - self.last_reap > self.CACHE_REAP_TIME:
            self.permission_cache = {}
            self.last_reap = now
        timestamp, permissions = self.permission_cache.get(permission,
                                                           (0, None))
        # Serve from the cache while the entry is still fresh.
        if now - timestamp <= self.CACHE_EXPIRY:
            return permissions
        # Invert the action hierarchy: child action -> set of meta actions
        # that grant it.
        parent_map = {}
        for parent, children in self.get_actions_dict().iteritems():
            for child in children:
                parent_map.setdefault(child, set()).add(parent)

        satisfying_perms = set()

        def append_with_parents(action):
            # `permission` is satisfied by itself and, transitively, by any
            # meta action that covers it.
            if action not in satisfying_perms:
                satisfying_perms.add(action)
                for action in parent_map.get(action, ()):
                    append_with_parents(action)

        append_with_parents(permission)
        perms = self.store.get_users_with_permissions(satisfying_perms) or []
        self.permission_cache[permission] = (now, perms)
        return perms

    def expand_actions(self, actions):
        """Helper method for expanding all meta actions."""
        all_actions = self.get_actions_dict()
        expanded_actions = set()

        def expand_action(action):
            if action not in expanded_actions:
                expanded_actions.add(action)
                for a in all_actions.get(action, ()):
                    expand_action(a)

        for a in actions:
            expand_action(a)
        return expanded_actions

    def check_permission(self, action, username=None, resource=None, perm=None):
        """Return True if permission to perform action for the given resource
        is allowed."""
        if username is None:
            username = 'anonymous'
        if resource:
            if resource.realm is None:
                # No realm to check against: fall back to a global check.
                resource = None
            elif resource.neighborhood is not None:
                # Delegate to the permission system of the component
                # manager owning the target neighborhood.
                try:
                    compmgr = manager_for_neighborhood(self.env,
                                                       resource.neighborhood)
                except ResourceNotFound:
                    #FIXME: raise ?
                    return False
                else:
                    return PermissionSystem(compmgr).check_permission(
                        action, username, resource, perm)
        # Ask each policy in configured order; the first non-None decision
        # wins, and no decision at all means denial.
        for policy in self.policies:
            decision = policy.check_permission(action, username, resource,
                                               perm)
            if decision is not None:
                if not decision:
                    self.log.debug("%s denies %s performing %s on %r",
                                   policy.__class__.__name__, username,
                                   action, resource)
                return decision
        self.log.debug("No policy allowed %s performing %s on %r",
                       username, action, resource)
        return False

    # IPermissionRequestor methods

    def get_permission_actions(self):
        """Implement the global `TRAC_ADMIN` meta permission.

        Implements also the `EMAIL_VIEW` permission which allows for
        showing email addresses even if `[trac] show_email_addresses`
        is `false`.
        """
        actions = self.get_actions(skip=self)
        actions.append('EMAIL_VIEW')
        return [('TRAC_ADMIN', actions), 'EMAIL_VIEW']
class PermissionCache(object):
    """Cache that maintains the permissions of a single user.

    Permissions are usually checked using the following syntax:

        'WIKI_MODIFY' in perm

    One can also apply more fine grained permission checks and
    specify a specific resource for which the permission should be
    available:

        'WIKI_MODIFY' in perm('wiki', 'WikiStart')

    If there's already a `page` object available, the check is simply:

        'WIKI_MODIFY' in perm(page.resource)

    If instead of a check, one wants to assert that a given permission is
    available, the following form should be used:

        perm.require('WIKI_MODIFY')
    or
        perm('wiki', 'WikiStart').require('WIKI_MODIFY')
    or
        perm(page.resource).require('WIKI_MODIFY')

    When using `require`, a `PermissionError` exception is raised if the
    permission is missing.
    """

    __slots__ = ('env', 'username', '_resource', '_cache')

    def __init__(self, env, username=None, resource=None, cache=None,
                 groups=None):
        # Note: `groups` is accepted for signature compatibility but is
        # not used by this implementation.
        if resource and resource.neighborhood is not None:
            # Re-root both the component manager and the resource in the
            # target neighborhood so subsequent checks are local to it.
            env = manager_for_neighborhood(env, resource.neighborhood)
            resource = Neighborhood(None, None).child(resource)
        self.env = env
        self.username = username or 'anonymous'
        self._resource = resource
        # Decision cache, shared with `PermissionCache` instances derived
        # via __call__(); keyed by (username, hash(resource), action).
        if cache is None:
            cache = {}
        self._cache = cache

    def _normalize_resource(self, realm_or_resource, id, version):
        # Build a Resource from the loose arguments, or fall back to the
        # resource this cache is bound to.
        if realm_or_resource:
            return Resource(realm_or_resource, id, version)
        else:
            return self._resource

    def __call__(self, realm_or_resource, id=False, version=False):
        """Convenience function allowing usage such as:

            'WIKI_VIEW' in perm(context)
        or
            'WIKI_VIEW' in perm(realm, id, version)
        or
            'WIKI_VIEW' in perm(resource)
        """
        resource = Resource(realm_or_resource, id, version)
        if resource and self._resource and resource == self._resource:
            # Same resource as the one we're bound to: reuse this instance.
            return self
        else:
            return PermissionCache(self.env, self.username, resource,
                                   self._cache)

    def has_permission(self, action, realm_or_resource=None, id=False,
                       version=False):
        """Return whether `action` is granted on the given (or the bound)
        resource."""
        resource = self._normalize_resource(realm_or_resource, id, version)
        return self._has_permission(action, resource)

    def _has_permission(self, action, resource):
        # Cache key uses hash(resource) to keep keys small; the resource
        # stored alongside the decision guards against hash collisions.
        key = (self.username, hash(resource), action)
        cached = self._cache.get(key)
        if cached:
            cache_decision, cache_resource = cached
            if resource == cache_resource:
                return cache_decision
        perm = self
        permsys = PermissionSystem(self.env)
        if resource is not self._resource:
            if resource.neighborhood is not None:
                # Foreign neighborhood: check with a fresh cache against
                # that neighborhood's own permission system.
                perm = PermissionCache(self.env, self.username, resource, {})
                permsys = PermissionSystem(manager_for_neighborhood(
                    self.env, resource.neighborhood))
            else:
                perm = PermissionCache(self.env, self.username, resource,
                                       self._cache)
        decision = permsys.check_permission(action, perm.username, resource,
                                            perm)
        self._cache[key] = (decision, resource)
        return decision

    __contains__ = has_permission

    def require(self, action, realm_or_resource=None, id=False, version=False):
        """Assert that `action` is granted on the given (or the bound)
        resource.

        :raises PermissionError: if the permission is missing
        """
        resource = self._normalize_resource(realm_or_resource, id, version)
        if not self._has_permission(action, resource):
            raise PermissionError(action, resource, self.env)

    assert_permission = require

    def permissions(self):
        """Deprecated (but still used by the HDF compatibility layer)"""
        self.env.log.warning("perm.permissions() is deprecated and "
                             "is only present for HDF compatibility")
        perm = PermissionSystem(self.env)
        actions = perm.get_user_permissions(self.username)
        return [action for action in actions if action in self]
class PermissionAdmin(Component):
    """trac-admin command provider for permission system administration."""

    implements(IAdminCommandProvider)

    # IAdminCommandProvider methods

    def get_admin_commands(self):
        # Yield (command, usage, help, completer, executor) tuples for the
        # `permission *` trac-admin sub-commands.
        yield ('permission list', '[user]',
               'List permission rules',
               self._complete_list, self._do_list)
        yield ('permission add', '<user> <action> [action] [...]',
               'Add a new permission rule',
               self._complete_add, self._do_add)
        yield ('permission remove', '<user> <action> [action] [...]',
               'Remove a permission rule',
               self._complete_remove, self._do_remove)
        yield ('permission export', '[file]',
               'Export permission rules to a file or stdout as CSV',
               self._complete_import_export, self._do_export)
        yield ('permission import', '[file]',
               'Import permission rules from a file or stdin as CSV',
               self._complete_import_export, self._do_import)

    def get_user_list(self):
        """Return the set of subjects appearing in any permission rule."""
        return set(user for (user, action) in
                   PermissionSystem(self.env).get_all_permissions())

    def get_user_perms(self, user):
        """Return the list of actions granted directly to `user`."""
        return [action for (subject, action) in
                PermissionSystem(self.env).get_all_permissions()
                if subject == user]

    # Shell-completion helpers for the commands above

    def _complete_list(self, args):
        if len(args) == 1:
            return self.get_user_list()

    def _complete_add(self, args):
        if len(args) == 1:
            return self.get_user_list()
        elif len(args) >= 2:
            # Suggest only actions not yet granted and not already typed
            return (set(PermissionSystem(self.env).get_actions())
                    - set(self.get_user_perms(args[0])) - set(args[1:-1]))

    def _complete_remove(self, args):
        if len(args) == 1:
            return self.get_user_list()
        elif len(args) >= 2:
            return set(self.get_user_perms(args[0])) - set(args[1:-1])

    def _complete_import_export(self, args):
        if len(args) == 1:
            return get_dir_list(args[-1])

    def _do_list(self, user=None):
        # Print the rules (for one user, or all) followed by the list of
        # all actions known to the system.
        permsys = PermissionSystem(self.env)
        if user:
            rows = []
            perms = permsys.get_user_permissions(user)
            for action in perms:
                if perms[action]:
                    rows.append((user, action))
        else:
            rows = permsys.get_all_permissions()
        rows.sort()
        print_table(rows, [_('User'), _('Action')])
        print
        printout(_("Available actions:"))
        actions = permsys.get_actions()
        actions.sort()
        text = ', '.join(actions)
        printout(wrap(text, initial_indent=' ', subsequent_indent=' ',
                      linesep='\n'))
        print

    def _do_add(self, user, *actions):
        permsys = PermissionSystem(self.env)
        if user.isupper():
            # Upper-case subjects would be ambiguous with action names
            raise AdminCommandError(_('All upper-cased tokens are reserved '
                                      'for permission names'))
        for action in actions:
            try:
                permsys.grant_permission(user, action)
            except self.env.db_exc.IntegrityError:
                # Duplicate rule: report it and continue with the rest
                printout(_("The user %(user)s already has permission "
                           "%(action)s.", user=user, action=action))

    def _do_remove(self, user, *actions):
        permsys = PermissionSystem(self.env)
        rows = permsys.get_all_permissions()
        for action in actions:
            found = False
            # '*' acts as a wildcard for either the subject or the action
            for u, a in rows:
                if user in (u, '*') and action in (a, '*'):
                    permsys.revoke_permission(u, a)
                    found = True
            if not found:
                raise AdminCommandError(
                    _("Cannot remove permission %(action)s for user %(user)s.",
                      action=action, user=user))

    def _do_export(self, filename=None):
        # Write one CSV row per user: username followed by its sorted actions.
        try:
            with file_or_std(filename, 'wb') as f:
                encoding = stream_encoding(f)
                linesep = os.linesep if filename else '\n'
                writer = csv.writer(f, lineterminator=linesep)
                users = self.get_user_list()
                for user in sorted(users):
                    actions = sorted(self.get_user_perms(user))
                    writer.writerow([s.encode(encoding, 'replace')
                                     for s in [user] + actions])
        except IOError, e:
            raise AdminCommandError(
                _("Cannot export to %(filename)s: %(error)s",
                  filename=path_to_unicode(filename or 'stdout'),
                  error=e.strerror))

    def _do_import(self, filename=None):
        # Grant the rules read from a CSV file (or stdin); rules already
        # present for a user are skipped.
        permsys = PermissionSystem(self.env)
        try:
            with file_or_std(filename, 'rb') as f:
                encoding = stream_encoding(f)
                linesep = os.linesep if filename else '\n'
                reader = csv.reader(f, lineterminator=linesep)
                for row in reader:
                    if len(row) < 2:
                        raise AdminCommandError(
                            _("Invalid row %(line)d. Expected <user>, "
                              "<action>, [action], [...]",
                              line=reader.line_num))
                    user = to_unicode(row[0], encoding)
                    actions = [to_unicode(action, encoding)
                               for action in row[1:]]
                    if user.isupper():
                        raise AdminCommandError(
                            _("Invalid user %(user)s on line %(line)d: All "
                              "upper-cased tokens are reserved for permission "
                              "names.", user=user, line=reader.line_num))
                    old_actions = self.get_user_perms(user)
                    for action in set(actions) - set(old_actions):
                        permsys.grant_permission(user, action)
        except csv.Error, e:
            raise AdminCommandError(
                _("Cannot import from %(filename)s line %(line)d: %(error)s ",
                  filename=path_to_unicode(filename or 'stdin'),
                  line=reader.line_num, error=e))
        except IOError, e:
            raise AdminCommandError(
                _("Cannot import from %(filename)s: %(error)s",
                  filename=path_to_unicode(filename or 'stdin'),
                  error=e.strerror))
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/env.py | trac/trac/env.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2011 Edgewall Software
# Copyright (C) 2003-2007 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
"""Trac Environment model and related APIs."""
from __future__ import with_statement
import os.path
import pkg_resources
import setuptools
import sys
from urlparse import urlsplit
from trac import db_default
from trac.admin import AdminCommandError, IAdminCommandProvider
from trac.cache import CacheManager
from trac.config import *
from trac.core import Component, ComponentManager, implements, Interface, \
ExtensionPoint, TracError
from trac.db.api import (DatabaseManager, QueryContextManager,
TransactionContextManager, with_transaction)
from trac.util import copytree, create_file, get_pkginfo, lazy, makedirs, \
read_file
from trac.util.compat import sha1
from trac.util.concurrency import threading
from trac.util.text import exception_to_unicode, path_to_unicode, printerr, \
printout
from trac.util.translation import _, N_
from trac.versioncontrol import RepositoryManager
from trac.web.href import Href
__all__ = ['Environment', 'IEnvironmentSetupParticipant', 'open_environment']
# Content of the VERSION file in the environment
_VERSION = 'Trac Environment Version 1'
class ISystemInfoProvider(Interface):
    """Provider of system information, displayed in the "About Trac"
    page and in internal error reports.
    """

    def get_system_info():
        """Yield a sequence of `(name, version)` tuples describing the
        name and version information of external packages used by a
        component.
        """
class IEnvironmentSetupParticipant(Interface):
    """Extension point interface for components that need to participate in
    the creation and upgrading of Trac environments, for example to create
    additional database tables.

    Please note that `IEnvironmentSetupParticipant` instances are called in
    arbitrary order. If your upgrades must be ordered consistently, please
    implement the ordering in a single `IEnvironmentSetupParticipant`. See
    the database upgrade infrastructure in Trac core for an example.
    """

    def environment_created():
        """Called when a new Trac environment is created."""

    def environment_needs_upgrade(db):
        """Called when Trac checks whether the environment needs to be
        upgraded.

        Should return `True` if this participant needs an upgrade to
        be performed, `False` otherwise.

        (`db` appears to be a database connection argument; confirm its
        exact contract against the upgrade machinery that calls it.)
        """

    def upgrade_environment(db):
        """Actually perform an environment upgrade.

        Implementations of this method don't need to commit any
        database transactions. This is done implicitly for each
        participant if the upgrade succeeds without an error being
        raised.

        However, if the `upgrade_environment` consists of small,
        restartable, steps of upgrade, it can decide to commit on its
        own after each successful step.
        """
class BackupError(RuntimeError):
    """Exception raised during an upgrade when the DB backup fails.

    Subclasses `RuntimeError` so generic error handlers that don't know
    about this specific failure mode still catch it.
    """
class Environment(Component, ComponentManager):
    """Trac environment manager.

    Trac stores project information in a Trac environment. It consists
    of a directory structure containing among other things:
        * a configuration file,
        * project-specific templates and plugins,
        * the wiki and ticket attachments files,
        * the SQLite database file (stores tickets, wiki pages...)
          in case the database backend is sqlite
    """
    implements(ISystemInfoProvider)

    # This component is always activated, regardless of the [components]
    # configuration section.
    required = True

    system_info_providers = ExtensionPoint(ISystemInfoProvider)
    setup_participants = ExtensionPoint(IEnvironmentSetupParticipant)

    # --- configuration options -------------------------------------------

    components_section = ConfigSection('components',
        """This section is used to enable or disable components
        provided by plugins, as well as by Trac itself. The component
        to enable/disable is specified via the name of the
        option. Whether its enabled is determined by the option value;
        setting the value to `enabled` or `on` will enable the
        component, any other value (typically `disabled` or `off`)
        will disable the component.

        The option name is either the fully qualified name of the
        components or the module/package prefix of the component. The
        former enables/disables a specific component, while the latter
        enables/disables any component in the specified
        package/module.

        Consider the following configuration snippet:
        {{{
        [components]
        trac.ticket.report.ReportModule = disabled
        webadmin.* = enabled
        }}}

        The first option tells Trac to disable the
        [wiki:TracReports report module].
        The second option instructs Trac to enable all components in
        the `webadmin` package. Note that the trailing wildcard is
        required for module/package matching.

        To view the list of active components, go to the ''Plugins''
        page on ''About Trac'' (requires `CONFIG_VIEW`
        [wiki:TracPermissions permissions]).

        See also: TracPlugins
        """)

    shared_plugins_dir = PathOption('inherit', 'plugins_dir', '',
        """Path to the //shared plugins directory//.

        Plugins in that directory are loaded in addition to those in
        the directory of the environment `plugins`, with this one
        taking precedence.

        (''since 0.11'')""")

    base_url = Option('trac', 'base_url', '',
        """Reference URL for the Trac deployment.

        This is the base URL that will be used when producing
        documents that will be used outside of the web browsing
        context, like for example when inserting URLs pointing to Trac
        resources in notification e-mails.""")

    base_url_for_redirect = BoolOption('trac', 'use_base_url_for_redirect',
        False,
        """Optionally use `[trac] base_url` for redirects.

        In some configurations, usually involving running Trac behind
        a HTTP proxy, Trac can't automatically reconstruct the URL
        that is used to access it. You may need to use this option to
        force Trac to use the `base_url` setting also for
        redirects. This introduces the obvious limitation that this
        environment will only be usable when accessible from that URL,
        as redirects are frequently used. ''(since 0.10.5)''""")

    secure_cookies = BoolOption('trac', 'secure_cookies', False,
        """Restrict cookies to HTTPS connections.

        When true, set the `secure` flag on all cookies so that they
        are only sent to the server on HTTPS connections. Use this if
        your Trac instance is only accessible through HTTPS. (''since
        0.11.2'')""")

    project_name = Option('project', 'name', 'My Project',
        """Name of the project.""")

    project_description = Option('project', 'descr', 'My example project',
        """Short description of the project.""")

    project_url = Option('project', 'url', '',
        """URL of the main project web site, usually the website in
        which the `base_url` resides. This is used in notification
        e-mails.""")

    project_admin = Option('project', 'admin', '',
        """E-Mail address of the project's administrator.""")

    project_admin_trac_url = Option('project', 'admin_trac_url', '.',
        """Base URL of a Trac instance where errors in this Trac
        should be reported.

        This can be an absolute or relative URL, or '.' to reference
        this Trac instance. An empty value will disable the reporting
        buttons. (''since 0.11.3'')""")

    project_footer = Option('project', 'footer',
        N_('Visit the Trac open source project at<br />'
           '<a href="http://trac.edgewall.org/">'
           'http://trac.edgewall.org/</a>'),
        """Page footer text (right-aligned).""")

    project_icon = Option('project', 'icon', 'common/trac.ico',
        """URL of the icon of the project.""")

    log_type = Option('logging', 'log_type', 'none',
        """Logging facility to use.

        Should be one of (`none`, `file`, `stderr`, `syslog`, `winlog`).""")

    log_file = Option('logging', 'log_file', 'trac.log',
        """If `log_type` is `file`, this should be a path to the
        log-file. Relative paths are resolved relative to the `log`
        directory of the environment.""")

    log_level = Option('logging', 'log_level', 'DEBUG',
        """Level of verbosity in log.

        Should be one of (`CRITICAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`).""")

    log_format = Option('logging', 'log_format', None,
        """Custom logging format.

        If nothing is set, the following will be used:

        Trac[$(module)s] $(levelname)s: $(message)s

        In addition to regular key names supported by the Python
        logger library (see
        http://docs.python.org/library/logging.html), one could use:

        - $(path)s     the path for the current environment
        - $(basename)s the last path component of the current environment
        - $(project)s  the project name

        Note the usage of `$(...)s` instead of `%(...)s` as the latter form
        would be interpreted by the ConfigParser itself.

        Example:
        `($(thread)d) Trac[$(basename)s:$(module)s] $(levelname)s: $(message)s`

        ''(since 0.10.5)''""")

    def __init__(self, path, create=False, options=[]):
        """Initialize the Trac environment.

        :param path: the absolute path to the Trac environment
        :param create: if `True`, the environment is created and
                       populated with default data; otherwise, the
                       environment is expected to already exist.
        :param options: A list of `(section, name, value)` tuples that
                       define configuration options
        """
        ComponentManager.__init__(self)

        self.path = path
        self.systeminfo = []
        self._href = self._abs_href = None

        if create:
            # `create` also loads the configuration (see `create` below).
            self.create(options)
        else:
            self.verify()
            self.setup_config()

        if create:
            for setup_participant in self.setup_participants:
                setup_participant.environment_created()

    def get_systeminfo(self):
        """Return a list of `(name, version)` tuples describing the
        name and version information of external packages used by Trac
        and plugins.
        """
        info = self.systeminfo[:]
        for provider in self.system_info_providers:
            info.extend(provider.get_system_info() or [])
        # Sort alphabetically, but keep the 'Trac' entry first.
        info.sort(key=lambda (name, version): (name != 'Trac', name.lower()))
        return info

    # ISystemInfoProvider methods

    def get_system_info(self):
        from trac import core, __version__ as VERSION
        yield 'Trac', pkg_resources.resource_string('trac', 'TRAC_VERSION')
        yield 'Bloodhound Trac', get_pkginfo(core).get('version', VERSION)
        yield 'Python', sys.version
        yield 'setuptools', setuptools.__version__
        from trac.util.datefmt import pytz
        if pytz is not None:
            yield 'pytz', pytz.__version__

    def component_activated(self, component):
        """Initialize additional member variables for components.

        Every component activated through the `Environment` object
        gets three member variables: `env` (the environment object),
        `config` (the environment configuration) and `log` (a logger
        object)."""
        component.env = self
        component.config = self.config
        component.log = self.log

    def _component_name(self, name_or_class):
        # Normalize a component class (or dotted name) to a lower-case
        # fully qualified module.Class name.
        name = name_or_class
        if not isinstance(name_or_class, basestring):
            name = name_or_class.__module__ + '.' + name_or_class.__name__
        return name.lower()

    @property
    def _component_rules(self):
        # Lazily build and cache the enable/disable rules from the
        # [components] section; a trailing `.*` means a package prefix rule.
        try:
            return self._rules
        except AttributeError:
            self._rules = {}
            for name, value in self.components_section.options():
                if name.endswith('.*'):
                    name = name[:-2]
                self._rules[name.lower()] = value.lower() in ('enabled', 'on')
            return self._rules

    def is_component_enabled(self, cls):
        """Implemented to only allow activation of components that are
        not disabled in the configuration.

        This is called by the `ComponentManager` base class when a
        component is about to be activated. If this method returns
        `False`, the component does not get activated. If it returns
        `None`, the component only gets activated if it is located in
        the `plugins` directory of the environment.
        """
        component_name = self._component_name(cls)

        # Disable the pre-0.11 WebAdmin plugin
        # Please note that there's no recommendation to uninstall the
        # plugin because doing so would obviously break the backwards
        # compatibility that the new integration administration
        # interface tries to provide for old WebAdmin extensions
        if component_name.startswith('webadmin.'):
            self.log.info("The legacy TracWebAdmin plugin has been "
                          "automatically disabled, and the integrated "
                          "administration interface will be used "
                          "instead.")
            return False

        # Walk from the fully qualified name up through its package
        # prefixes, honoring the most specific matching rule.
        rules = self._component_rules
        cname = component_name
        while cname:
            enabled = rules.get(cname)
            if enabled is not None:
                return enabled
            idx = cname.rfind('.')
            if idx < 0:
                break
            cname = cname[:idx]

        # By default, all components in the trac package are enabled
        return component_name.startswith('trac.') or None

    def enable_component(self, cls):
        """Enable a component or module."""
        self._component_rules[self._component_name(cls)] = True

    def verify(self):
        """Verify that the provided path points to a valid Trac environment
        directory."""
        try:
            tag = read_file(os.path.join(self.path, 'VERSION')).splitlines()[0]
            if tag != _VERSION:
                raise Exception("Unknown Trac environment type '%s'" % tag)
        except Exception, e:
            raise TracError("No Trac environment found at %s\n%s"
                            % (self.path, e))

    def get_db_cnx(self):
        """Return a database connection from the connection pool

        :deprecated: Use :meth:`db_transaction` or :meth:`db_query` instead

        `db_transaction` for obtaining the `db` database connection
        which can be used for performing any query
        (SELECT/INSERT/UPDATE/DELETE)::

           with env.db_transaction as db:
               ...

        Note that within the block, you don't need to (and shouldn't)
        call ``commit()`` yourself, the context manager will take care
        of it (if it's the outermost such context manager on the
        stack).

        `db_query` for obtaining a `db` database connection which can
        be used for performing SELECT queries only::

           with env.db_query as db:
               ...
        """
        return DatabaseManager(self).get_connection()

    @lazy
    def db_exc(self):
        """Return an object (typically a module) containing all the
        backend-specific exception types as attributes, named
        according to the Python Database API
        (http://www.python.org/dev/peps/pep-0249/).

        To catch a database exception, use the following pattern::

            try:
                with env.db_transaction as db:
                    ...
            except env.db_exc.IntegrityError, e:
                ...
        """
        return DatabaseManager(self).get_exceptions()

    def with_transaction(self, db=None):
        """Decorator for transaction functions :deprecated:"""
        # Delegates to the module-level `with_transaction` decorator
        # (name resolves to the global, not to this method).
        return with_transaction(self, db)

    def get_read_db(self):
        """Return a database connection for read purposes :deprecated:

        See `trac.db.api.get_read_db` for detailed documentation."""
        return DatabaseManager(self).get_connection(readonly=True)

    @property
    def db_query(self):
        """Return a context manager
        (`~trac.db.api.QueryContextManager`) which can be used to
        obtain a read-only database connection.

        Example::

            with env.db_query as db:
                cursor = db.cursor()
                cursor.execute("SELECT ...")
                for row in cursor.fetchall():
                    ...

        Note that a connection retrieved this way can be "called"
        directly in order to execute a query::

            with env.db_query as db:
                for row in db("SELECT ..."):
                    ...

        :warning: after a `with env.db_query as db` block, though the
          `db` variable is still defined, you shouldn't use it as it
          might have been closed when exiting the context, if this
          context was the outermost context (`db_query` or
          `db_transaction`).

        If you don't need to manipulate the connection itself, this
        can even be simplified to::

            for row in env.db_query("SELECT ..."):
                ...
        """
        return QueryContextManager(self)

    @property
    def db_transaction(self):
        """Return a context manager
        (`~trac.db.api.TransactionContextManager`) which can be used
        to obtain a writable database connection.

        Example::

            with env.db_transaction as db:
                cursor = db.cursor()
                cursor.execute("UPDATE ...")

        Upon successful exit of the context, the context manager will
        commit the transaction. In case of nested contexts, only the
        outermost context performs a commit. However, should an
        exception happen, any context manager will perform a rollback.

        You should *not* call `commit()` yourself within such block,
        as this will force a commit even if that transaction is part
        of a larger transaction.

        Like for its read-only counterpart, you can directly execute a
        DML query on the `db`::

            with env.db_transaction as db:
                db("UPDATE ...")

        :warning: after a `with env.db_transaction` as db` block,
          though the `db` variable is still available, you shouldn't
          use it as it might have been closed when exiting the
          context, if this context was the outermost context
          (`db_query` or `db_transaction`).

        If you don't need to manipulate the connection itself, this
        can also be simplified to::

            env.db_transaction("UPDATE ...")
        """
        return TransactionContextManager(self)

    def shutdown(self, tid=None):
        """Close the environment."""
        RepositoryManager(self).shutdown(tid)
        DatabaseManager(self).shutdown(tid)
        if tid is None:
            # Only tear down logging on a full (non per-thread) shutdown.
            self.log.removeHandler(self._log_handler)
            self._log_handler.flush()
            self._log_handler.close()
            del self._log_handler

    def get_repository(self, reponame=None, authname=None):
        """Return the version control repository with the given name,
        or the default repository if `None`.

        The standard way of retrieving repositories is to use the
        methods of `RepositoryManager`. This method is retained here
        for backward compatibility.

        :param reponame: the name of the repository
        :param authname: the user name for authorization (not used
                         anymore, left here for compatibility with
                         0.11)
        """
        return RepositoryManager(self).get_repository(reponame)

    def create(self, options=[]):
        """Create the basic directory structure of the environment,
        initialize the database and populate the configuration file
        with default values.

        If options contains ('inherit', 'file'), default values will
        not be loaded; they are expected to be provided by that file
        or other options.
        """
        # Create the directory structure
        if not os.path.exists(self.path):
            os.mkdir(self.path)
        os.mkdir(self.get_log_dir())
        os.mkdir(self.get_htdocs_dir())
        os.mkdir(os.path.join(self.path, 'plugins'))

        # Create a few files
        create_file(os.path.join(self.path, 'VERSION'), _VERSION + '\n')
        create_file(os.path.join(self.path, 'README'),
                    'This directory contains a Trac environment.\n'
                    'Visit http://trac.edgewall.org/ for more information.\n')

        # Setup the default configuration
        os.mkdir(os.path.join(self.path, 'conf'))
        create_file(os.path.join(self.path, 'conf', 'trac.ini.sample'))
        config = Configuration(os.path.join(self.path, 'conf', 'trac.ini'))
        for section, name, value in options:
            config.set(section, name, value)
        config.save()
        self.setup_config()
        if not any((section, option) == ('inherit', 'file')
                   for section, option, value in options):
            self.config.set_defaults(self)
            self.config.save()

        # Create the database
        DatabaseManager(self).init_db()

    def get_version(self, db=None, initial=False):
        """Return the current version of the database.  If the
        optional argument `initial` is set to `True`, the version of
        the database used at the time of creation will be returned.

        In practice, for database created before 0.11, this will
        return `False` which is "older" than any db version number.

        :since: 0.11

        :since 1.0: deprecation warning: the `db` parameter is no
                    longer used and will be removed in version 1.1.1
        """
        # NOTE: returns the (empty, falsy) row list itself when no
        # matching row exists, not `False` explicitly.
        rows = self.db_query("""
                SELECT value FROM system WHERE name='%sdatabase_version'
                """ % ('initial_' if initial else ''))
        return rows and int(rows[0][0])

    def setup_config(self):
        """Load the configuration file."""
        self.config = Configuration(os.path.join(self.path, 'conf',
                                                 'trac.ini'),
                                    {'envname': os.path.basename(self.path)})
        self.setup_log()
        from trac.loader import load_components
        plugins_dir = self.shared_plugins_dir
        load_components(self, plugins_dir and (plugins_dir,))

    def get_templates_dir(self):
        """Return absolute path to the templates directory."""
        return os.path.join(self.path, 'templates')

    def get_htdocs_dir(self):
        """Return absolute path to the htdocs directory."""
        return os.path.join(self.path, 'htdocs')

    def get_log_dir(self):
        """Return absolute path to the log directory."""
        return os.path.join(self.path, 'log')

    def setup_log(self):
        """Initialize the logging sub-system."""
        from trac.log import logger_handler_factory
        logtype = self.log_type
        logfile = self.log_file
        if logtype == 'file' and not os.path.isabs(logfile):
            logfile = os.path.join(self.get_log_dir(), logfile)
        format = self.log_format
        # Unique logger id per environment path, so several environments
        # in one process don't share a logger.
        logid = 'Trac.%s' % sha1(self.path).hexdigest()
        if format:
            # `$(...)s` placeholders avoid ConfigParser interpolation;
            # expand the Trac-specific ones before handing to logging.
            format = format.replace('$(', '%(') \
                           .replace('%(path)s', self.path) \
                           .replace('%(basename)s',
                                    os.path.basename(self.path)) \
                           .replace('%(project)s', self.project_name)
        self.log, self._log_handler = logger_handler_factory(
            logtype, logfile, self.log_level, logid, format=format)

        from trac import core, __version__ as VERSION
        self.log.info('-' * 32 + ' environment startup [Trac %s] ' + '-' * 32,
                      get_pkginfo(core).get('version', VERSION))

    def get_known_users(self, cnx=None):
        """Generator that yields information about all known users,
        i.e. users that have logged in to this Trac environment and
        possibly set their name and email.

        This function generates one tuple for every user, of the form
        (username, name, email) ordered alpha-numerically by username.

        :param cnx: the database connection; if ommitted, a new
                    connection is retrieved

        :since 1.0: deprecation warning: the `cnx` parameter is no
                    longer used and will be removed in version 1.1.1
        """
        for username, name, email in self.db_query("""
                SELECT DISTINCT s.sid, n.value, e.value
                FROM session AS s
                 LEFT JOIN session_attribute AS n ON (n.sid=s.sid
                  and n.authenticated=1 AND n.name = 'name')
                 LEFT JOIN session_attribute AS e ON (e.sid=s.sid
                  AND e.authenticated=1 AND e.name = 'email')
                WHERE s.authenticated=1 ORDER BY s.sid
                """):
            yield username, name, email

    def backup(self, dest=None):
        """Create a backup of the database.

        :param dest: Destination file; if not specified, the backup is
                     stored in a file called db_name.trac_version.bak
        """
        return DatabaseManager(self).backup(dest)

    def needs_upgrade(self):
        """Return whether the environment needs to be upgraded."""
        for participant in self.setup_participants:
            with self.db_query as db:
                if participant.environment_needs_upgrade(db):
                    self.log.warn("Component %s requires environment upgrade",
                                  participant)
                    return True
        return False

    def upgrade(self, backup=False, backup_dest=None):
        """Upgrade database.

        :param backup: whether or not to backup before upgrading
        :param backup_dest: name of the backup file
        :return: whether the upgrade was performed
        """
        # First collect the participants that actually need an upgrade...
        upgraders = []
        for participant in self.setup_participants:
            with self.db_query as db:
                if participant.environment_needs_upgrade(db):
                    upgraders.append(participant)
        if not upgraders:
            return

        if backup:
            try:
                self.backup(backup_dest)
            except Exception, e:
                raise BackupError(e)

        # ... then run each upgrade in its own transaction.
        for participant in upgraders:
            self.log.info("%s.%s upgrading...", participant.__module__,
                          participant.__class__.__name__)
            with self.db_transaction as db:
                participant.upgrade_environment(db)
            # Database schema may have changed, so close all connections
            DatabaseManager(self).shutdown()
        return True

    @property
    def href(self):
        """The application root path"""
        if not self._href:
            self._href = Href(urlsplit(self.abs_href.base)[2])
        return self._href

    @property
    def abs_href(self):
        """The application URL"""
        if not self._abs_href:
            if not self.base_url:
                self.log.warn("base_url option not set in configuration, "
                              "generated links may be incorrect")
                self._abs_href = Href('')
            else:
                self._abs_href = Href(self.base_url)
        return self._abs_href
class EnvironmentSetup(Component):
    """Manage automatic environment upgrades."""

    required = True

    implements(IEnvironmentSetupParticipant)

    # IEnvironmentSetupParticipant methods

    def environment_created(self):
        """Insert default data into the database."""
        with self.env.db_transaction as db:
            for table, cols, vals in db_default.get_data(db):
                db.executemany("INSERT INTO %s (%s) VALUES (%s)"
                               % (table, ','.join(cols),
                                  ','.join(['%s' for c in cols])),
                               vals)
        self._update_sample_config()

    def environment_needs_upgrade(self, db):
        # Compare the stored schema version against the one this code
        # expects; a newer database than the code is a hard error.
        dbver = self.env.get_version(db)
        if dbver == db_default.db_version:
            return False
        elif dbver > db_default.db_version:
            raise TracError(_('Database newer than Trac version'))
        self.log.info("Trac database schema version is %d, should be %d",
                      dbver, db_default.db_version)
        return True

    def upgrade_environment(self, db):
        """Each db version should have its own upgrade module, named
        upgrades/dbN.py, where 'N' is the version number (int).
        """
        cursor = db.cursor()
        dbver = self.env.get_version()
        for i in range(dbver + 1, db_default.db_version + 1):
            name = 'db%i' % i
            try:
                upgrades = __import__('upgrades', globals(), locals(), [name])
                script = getattr(upgrades, name)
            except AttributeError:
                raise TracError(_("No upgrade module for version %(num)i "
                                  "(%(version)s.py)", num=i, version=name))
            script.do_upgrade(self.env, i, cursor)
            cursor.execute("""
                UPDATE system SET value=%s WHERE name='database_version'
                """, (i,))
            self.log.info("Upgraded database version from %d to %d", i - 1, i)
            # Commit after each step so a later failure doesn't roll back
            # already-applied schema upgrades.
            db.commit()
        self._update_sample_config()

    # Internal methods

    def _update_sample_config(self):
        # Rewrite conf/trac.ini.sample with the current option defaults,
        # if the sample file exists.
        filename = os.path.join(self.env.path, 'conf', 'trac.ini.sample')
        if not os.path.isfile(filename):
            return
        config = Configuration(filename)
        for section, default_options in config.defaults().iteritems():
            for name, value in default_options.iteritems():
                config.set(section, name, value)
        try:
            config.save()
            self.log.info("Wrote sample configuration file with the new "
                          "settings and their default values: %s",
                          filename)
        except IOError, e:
            self.log.warn("Couldn't write sample configuration file (%s)", e,
                          exc_info=True)
# Per-process cache of `Environment` instances keyed by normalized path,
# used by `open_environment(use_cache=True)`; all access is guarded by
# `env_cache_lock`.
env_cache = {}
env_cache_lock = threading.Lock()
def open_environment(env_path=None, use_cache=False):
"""Open an existing environment object, and verify that the database is up
to date.
:param env_path: absolute path to the environment directory; if
ommitted, the value of the `TRAC_ENV` environment
variable is used
:param use_cache: whether the environment should be cached for
subsequent invocations of this function
:return: the `Environment` object
"""
if not env_path:
env_path = os.getenv('TRAC_ENV')
if not env_path:
raise TracError(_('Missing environment variable "TRAC_ENV". '
'Trac requires this variable to point to a valid '
'Trac environment.'))
env_path = os.path.normcase(os.path.normpath(env_path))
if use_cache:
with env_cache_lock:
env = env_cache.get(env_path)
if env and env.config.parse_if_needed():
# The environment configuration has changed, so shut it down
# and remove it from the cache so that it gets reinitialized
env.log.info('Reloading environment due to configuration '
'change')
env.shutdown()
del env_cache[env_path]
env = None
if env is None:
env = env_cache.setdefault(env_path, open_environment(env_path))
else:
CacheManager(env).reset_metadata()
else:
env = Environment(env_path)
needs_upgrade = False
try:
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/html.py | trac/trac/util/html.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from HTMLParser import HTMLParser
import re
from genshi import Markup, HTML, escape, unescape
from genshi.core import stripentities, striptags, START, END
from genshi.builder import Element, ElementFactory, Fragment
from genshi.filters.html import HTMLSanitizer
from genshi.input import ParseError
__all__ = ['escape', 'unescape', 'html', 'plaintext', 'find_element',
'TracHTMLSanitizer', 'Deuglifier', 'FormTokenInjector']
class TracHTMLSanitizer(HTMLSanitizer):
    """Sanitize HTML constructions which are potentially vector of
    phishing or XSS attacks, in user-supplied HTML.

    See also `genshi.HTMLSanitizer`_.

    .. _genshi.HTMLSanitizer:
       http://genshi.edgewall.org/wiki/Documentation/filters.html#html-sanitizer
    """

    # Whitelist of CSS properties allowed in `style` attributes.
    SAFE_CSS = frozenset([
        # CSS 3 properties <http://www.w3.org/TR/CSS/#properties>
        'background', 'background-attachment', 'background-color',
        'background-image', 'background-position', 'background-repeat',
        'border', 'border-bottom', 'border-bottom-color',
        'border-bottom-style', 'border-bottom-width', 'border-collapse',
        'border-color', 'border-left', 'border-left-color',
        'border-left-style', 'border-left-width', 'border-right',
        'border-right-color', 'border-right-style', 'border-right-width',
        'border-spacing', 'border-style', 'border-top', 'border-top-color',
        'border-top-style', 'border-top-width', 'border-width', 'bottom',
        'caption-side', 'clear', 'clip', 'color', 'content',
        'counter-increment', 'counter-reset', 'cursor', 'direction', 'display',
        'empty-cells', 'float', 'font', 'font-family', 'font-size',
        'font-style', 'font-variant', 'font-weight', 'height', 'left',
        'letter-spacing', 'line-height', 'list-style', 'list-style-image',
        'list-style-position', 'list-style-type', 'margin', 'margin-bottom',
        'margin-left', 'margin-right', 'margin-top', 'max-height', 'max-width',
        'min-height', 'min-width', 'opacity', 'orphans', 'outline',
        'outline-color', 'outline-style', 'outline-width', 'overflow',
        'padding', 'padding-bottom', 'padding-left', 'padding-right',
        'padding-top', 'page-break-after', 'page-break-before',
        'page-break-inside', 'position', 'quotes', 'right', 'table-layout',
        'text-align', 'text-decoration', 'text-indent', 'text-transform',
        'top', 'unicode-bidi', 'vertical-align', 'visibility', 'white-space',
        'widows', 'width', 'word-spacing', 'z-index',
    ])

    def __init__(self, safe_schemes=HTMLSanitizer.SAFE_SCHEMES,
                 safe_css=SAFE_CSS):
        # Allow the `style` attribute on top of Genshi's default safe
        # attribute set; its value is vetted by `sanitize_css` below.
        safe_attrs = HTMLSanitizer.SAFE_ATTRS | frozenset(['style'])
        safe_schemes = frozenset(safe_schemes)
        super(TracHTMLSanitizer, self).__init__(safe_attrs=safe_attrs,
                                                safe_schemes=safe_schemes)
        self.safe_css = frozenset(safe_css)

    # Detect `expression(...)` even when written with look-alike Unicode
    # letters, which IE6 would still interpret.
    # IE6 <http://heideri.ch/jso/#80>
    _EXPRESSION_SEARCH = re.compile(
        u'[eE\uFF25\uFF45]'        # FULLWIDTH LATIN CAPITAL LETTER E
                                   # FULLWIDTH LATIN SMALL LETTER E
        u'[xX\uFF38\uFF58]'        # FULLWIDTH LATIN CAPITAL LETTER X
                                   # FULLWIDTH LATIN SMALL LETTER X
        u'[pP\uFF30\uFF50]'        # FULLWIDTH LATIN CAPITAL LETTER P
                                   # FULLWIDTH LATIN SMALL LETTER P
        u'[rR\u0280\uFF32\uFF52]'  # LATIN LETTER SMALL CAPITAL R
                                   # FULLWIDTH LATIN CAPITAL LETTER R
                                   # FULLWIDTH LATIN SMALL LETTER R
        u'[eE\uFF25\uFF45]'        # FULLWIDTH LATIN CAPITAL LETTER E
                                   # FULLWIDTH LATIN SMALL LETTER E
        u'[sS\uFF33\uFF53]{2}'     # FULLWIDTH LATIN CAPITAL LETTER S
                                   # FULLWIDTH LATIN SMALL LETTER S
        u'[iI\u026A\uFF29\uFF49]'  # LATIN LETTER SMALL CAPITAL I
                                   # FULLWIDTH LATIN CAPITAL LETTER I
                                   # FULLWIDTH LATIN SMALL LETTER I
        u'[oO\uFF2F\uFF4F]'        # FULLWIDTH LATIN CAPITAL LETTER O
                                   # FULLWIDTH LATIN SMALL LETTER O
        u'[nN\u0274\uFF2E\uFF4E]'  # LATIN LETTER SMALL CAPITAL N
                                   # FULLWIDTH LATIN CAPITAL LETTER N
                                   # FULLWIDTH LATIN SMALL LETTER N
        ).search

    # Find `url(...)` tokens, again allowing look-alike Unicode letters.
    # IE6 <http://openmya.hacker.jp/hasegawa/security/expression.txt>
    # 7) Particular bit of Unicode characters
    _URL_FINDITER = re.compile(
        u'[Uu][Rr\u0280][Ll\u029F]\s*\(([^)]+)').finditer

    def sanitize_css(self, text):
        """Return the list of declarations from `text` that survive the
        CSS whitelist and the `expression`/`url` attack checks."""
        decls = []
        text = self._strip_css_comments(self._replace_unicode_escapes(text))
        for decl in filter(None, text.split(';')):
            decl = decl.strip()
            if not decl:
                continue
            try:
                prop, value = decl.split(':', 1)
            except ValueError:
                continue
            if not self.is_safe_css(prop.strip().lower(), value.strip()):
                continue
            is_evil = False
            if self._EXPRESSION_SEARCH(decl):
                is_evil = True
            for match in self._URL_FINDITER(decl):
                if not self.is_safe_uri(match.group(1)):
                    is_evil = True
                    break
            if not is_evil:
                decls.append(decl.strip())
        return decls

    def __call__(self, stream):
        """Remove input type="password" elements from the stream
        """
        # `suppress` skips the matching END event of a removed element.
        suppress = False
        for kind, data, pos in super(TracHTMLSanitizer, self).__call__(stream):
            if kind is START:
                tag, attrs = data
                if (tag == 'input' and
                    attrs.get('type', '').lower() == 'password'):
                    suppress = True
                else:
                    yield kind, data, pos
            elif kind is END:
                if not suppress:
                    yield kind, data, pos
                suppress = False
            else:
                yield kind, data, pos

    def is_safe_css(self, prop, value):
        """Determine whether the given css property declaration is to be
        considered safe for inclusion in the output.
        """
        if prop not in self.safe_css:
            return False
        # Position can be used for phishing, 'static' excepted
        if prop == 'position':
            return value.lower() == 'static'
        # Negative margins can be used for phishing
        if prop.startswith('margin'):
            return '-' not in value
        return True

    _NORMALIZE_NEWLINES = re.compile(r'\r\n').sub
    _UNICODE_ESCAPE = re.compile(
        r"""\\([0-9a-fA-F]{1,6})\s?|\\([^\r\n\f0-9a-fA-F'"{};:()#*])""",
        re.UNICODE).sub

    def _replace_unicode_escapes(self, text):
        # Decode CSS backslash escapes (`\41` -> 'A') so the attack
        # detectors above see the literal characters.
        def _repl(match):
            t = match.group(1)
            if t:
                code = int(t, 16)
                chr = unichr(code)  # shadows the builtin, kept as-is
                if code <= 0x1f:
                    # replace space character because IE ignores control
                    # characters
                    chr = ' '
                elif chr == '\\':
                    chr = r'\\'
                return chr
            t = match.group(2)
            if t == '\\':
                return r'\\'
            else:
                return t
        return self._UNICODE_ESCAPE(_repl,
                                    self._NORMALIZE_NEWLINES('\n', text))

    _CSS_COMMENTS = re.compile(r'/\*.*?\*/').sub

    def _strip_css_comments(self, text):
        """Replace comments with space character instead of superclass which
        removes comments to avoid problems when nested comments.
        """
        return self._CSS_COMMENTS(' ', text)
class Deuglifier(object):
    """Helper base class used for cleaning up HTML riddled with ``<FONT
    COLOR=...>`` tags and replace them with appropriate ``<span
    class="...">``.

    The subclass must define a `rules()` static method returning a
    list of regular expression fragments, each defining a capture
    group in which the name will be reused for the span's class. Two
    special group names, ``font`` and ``endfont`` are used to emit
    ``<span>`` and ``</span>``, respectively.
    """
    def __new__(cls):
        self = object.__new__(cls)
        # Compile and cache the combined rules pattern once per class.
        # Check `cls.__dict__` rather than `hasattr(cls, ...)`: `hasattr`
        # also finds a pattern cached on a base class, so a subclass of a
        # concrete deuglifier would silently reuse its parent's compiled
        # rules instead of compiling its own `rules()`.
        if '_compiled_rules' not in cls.__dict__:
            cls._compiled_rules = re.compile('(?:%s)' % '|'.join(cls.rules()))
        self._compiled_rules = cls._compiled_rules
        return self

    def format(self, indata):
        """Return `indata` with every rule match replaced via `replace`."""
        return re.sub(self._compiled_rules, self.replace, indata)

    def replace(self, fullmatch):
        """Regex replacement callback: map the name of the matched group
        to an opening ``<span class="code-...">`` tag, or to a plain
        ``<span>``/``</span>`` for the `font`/`endfont` groups."""
        for mtype, match in fullmatch.groupdict().items():
            if match:
                if mtype == 'font':
                    return '<span>'
                elif mtype == 'endfont':
                    return '</span>'
                return '<span class="code-%s">' % mtype
class FormTokenInjector(HTMLParser):
    """Identify and protect forms from CSRF attacks.

    The parsed document is echoed to `out` unchanged, except that a
    hidden ``__FORM_TOKEN`` input field is inserted right after the
    opening tag of every form submitted via POST.
    """

    def __init__(self, form_token, out):
        HTMLParser.__init__(self)
        self.out = out
        self.token = form_token

    def handle_starttag(self, tag, attrs):
        # Echo the tag exactly as it appeared in the input.
        self.out.write(self.get_starttag_text())
        if tag.lower() != 'form':
            return
        for name, value in attrs:
            if name.lower() == 'method' and value.lower() == 'post':
                self.out.write('<input type="hidden" name="__FORM_TOKEN"'
                               ' value="%s"/>' % self.token)
                break

    def handle_startendtag(self, tag, attrs):
        self.out.write(self.get_starttag_text())

    def handle_charref(self, name):
        self.out.write('&#%s;' % name)

    def handle_entityref(self, name):
        self.out.write('&%s;' % name)

    def handle_comment(self, data):
        self.out.write('<!--%s-->' % data)

    def handle_decl(self, data):
        self.out.write('<!%s>' % data)

    def handle_pi(self, data):
        self.out.write('<?%s?>' % data)

    def handle_data(self, data):
        self.out.write(data)

    def handle_endtag(self, tag):
        self.out.write('</' + tag + '>')
class TransposingElementFactory(ElementFactory):
    """A `genshi.builder.ElementFactory` which applies `func` to the
    named attributes before creating a `genshi.builder.Element`.
    """

    def __init__(self, func, namespace=None):
        ElementFactory.__init__(self, namespace=namespace)
        self.func = func

    def __getattr__(self, name):
        # Transpose the requested element name before delegating to the
        # base factory.
        transposed = self.func(name)
        return ElementFactory.__getattr__(self, transposed)
# Element factory producing lower-cased tag names, e.g. html.DIV -> <div>
html = TransposingElementFactory(str.lower)
def plaintext(text, keeplinebreaks=True):
    """Extract the text elements from (X)HTML content

    :param text: `unicode` or `genshi.builder.Fragment`
    :param keeplinebreaks: optionally keep linebreaks
    """
    if isinstance(text, Fragment):
        extracted = text.generate().render('text', encoding=None)
    else:
        extracted = stripentities(striptags(text))
    if keeplinebreaks:
        return extracted
    return extracted.replace(u'\n', u' ')
def find_element(frag, attr=None, cls=None):
    """Return the first element in the fragment having the given attribute or
    class, using a preorder depth-first search.
    """
    if isinstance(frag, Element):
        attrib = frag.attrib
        if attr is not None and attr in attrib:
            return frag
        if cls is not None and cls in attrib.get('class', '').split():
            return frag
    if isinstance(frag, Fragment):
        for child in frag.children:
            found = find_element(child, attr, cls)
            if found is not None:
                return found
def expand_markup(stream, ctxt=None):
    """A Genshi stream filter for expanding `genshi.Markup` events.

    Note: Expansion may not be possible if the fragment is badly
    formed, or partial.
    """
    for event in stream:
        data = event[1]
        if not isinstance(data, Markup):
            yield event
            continue
        try:
            for subevent in HTML(data):
                yield subevent
        except ParseError:
            # Fall back to passing the unparsable markup through as-is
            yield event
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/compat.py | trac/trac/util/compat.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Various classes and functions to provide some backwards-compatibility with
previous versions of Python from 2.5 onward.
"""
import os
# Import symbols previously defined here, kept around so that plugins importing
# them don't suddenly stop working
# (these names used to be backports of builtins for old Python versions)
all = all
any = any
frozenset = frozenset
reversed = reversed
set = set
sorted = sorted
from functools import partial
from hashlib import md5, sha1
from itertools import groupby, tee
class py_groupby(object):
    # Pure-Python reimplementation of `itertools.groupby`, kept so plugins
    # importing it from here keep working (Python 2 iterator protocol).
    def __init__(self, iterable, key=None):
        if key is None:
            key = lambda x: x
        self.keyfunc = key
        self.it = iter(iterable)
        # xrange(0) is a sentinel that compares unequal to any real key
        self.tgtkey = self.currkey = self.currvalue = xrange(0)
    def __iter__(self):
        return self
    def next(self):
        # Skip values the previous group's iterator left unconsumed
        while self.currkey == self.tgtkey:
            self.currvalue = self.it.next() # Exit on StopIteration
            self.currkey = self.keyfunc(self.currvalue)
        self.tgtkey = self.currkey
        return (self.currkey, self._grouper(self.tgtkey))
    def _grouper(self, tgtkey):
        # Yield values for as long as they map to the current group's key
        while self.currkey == tgtkey:
            yield self.currvalue
            self.currvalue = self.it.next() # Exit on StopIteration
            self.currkey = self.keyfunc(self.currvalue)
def rpartition(s, sep):
    """Compatibility shim kept for plugins; delegates to `str.rpartition`."""
    return s.rpartition(sep)
# An error is raised by subprocess if we ever pass close_fds=True on Windows.
# We want it to be True on all other platforms to not leak file descriptors.
# Intended to be passed as the `close_fds` argument of `subprocess.Popen`.
close_fds = os.name != 'nt'
# inspect.cleandoc() was introduced in 2.6
try:
    from inspect import cleandoc
except ImportError:
    import sys
    # Taken from Python 2.6
    def cleandoc(doc):
        """De-indent a multi-line text.

        Any whitespace that can be uniformly removed from the second line
        onwards is removed."""
        try:
            lines = doc.expandtabs().split('\n')
        except UnicodeError:
            # Mirror inspect.cleandoc: give up on undecodable docstrings
            return None
        else:
            # Find minimum indentation of any non-blank lines after first line
            margin = sys.maxint
            for line in lines[1:]:
                content = len(line.lstrip())
                if content:
                    indent = len(line) - content
                    margin = min(margin, indent)
            # Remove indentation
            if lines:
                lines[0] = lines[0].lstrip()
            if margin < sys.maxint:
                for i in range(1, len(lines)):
                    lines[i] = lines[i][margin:]
            # Remove any trailing or leading blank lines
            while lines and not lines[-1]:
                lines.pop()
            while lines and not lines[0]:
                lines.pop(0)
            return '\n'.join(lines)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/autoreload.py | trac/trac/util/autoreload.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import sys
import threading
import time
import traceback
# Seconds between checks for modified module files
_SLEEP_TIME = 1
def _reloader_thread(modification_callback, loop_callback):
    """When this function is run from the main thread, it will force other
    threads to exit when any modules currently loaded change.

    @param modification_callback: a function taking a single argument, the
        modified file, which is called every time a modification is detected
    @param loop_callback: a function taking no arguments, which is called
        after every modification check
    """
    mtimes = {}  # filename -> modification time at first sighting
    while True:
        for filename in filter(None, [getattr(module, '__file__', None)
                                      for module in sys.modules.values()]):
            while not os.path.isfile(filename): # Probably in an egg or zip file
                filename = os.path.dirname(filename)
                if not filename:
                    break
            if not filename: # Couldn't map to physical file, so just ignore
                continue
            # Watch the source file, not the compiled bytecode
            if filename.endswith(('.pyc', '.pyo')):
                filename = filename[:-1]
            if not os.path.isfile(filename):
                # Compiled file for non-existant source
                continue
            mtime = os.stat(filename).st_mtime
            if filename not in mtimes:
                mtimes[filename] = mtime
                continue
            if mtime > mtimes[filename]:
                # Module changed: notify, then exit with the restart code 3
                # (see _restart_with_reloader)
                modification_callback(filename)
                sys.exit(3)
        loop_callback()
        time.sleep(_SLEEP_TIME)
def _restart_with_reloader():
    """Respawn this interpreter with ``RUN_MAIN`` set, repeating for as
    long as the child exits with code 3 (the reloader's restart code);
    return the child's final exit code otherwise."""
    while True:
        args = [sys.executable] + sys.argv
        if sys.platform == 'win32':
            args = ['"%s"' % arg for arg in args]
        environ = os.environ.copy()
        environ['RUN_MAIN'] = 'true'
        # This call reinvokes ourself and goes into the other branch of
        # main as a new process.
        exit_code = os.spawnve(os.P_WAIT, sys.executable, args, environ)
        if exit_code != 3:
            return exit_code
def main(func, modification_callback, *args, **kwargs):
    """Run the given function and restart any time modules are changed."""
    if os.environ.get('RUN_MAIN'):
        # Child process: run `func` in a daemon thread while the main
        # thread watches loaded modules for changes.
        exit_code = []  # holds the child thread's exit code once finished
        def main_thread():
            try:
                func(*args, **kwargs)
                exit_code.append(None)
            except SystemExit, e:
                exit_code.append(e.code)
            except:
                traceback.print_exception(*sys.exc_info())
                exit_code.append(1)
        def check_exit():
            # Propagate the child thread's exit code once it has finished
            if exit_code:
                sys.exit(exit_code[0])
        # Launch the actual program as a child thread
        thread = threading.Thread(target=main_thread, name='Main thread')
        thread.setDaemon(True)
        thread.start()
        try:
            # Now wait for a file modification and quit
            _reloader_thread(modification_callback, check_exit)
        except KeyboardInterrupt:
            pass
    else:
        # Initial invocation just waits around restarting this executable
        try:
            sys.exit(_restart_with_reloader())
        except KeyboardInterrupt:
            pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/datefmt.py | trac/trac/util/datefmt.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
import math
import re
import sys
import time
from datetime import tzinfo, timedelta, datetime, date
from locale import getlocale, LC_TIME
try:
import babel
from babel import Locale
from babel.core import LOCALE_ALIASES
from babel.dates import (
format_datetime as babel_format_datetime,
format_date as babel_format_date,
format_time as babel_format_time,
get_datetime_format, get_date_format,
get_time_format, get_month_names,
get_period_names, get_day_names
)
except ImportError:
babel = None
from trac.core import TracError
from trac.util.text import to_unicode, getpreferredencoding
from trac.util.translation import _, ngettext, get_available_locales
# Date/time utilities
# -- conversion
def to_datetime(t, tzinfo=None):
    """Convert ``t`` into a `datetime` object in the ``tzinfo`` timezone.

    If no ``tzinfo`` is given, the local timezone `localtz` will be used.

    ``t`` is converted using the following rules:

    - If ``t`` is already a `datetime` object,
      - if it is timezone-"naive", it is localized to ``tzinfo``
      - if it is already timezone-aware, ``t`` is mapped to the given
        timezone (`datetime.datetime.astimezone`)
    - If ``t`` is None, the current time will be used.
    - If ``t`` is a number, it is interpreted as a timestamp.

    Any other input will trigger a `TypeError`.

    All returned datetime instances are timezone aware and normalized.
    """
    tz = tzinfo or localtz
    # Initialize `dt` so unsupported input types reach the TypeError below
    # instead of raising UnboundLocalError at the final check.
    dt = None
    if t is None:
        dt = datetime.now(tz)
    elif isinstance(t, datetime):
        if t.tzinfo:
            dt = t.astimezone(tz)
        else:
            dt = tz.localize(t)
    elif isinstance(t, date):
        dt = tz.localize(datetime(t.year, t.month, t.day))
    elif isinstance(t, (int, long, float)):
        if not (_min_ts <= t <= _max_ts):
            # Handle microsecond timestamps for 0.11 compatibility
            t *= 0.000001
        if t < 0 and isinstance(t, float):
            # Work around negative fractional times bug in Python 2.4
            # http://bugs.python.org/issue1646728
            frac, integer = math.modf(t)
            dt = datetime.fromtimestamp(integer - 1, tz) + \
                 timedelta(seconds=frac + 1)
        else:
            dt = datetime.fromtimestamp(t, tz)
    if dt is not None:
        return tz.normalize(dt)
    raise TypeError('expecting datetime, int, long, float, or None; got %s' %
                    type(t))
def to_timestamp(dt):
    """Return the corresponding POSIX timestamp (whole seconds).

    A falsy `dt` (None) maps to 0.
    """
    if not dt:
        return 0
    delta = dt - _epoc
    return delta.days * 86400 + delta.seconds
def to_utimestamp(dt):
"""Return a microsecond POSIX timestamp for the given `datetime`."""
if not dt:
return 0
diff = dt - _epoc
return (diff.days * 86400000000L + diff.seconds * 1000000
+ diff.microseconds)
def from_utimestamp(ts):
    """Return the `datetime` for the given microsecond POSIX timestamp.

    A falsy `ts` (None or 0) yields the epoch itself.
    """
    microseconds = ts or 0
    return _epoc + timedelta(microseconds=microseconds)
# -- formatting
# (seconds-per-unit, message factory) pairs, largest unit first; used by
# `pretty_timedelta` to pick the coarsest unit covering ~1.9x of the delta
_units = (
    (3600*24*365, lambda r: ngettext('%(num)d year', '%(num)d years', r)),
    (3600*24*30, lambda r: ngettext('%(num)d month', '%(num)d months', r)),
    (3600*24*7, lambda r: ngettext('%(num)d week', '%(num)d weeks', r)),
    (3600*24, lambda r: ngettext('%(num)d day', '%(num)d days', r)),
    (3600, lambda r: ngettext('%(num)d hour', '%(num)d hours', r)),
    (60, lambda r: ngettext('%(num)d minute', '%(num)d minutes', r)))
def pretty_timedelta(time1, time2=None, resolution=None):
    """Calculate time delta between two `datetime` objects.
    (the result is somewhat imprecise, only use for prettyprinting).

    If either `time1` or `time2` is None, the current time will be used
    instead.
    """
    t1 = to_datetime(time1)
    t2 = to_datetime(time2)
    if t1 > t2:
        t1, t2 = t2, t1
    delta = t2 - t1
    age_s = int(delta.days * 86400 + delta.seconds)
    if resolution and age_s < resolution:
        return ''
    if age_s <= 60 * 1.9:
        return ngettext('%(num)i second', '%(num)i seconds', age_s)
    # Pick the coarsest unit the delta covers at least ~1.9 times
    for unit_seconds, format_units in _units:
        ratio = float(age_s) / float(unit_seconds)
        if ratio >= 1.9:
            return format_units(int(round(ratio)))
    return ''
# Fallback strftime formats used when Babel is unavailable, keyed by the
# 'short'/'medium'/'long'/'full' Babel format names
_BABEL_FORMATS = {
    'datetime': {'short': '%x %H:%M', 'medium': '%x %X', 'long': '%x %X',
                 'full': '%x %X'},
    'date': {'short': '%x', 'medium': '%x', 'long': '%x', 'full': '%x'},
    'time': {'short': '%H:%M', 'medium': '%X', 'long': '%X', 'full': '%X'},
}
# Mapping of requested format -> pseudo-format understood by
# `_format_datetime_without_babel`, used when ISO 8601 output is forced
_ISO8601_FORMATS = {
    'datetime': {
        '%x %X': 'iso8601', '%x': 'iso8601date', '%X': 'iso8601time',
        'short': '%Y-%m-%dT%H:%M', 'medium': '%Y-%m-%dT%H:%M:%S',
        'long': 'iso8601', 'full': 'iso8601',
        'iso8601': 'iso8601', None: 'iso8601'},
    'date': {
        '%x %X': 'iso8601', '%x': 'iso8601date', '%X': 'iso8601time',
        'short': 'iso8601date', 'medium': 'iso8601date',
        'long': 'iso8601date', 'full': 'iso8601date',
        'iso8601': 'iso8601date', None: 'iso8601date'},
    'time': {
        '%x %X': 'iso8601', '%x': 'iso8601date', '%X': 'iso8601time',
        'short': '%H:%M', 'medium': '%H:%M:%S',
        'long': 'iso8601time', 'full': 'iso8601time',
        'iso8601': 'iso8601time', None: 'iso8601time'},
}
# strftime shorthand -> which part of the value it denotes
_STRFTIME_HINTS = {'%x %X': 'datetime', '%x': 'date', '%X': 'time'}
def _format_datetime_without_babel(t, format):
    """Format `t` with `strftime`, expanding the pseudo 'iso8601*'
    formats; the result is decoded using the locale's encoding."""
    normalize_Z = False
    fmt = format
    if fmt.lower().startswith('iso8601'):
        if 'date' in fmt:
            fmt = '%Y-%m-%d'
        elif 'time' in fmt:
            fmt = '%H:%M:%S%z'
            normalize_Z = True
        else:
            fmt = '%Y-%m-%dT%H:%M:%S%z'
            normalize_Z = True
    text = t.strftime(str(fmt))
    if normalize_Z:
        # RFC 3339 style: 'Z' for UTC, otherwise '+hh:mm' with a colon
        text = text.replace('+0000', 'Z')
        if not text.endswith('Z'):
            text = text[:-2] + ":" + text[-2:]
    encoding = getlocale(LC_TIME)[1] or getpreferredencoding() \
               or sys.getdefaultencoding()
    return unicode(text, encoding, 'replace')
def _format_datetime(t, format, tzinfo, locale, hint):
    """Shared implementation of `format_datetime`/`format_date`/`format_time`.

    `hint` is one of 'datetime', 'date' or 'time' and selects which part
    of the value the `%x`/`%X` shorthands and Babel format names refer to.
    """
    t = to_datetime(t, tzinfo or localtz)
    # ISO 8601 output bypasses both Babel and the C locale
    if (format in ('iso8601', 'iso8601date', 'iso8601time') or
        locale == 'iso8601'):
        format = _ISO8601_FORMATS[hint].get(format, format)
        return _format_datetime_without_babel(t, format)
    if babel and locale:
        if format is None:
            format = 'medium'
        elif format in _STRFTIME_HINTS:
            # Map strftime shorthand ('%x %X', '%x', '%X') to a Babel hint
            hint = _STRFTIME_HINTS[format]
            format = 'medium'
        if format in ('short', 'medium', 'long', 'full'):
            if hint == 'datetime':
                return babel_format_datetime(t, format, None, locale)
            if hint == 'date':
                return babel_format_date(t, format, locale)
            if hint == 'time':
                return babel_format_time(t, format, None, locale)
    # No Babel (or an explicit strftime format): fall back to strftime
    format = _BABEL_FORMATS[hint].get(format, format)
    return _format_datetime_without_babel(t, format)
def format_datetime(t=None, format='%x %X', tzinfo=None, locale=None):
    """Format the `datetime` object `t` into an `unicode` string.

    If `t` is None, the current time will be used.

    `format` uses conventional `strftime` keys; it may also be 'iso8601'
    for the international date format (compliant with RFC 3339).
    `tzinfo` defaults to the local timezone when left to `None`.
    """
    return _format_datetime(t, format, tzinfo, locale, 'datetime')
def format_date(t=None, format='%x', tzinfo=None, locale=None):
    """Format only the date part of a `datetime` object.

    See `format_datetime` for more details.
    """
    return _format_datetime(t, format, tzinfo, locale, 'date')
def format_time(t=None, format='%X', tzinfo=None, locale=None):
    """Format only the time part of a `datetime` object.

    See `format_datetime` for more details.
    """
    return _format_datetime(t, format, tzinfo, locale, 'time')
def get_date_format_hint(locale=None):
    """Present the default format used by `format_date` in a human readable
    form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    if locale == 'iso8601':
        return 'YYYY-MM-DD'
    if babel and locale:
        return get_date_format('medium', locale=locale).pattern
    # Derive the hint from how a known sample date is rendered
    sample = format_date(datetime(1999, 10, 29, tzinfo=utc), tzinfo=utc)
    for old, new in (('1999', 'YYYY'), ('99', 'YY'), ('10', 'MM'),
                     ('29', 'DD')):
        sample = sample.replace(old, new, 1)
    return sample
def get_datetime_format_hint(locale=None):
    """Present the default format used by `format_datetime` in a human
    readable form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    if locale == 'iso8601':
        return u'YYYY-MM-DDThh:mm:ss±hh:mm'
    if babel and locale:
        date_pattern = get_date_format('medium', locale=locale).pattern
        time_pattern = get_time_format('medium', locale=locale).pattern
        return get_datetime_format('medium', locale=locale) \
               .replace('{0}', time_pattern) \
               .replace('{1}', date_pattern)
    # Derive the hint from how a known sample date/time is rendered
    t = datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc)
    sample = format_datetime(t, tzinfo=utc)
    ampm = format_time(t, '%p', tzinfo=utc)
    if ampm:
        sample = sample.replace(ampm, 'a', 1)
    for old, new in (('1999', 'YYYY'), ('99', 'YY'), ('10', 'MM'),
                     ('29', 'DD'), ('23', 'hh'), ('11', 'hh'),
                     ('59', 'mm'), ('58', 'ss')):
        sample = sample.replace(old, new, 1)
    return sample
def get_month_names_jquery_ui(req):
    """Get the month names for the jQuery UI datepicker library"""
    locale = req.lc_time
    if locale == 'iso8601':
        locale = req.locale
    if babel and locale:
        month_names = {}
        for width in ('wide', 'abbreviated'):
            names = get_month_names(width, locale=locale)
            # Babel indexes months 1..12
            month_names[width] = [names[month] for month in xrange(1, 13)]
        return month_names
    return {
        'wide': (
            'January', 'February', 'March', 'April', 'May', 'June', 'July',
            'August', 'September', 'October', 'November', 'December'),
        'abbreviated': (
            'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
            'Oct', 'Nov', 'Dec'),
    }
def get_day_names_jquery_ui(req):
    """Get the day names for the jQuery UI datepicker library"""
    locale = req.lc_time
    if locale == 'iso8601':
        locale = req.locale
    if babel and locale:
        day_names = {}
        for width in ('wide', 'abbreviated', 'narrow'):
            names = get_day_names(width, locale=locale)
            # Rotate from Babel's Monday-first to jQuery's Sunday-first
            day_names[width] = [names[(offset + 6) % 7]
                                for offset in xrange(7)]
        return day_names
    return {
        'wide': ('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',
                 'Friday', 'Saturday'),
        'abbreviated': ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'),
        'narrow': ('Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa'),
    }
def get_date_format_jquery_ui(locale):
    """Get the date format for the jQuery UI datepicker library."""
    if locale == 'iso8601':
        return 'yy-mm-dd'
    if babel and locale:
        mapping = {'yyyy': 'yy', 'y': 'yy', 'M': 'm', 'MM': 'mm', 'MMM': 'M',
                   'd': 'd', 'dd': 'dd'}
        return get_date_format('medium', locale=locale).format % mapping
    # Derive the format from how a known sample date is rendered
    sample = format_date(datetime(1999, 10, 29, tzinfo=utc), tzinfo=utc)
    for old, new in (('1999', 'yy'), ('99', 'y'), ('10', 'mm'),
                     ('29', 'dd')):
        sample = sample.replace(old, new, 1)
    return sample
def get_time_format_jquery_ui(locale):
    """Get the time format for the jQuery UI timepicker addon."""
    if locale == 'iso8601':
        return 'hh:mm:ssz' # XXX timepicker doesn't support 'ISO_8601'
    if babel and locale:
        mapping = {'h': 'h', 'hh': 'hh', 'H': 'h', 'HH': 'hh',
                   'm': 'm', 'mm': 'mm', 's': 's', 'ss': 'ss',
                   'a': 'TT'}
        return get_time_format('medium', locale=locale).format % mapping
    # Derive the format from how a known sample time is rendered
    t = datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc)
    sample = format_time(t, tzinfo=utc)
    ampm = format_time(t, '%p', tzinfo=utc)
    if ampm:
        sample = sample.replace(ampm, 'TT', 1)
    for old, new in (('23', 'hh'), ('11', 'hh'), ('59', 'mm'),
                     ('58', 'ss')):
        sample = sample.replace(old, new, 1)
    return sample
def get_timezone_list_jquery_ui(t=None):
    """Get timezone list for jQuery timepicker addon"""
    when = datetime.now(utc) if t is None else utc.localize(t)
    # Collect the distinct UTC offsets in effect at `when`
    offsets = set()
    for name in all_timezones:
        offsets.add(when.astimezone(get_timezone(name)).strftime('%z'))
    entries = []
    for zone in sorted(offsets, key=lambda tz: int(tz)):
        if zone == '+0000':
            entries.append({'value': 'Z', 'label': '+00:00'})
        else:
            entries.append(zone[:-2] + ':' + zone[-2:])
    return entries
def get_first_week_day_jquery_ui(req):
    """Get first week day for jQuery date picker"""
    locale = req.lc_time
    if locale == 'iso8601':
        return 1 # Monday
    if not (babel and locale):
        return 0 # Sunday
    # A language-only locale may lack territory data; use its alias
    if not locale.territory and locale.language in LOCALE_ALIASES:
        locale = Locale.parse(LOCALE_ALIASES[locale.language])
    return (locale.first_week_day + 1) % 7
def is_24_hours(locale):
    """Returns `True` for 24 hour time formats."""
    if locale == 'iso8601':
        return True
    # Render 23:00 and check whether the hour survives unconverted
    sample = format_datetime(datetime(1999, 10, 29, 23, tzinfo=utc),
                             tzinfo=utc, locale=locale)
    return '23' in sample
def http_date(t=None):
    """Format `datetime` object `t` as a rfc822 timestamp"""
    dt = to_datetime(t, utc)
    weekday = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat',
               'Sun')[dt.weekday()]
    month = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
             'Oct', 'Nov', 'Dec')[dt.month - 1]
    return '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
        weekday, dt.day, month, dt.year, dt.hour, dt.minute, dt.second)
# -- parsing
_ISO_8601_RE = re.compile(r'''
(\d\d\d\d)(?:-?(\d\d)(?:-?(\d\d))?)? # date
(?:
[T ]
(\d\d)(?::?(\d\d)(?::?(\d\d) # time
(?:[,.](\d{1,6}))?)?)? # microseconds
)?
(Z?(?:([-+])?(\d\d):?(\d\d)?)?)?$ # timezone
''', re.VERBOSE)
def _parse_date_iso8601(text, tzinfo):
    """Parse an ISO 8601 string into an aware `datetime`, or None.

    Missing month/day default to 1, missing time parts to 0; an explicit
    timezone in `text` overrides the `tzinfo` argument.
    """
    match = _ISO_8601_RE.match(text)
    if match:
        try:
            g = match.groups()
            years = g[0]
            months = g[1] or '01'
            days = g[2] or '01'
            hours, minutes, seconds, useconds = [x or '00' for x in g[3:7]]
            # Right-pad fractional seconds out to microseconds
            useconds = (useconds + '000000')[:6]
            z, tzsign, tzhours, tzminutes = g[7:11]
            if z:
                # Explicit offset: use UTC or a matching FixedOffset
                tz = timedelta(hours=int(tzhours or '0'),
                               minutes=int(tzminutes or '0')).seconds / 60
                if tz == 0:
                    tzinfo = utc
                else:
                    tzinfo = FixedOffset(-tz if tzsign == '-' else tz,
                                         '%s%s:%s' %
                                         (tzsign, tzhours, tzminutes))
            tm = [int(x) for x in (years, months, days,
                                   hours, minutes, seconds, useconds)]
            t = tzinfo.localize(datetime(*tm))
            return tzinfo.normalize(t)
        except ValueError:
            # Out-of-range field values: treat as "no match"
            pass
    return None
def parse_date(text, tzinfo=None, locale=None, hint='date'):
    """Parse `text` into an aware `datetime`, trying ISO 8601 first,
    then locale-specific formats, then relative specs like '2 days ago'.

    :raises TracError: when nothing matches or the date is out of range
    """
    tzinfo = tzinfo or localtz
    text = text.strip()
    dt = _parse_date_iso8601(text, tzinfo)
    if dt is None and locale != 'iso8601':
        if babel and locale:
            dt = _i18n_parse_date(text, tzinfo, locale)
        else:
            for fmt in ('%x %X', '%x, %X', '%X %x', '%X, %x', '%x', '%c',
                        '%b %d, %Y'):
                try:
                    tm = time.strptime(text, fmt)
                    dt = tzinfo.localize(datetime(*tm[0:6]))
                    dt = tzinfo.normalize(dt)
                    break
                except ValueError:
                    continue
    if dt is None:
        dt = _parse_relative_time(text, tzinfo)
    if dt is None:
        hint = {'datetime': get_datetime_format_hint,
                'date': get_date_format_hint
               }.get(hint, lambda l: hint)(locale)
        raise TracError(_('"%(date)s" is an invalid date, or the date format '
                          'is not known. Try "%(hint)s" instead.',
                          date=text, hint=hint), _('Invalid Date'))
    # Make sure we can convert it to a timestamp and back - fromtimestamp()
    # may raise ValueError if larger than platform C localtime() or gmtime()
    try:
        datetime.utcfromtimestamp(to_timestamp(dt))
    except ValueError:
        raise TracError(_('The date "%(date)s" is outside valid range. '
                          'Try a date closer to present time.', date=text),
                        _('Invalid Date'))
    return dt
def _i18n_parse_date_pattern(locale):
    """Build the parsing data for `locale` from its Babel formats.

    Returns a dict with:
     - 'orders': field order (key y/M/d/h/m/s -> index), for the
       datetime format and the date-only format
     - 'regexp': pattern matching numbers, month names and am/pm markers
     - 'month_names': lowercase month name -> month number
     - 'period_names': lowercase am/pm name -> 'am'/'pm'
    """
    format_keys = {
        'y': ('y', 'Y'),
        'M': ('M',),
        'd': ('d',),
        'h': ('h', 'H'),
        'm': ('m',),
        's': ('s',),
    }
    regexp = [r'[0-9]+']
    date_format = get_date_format('medium', locale=locale)
    time_format = get_time_format('medium', locale=locale)
    datetime_format = get_datetime_format('medium', locale=locale)
    formats = (
        datetime_format.replace('{0}', time_format.format) \
                       .replace('{1}', date_format.format),
        date_format.format)
    orders = []
    for format in formats:
        # Record the order in which the y/M/d/h/m/s fields occur
        order = []
        for key, chars in format_keys.iteritems():
            for char in chars:
                idx = format.find('%(' + char)
                if idx != -1:
                    order.append((idx, key))
                    break
        order.sort()
        order = dict((key, idx) for idx, (_, key) in enumerate(order))
        orders.append(order)
    month_names = {
        'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
        'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
    }
    if formats[0].find('%(MMM)s') != -1:
        # The locale format spells out month names: accept them too
        for width in ('wide', 'abbreviated'):
            names = get_month_names(width, locale=locale)
            for num, name in names.iteritems():
                name = name.lower()
                month_names[name] = num
    regexp.extend(month_names.iterkeys())
    period_names = {'am': 'am', 'pm': 'pm'}
    if formats[0].find('%(a)s') != -1:
        # The locale format uses am/pm markers: accept localized ones
        names = get_period_names(locale=locale)
        for period, name in names.iteritems():
            name = name.lower()
            period_names[name] = period
    regexp.extend(period_names.iterkeys())
    return {
        'orders': orders,
        'regexp': re.compile('(%s)' % '|'.join(regexp),
                             re.IGNORECASE | re.UNICODE),
        'month_names': month_names,
        'period_names': period_names,
    }
# Cache of per-locale parse data: False = not built yet, None = could not
# be built, otherwise the dict returned by `_i18n_parse_date_pattern`
_I18N_PARSE_DATE_PATTERNS = dict(map(lambda l: (l, False),
                                     get_available_locales()))
def _i18n_parse_date(text, tzinfo, locale):
    """Parse `text` using the (lazily cached) patterns for `locale`;
    return an aware `datetime` or None."""
    locale = Locale.parse(locale)
    key = str(locale)
    pattern = _I18N_PARSE_DATE_PATTERNS.get(key)
    if pattern is False:
        # Build and memoize the locale's parse data on first use
        pattern = _i18n_parse_date_pattern(locale)
        _I18N_PARSE_DATE_PATTERNS[key] = pattern
    if pattern is None:
        return None
    text = text.lower()
    # Try the datetime field order first, then the date-only order
    for order in pattern['orders']:
        try:
            return _i18n_parse_date_0(text, order, pattern['regexp'],
                                      pattern['period_names'],
                                      pattern['month_names'], tzinfo)
        except ValueError:
            continue
    return None
def _i18n_parse_date_0(text, order, regexp, period_names, month_names, tzinfo):
    """Parse `text` assuming the field order `order`.

    Returns an aware `datetime`, None when nothing matches at all, and
    raises ValueError when the match doesn't fit this field order.
    """
    matches = regexp.findall(text)
    if not matches:
        return None
    # remove am/pm markers on ahead
    period = None
    for idx, match in enumerate(matches):
        period = period_names.get(match)
        if period is not None:
            del matches[idx]
            break
    # for date+time, use 0 seconds if seconds are missing
    if 's' in order and len(matches) == 5:
        matches.insert(order['s'], 0)
    values = {}
    for key, idx in order.iteritems():
        if idx < len(matches):
            value = matches[idx]
            if key == 'y':
                # Expand two-digit years, e.g. '09' -> '2009'
                if len(value) == 2 and value.isdigit():
                    value = '20' + value
            values[key] = value
    if 'y' not in values or 'M' not in values or 'd' not in values:
        raise ValueError
    # A spelled-out month name may sit in any date slot; move it to 'M'
    for key in ('y', 'M', 'd'):
        value = values[key]
        value = month_names.get(value)
        if value is not None:
            if key == 'M':
                values[key] = value
            else:
                values[key], values['M'] = values['M'], value
            break
    values = dict((key, int(value)) for key, value in values.iteritems())
    values.setdefault('h', 0)
    values.setdefault('m', 0)
    values.setdefault('s', 0)
    # Apply the am/pm marker to a 12-hour clock value
    if period and values['h'] <= 12:
        if period == 'am':
            values['h'] %= 12
        elif period == 'pm':
            values['h'] = values['h'] % 12 + 12
    t = tzinfo.localize(datetime(*(values[k] for k in 'yMdhms')))
    return tzinfo.normalize(t)
# Matches e.g. '2 days ago', '3.5 hours', '1w': group 1 is the amount,
# group 2 the unit name or single-letter abbreviation
_REL_TIME_RE = re.compile(
    r'(\d+\.?\d*)\s*'
    r'(second|minute|hour|day|week|month|year|[hdwmy])s?\s*'
    r'(?:ago)?$')
# Amount -> timedelta factories; note 'm' means month (not minute) and
# month/year lengths are approximated as 30/365 days
_time_intervals = dict(
    second=lambda v: timedelta(seconds=v),
    minute=lambda v: timedelta(minutes=v),
    hour=lambda v: timedelta(hours=v),
    day=lambda v: timedelta(days=v),
    week=lambda v: timedelta(weeks=v),
    month=lambda v: timedelta(days=30 * v),
    year=lambda v: timedelta(days=365 * v),
    h=lambda v: timedelta(hours=v),
    d=lambda v: timedelta(days=v),
    w=lambda v: timedelta(weeks=v),
    m=lambda v: timedelta(days=30 * v),
    y=lambda v: timedelta(days=365 * v),
)
# Matches 'this week', 'last month', etc.
_TIME_START_RE = re.compile(r'(this|last)\s*'
                            r'(second|minute|hour|day|week|month|year)$')
# Factories returning the naive start of the unit containing `now`
_time_starts = dict(
    second=lambda now: datetime(now.year, now.month, now.day, now.hour,
                                now.minute, now.second),
    minute=lambda now: datetime(now.year, now.month, now.day, now.hour,
                                now.minute),
    hour=lambda now: datetime(now.year, now.month, now.day, now.hour),
    day=lambda now: datetime(now.year, now.month, now.day),
    week=lambda now: datetime(now.year, now.month, now.day) \
                     - timedelta(days=now.weekday()),
    month=lambda now: datetime(now.year, now.month, 1),
    year=lambda now: datetime(now.year, 1, 1),
)
def _parse_relative_time(text, tzinfo, now=None):
    """Parse a relative time spec ('now', 'today', '2 hours ago',
    'last week', ...) into an aware `datetime`, or None on no match."""
    if now is None:     # now argument for unit tests
        now = datetime.now(tzinfo)
    if text == 'now':
        return now
    if text in ('today', 'yesterday'):
        dt = _time_starts['day'](now)
        if text == 'yesterday':
            dt -= timedelta(days=1)
    else:
        dt = None
        match = _REL_TIME_RE.match(text)
        if match:
            value, interval = match.groups()
            dt = now - _time_intervals[interval](float(value))
        else:
            match = _TIME_START_RE.match(text)
            if match:
                which, start = match.groups()
                dt = _time_starts[start](now)
                if which == 'last':
                    # Months and years have uneven lengths, so step back
                    # by calendar fields rather than by a fixed delta
                    if start == 'month':
                        if dt.month > 1:
                            dt = dt.replace(month=dt.month - 1)
                        else:
                            dt = dt.replace(year=dt.year - 1, month=12)
                    elif start == 'year':
                        dt = dt.replace(year=dt.year - 1)
                    else:
                        dt -= _time_intervals[start](1)
    if dt is None:
        return None
    if not dt.tzinfo:
        dt = tzinfo.localize(dt)
    return tzinfo.normalize(dt)
# -- formatting/parsing helper functions
def user_time(req, func, *args, **kwargs):
    """Call `func`, filling in `tzinfo` and `locale` keyword arguments
    from `req` unless the caller supplied them explicitly.

    Intended for the `format_*` and `parse_date` functions of this module.

    :param req: a instance of `Request`
    :param func: a function which must accept `tzinfo` and `locale` keyword
                 arguments
    :param args: arguments which pass to `func` function
    :param kwargs: keyword arguments which pass to `func` function
    """
    # Only touch `req` attributes when the caller didn't override them
    # (the attributes may be lazily computed properties)
    if 'tzinfo' not in kwargs:
        kwargs['tzinfo'] = getattr(req, 'tz', None)
    if 'locale' not in kwargs:
        kwargs['locale'] = getattr(req, 'lc_time', None)
    return func(*args, **kwargs)
# -- timezone utilities
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        # `offset` is minutes east of UTC; `name` is the display name
        self._offset = timedelta(minutes=offset)
        self.zone = name

    def __str__(self):
        return self.zone

    def __repr__(self):
        return '<FixedOffset "%s" %s>' % (self.zone, self._offset)

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        return self.zone

    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time
        return _zero

    def localize(self, dt, is_dst=False):
        """Attach this timezone to a naive datetime (pytz-compatible)."""
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        """No-op normalization (pytz-compatible); `dt` must be aware."""
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        return dt
class LocalTimezone(tzinfo):
    """A 'local' time zone implementation"""
    # Class-level data captured once by `_initialize` from the platform
    # timezone settings exposed by the `time` module
    _std_offset = None
    _dst_offset = None
    _dst_diff = None
    _std_tz = None
    _dst_tz = None
    @classmethod
    def _initialize(cls):
        # Capture the standard and (when available) DST offsets
        cls._std_tz = cls(False)
        cls._std_offset = timedelta(seconds=-time.timezone)
        if time.daylight:
            cls._dst_tz = cls(True)
            cls._dst_offset = timedelta(seconds=-time.altzone)
        else:
            cls._dst_tz = cls._std_tz
            cls._dst_offset = cls._std_offset
        cls._dst_diff = cls._dst_offset - cls._std_offset
    def __init__(self, is_dst=None):
        # is_dst: True = always DST, False = never DST,
        # None = decide per datetime value in `_is_dst`
        self.is_dst = is_dst
    def __str__(self):
        offset = self.utcoffset(datetime.now())
        secs = offset.days * 3600 * 24 + offset.seconds
        hours, rem = divmod(abs(secs), 3600)
        return 'UTC%c%02d:%02d' % ('-' if secs < 0 else '+', hours, rem / 60)
    def __repr__(self):
        if self.is_dst is None:
            return '<LocalTimezone "%s" %s "%s" %s>' % \
                   (time.tzname[False], self._std_offset,
                    time.tzname[True], self._dst_offset)
        if self.is_dst:
            offset = self._dst_offset
        else:
            offset = self._std_offset
        return '<LocalTimezone "%s" %s>' % (time.tzname[self.is_dst], offset)
    def _is_dst(self, dt, is_dst=False):
        # Decide whether the naive wall-clock `dt` falls in DST by
        # round-tripping it through mktime/localtime with both isdst
        # flags; `is_dst` disambiguates ambiguous or non-existent times
        # (passing None makes those raise ValueError instead).
        if self.is_dst is not None:
            return self.is_dst
        tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
              dt.weekday(), 0)
        try:
            std_tt = time.localtime(time.mktime(tt + (0,)))
            dst_tt = time.localtime(time.mktime(tt + (1,)))
        except (ValueError, OverflowError):
            # Outside the platform's supported range: assume standard time
            return False
        std_correct = std_tt.tm_isdst == 0
        dst_correct = dst_tt.tm_isdst == 1
        if std_correct is dst_correct:
            # Both interpretations round-trip (ambiguous time at the DST
            # end) or neither does (non-existent time at the DST start)
            if is_dst is None:
                if std_correct is True:
                    raise ValueError('Ambiguous time "%s"' % dt)
                if std_correct is False:
                    raise ValueError('Non existent time "%s"' % dt)
            return is_dst
        if std_correct:
            return False
        if dst_correct:
            return True
    def utcoffset(self, dt):
        if self._is_dst(dt):
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        if self._is_dst(dt):
            return self._dst_diff
        else:
            return _zero
    def tzname(self, dt):
        return time.tzname[self._is_dst(dt)]
    def localize(self, dt, is_dst=False):
        # Attach the proper (std or dst) fixed variant to a naive datetime
        # (pytz-compatible API)
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        if self._is_dst(dt, is_dst):
            tz = self._dst_tz
        else:
            tz = self._std_tz
        return dt.replace(tzinfo=tz)
    def normalize(self, dt, is_dst=False):
        # Correct a datetime after arithmetic crossed a DST boundary
        # (pytz-compatible API)
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        if dt.tzinfo is localtz: # if not localized, returns without changes
            return dt
        return self.fromutc(dt.replace(tzinfo=self) - dt.utcoffset())
    def fromutc(self, dt):
        if dt.tzinfo is None or dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        # Convert via the platform localtime() so DST is resolved for us
        tt = time.localtime(to_timestamp(dt.replace(tzinfo=utc)))
        if tt.tm_isdst > 0:
            tz = self._dst_tz
        else:
            tz = self._std_tz
        return datetime(microsecond=dt.microsecond, tzinfo=tz, *tt[0:6])
utc = FixedOffset(0, 'UTC')
utcmin = datetime.min.replace(tzinfo=utc)
utcmax = datetime.max.replace(tzinfo=utc)
_epoc = datetime(1970, 1, 1, tzinfo=utc)
_zero = timedelta(0)
_min_ts = -(1 << 31)
_max_ts = (1 << 31) - 1
LocalTimezone._initialize()
localtz = LocalTimezone()
STDOFFSET = LocalTimezone._std_offset
DSTOFFSET = LocalTimezone._dst_offset
DSTDIFF = LocalTimezone._dst_diff
# Use a makeshift timezone implementation if pytz is not available.
# This implementation only supports fixed offset time zones.
#
_timezones = [
FixedOffset(0, 'UTC'),
FixedOffset(-720, 'GMT -12:00'), FixedOffset(-660, 'GMT -11:00'),
FixedOffset(-600, 'GMT -10:00'), FixedOffset(-540, 'GMT -9:00'),
FixedOffset(-480, 'GMT -8:00'), FixedOffset(-420, 'GMT -7:00'),
FixedOffset(-360, 'GMT -6:00'), FixedOffset(-300, 'GMT -5:00'),
FixedOffset(-240, 'GMT -4:00'), FixedOffset(-180, 'GMT -3:00'),
FixedOffset(-120, 'GMT -2:00'), FixedOffset(-60, 'GMT -1:00'),
FixedOffset(0, 'GMT'), FixedOffset(60, 'GMT +1:00'),
FixedOffset(120, 'GMT +2:00'), FixedOffset(180, 'GMT +3:00'),
FixedOffset(240, 'GMT +4:00'), FixedOffset(300, 'GMT +5:00'),
FixedOffset(360, 'GMT +6:00'), FixedOffset(420, 'GMT +7:00'),
FixedOffset(480, 'GMT +8:00'), FixedOffset(540, 'GMT +9:00'),
FixedOffset(600, 'GMT +10:00'), FixedOffset(660, 'GMT +11:00'),
FixedOffset(720, 'GMT +12:00'), FixedOffset(780, 'GMT +13:00')]
_tzmap = dict([(z.zone, z) for z in _timezones])
all_timezones = [z.zone for z in _timezones]
try:
import pytz
_tzoffsetmap = dict([(tz.utcoffset(None), tz) for tz in _timezones
if tz.zone != 'UTC'])
def timezone(tzname):
"""Fetch timezone instance by name or raise `KeyError`"""
tz = get_timezone(tzname)
if not tz:
raise KeyError(tzname)
return tz
def get_timezone(tzname):
"""Fetch timezone instance by name or return `None`"""
try:
# if given unicode parameter, pytz.timezone fails with:
# "type() argument 1 must be string, not unicode"
tz = pytz.timezone(to_unicode(tzname).encode('ascii', 'replace'))
except (KeyError, IOError):
tz = _tzmap.get(tzname)
if tz and tzname.startswith('Etc/'):
tz = _tzoffsetmap.get(tz.utcoffset(None))
return tz
_pytz_zones = [tzname for tzname in pytz.common_timezones
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/translation.py | trac/trac/util/translation.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Utilities for text translation with gettext."""
from __future__ import with_statement
import pkg_resources
import re
from genshi.builder import tag
from trac.util.concurrency import ThreadLocal, threading
from trac.util.compat import cleandoc
__all__ = ['gettext', 'ngettext', 'gettext_noop', 'ngettext_noop',
'tgettext', 'tgettext_noop', 'tngettext', 'tngettext_noop']
def safefmt(string, kwargs):
if kwargs:
try:
return string % kwargs
except KeyError:
pass
return string
def gettext_noop(string, **kwargs):
return safefmt(string, kwargs)
def dgettext_noop(domain, string, **kwargs):
return gettext_noop(string, **kwargs)
N_ = _noop = lambda string: string
cleandoc_ = cleandoc
def ngettext_noop(singular, plural, num, **kwargs):
string = singular if num == 1 else plural
kwargs.setdefault('num', num)
return safefmt(string, kwargs)
def dngettext_noop(domain, singular, plural, num, **kwargs):
return ngettext_noop(singular, plural, num, **kwargs)
_param_re = re.compile(r"%\((\w+)\)(?:s|[\d]*d|\d*.?\d*[fg])")
def _tag_kwargs(trans, kwargs):
trans_elts = _param_re.split(trans)
for i in xrange(1, len(trans_elts), 2):
trans_elts[i] = kwargs.get(trans_elts[i], '???')
return tag(*trans_elts)
def tgettext_noop(string, **kwargs):
return _tag_kwargs(string, kwargs) if kwargs else string
def dtgettext_noop(domain, string, **kwargs):
return tgettext_noop(string, **kwargs)
def tngettext_noop(singular, plural, num, **kwargs):
string = singular if num == 1 else plural
kwargs.setdefault('num', num)
return _tag_kwargs(string, kwargs)
def dtngettext_noop(domain, singular, plural, num, **kwargs):
return tngettext_noop(singular, plural, num, **kwargs)
def add_domain(domain, env_path, locale_dir):
pass
def domain_functions(domain, *symbols):
if symbols and not isinstance(symbols[0], basestring):
symbols = symbols[0]
_functions = {
'gettext': gettext_noop,
'_': gettext_noop,
'N_': _noop,
'ngettext': ngettext_noop,
'tgettext': tgettext_noop,
'tag_': tgettext_noop,
'tngettext': tngettext_noop,
'tagn_': tngettext_noop,
'add_domain': lambda env_path, locale_dir: None,
}
return [_functions[s] for s in symbols]
from gettext import NullTranslations
class NullTranslationsBabel(NullTranslations):
"""NullTranslations doesn't have the domain related methods."""
def dugettext(self, domain, string):
return self.ugettext(string)
def dungettext(self, domain, singular, plural, num):
return self.ungettext(singular, plural, num)
has_babel = False
try:
from babel import Locale
from babel.support import LazyProxy, Translations
class TranslationsProxy(object):
"""Delegate Translations calls to the currently active Translations.
If there's none, wrap those calls in LazyProxy objects.
Activation is controlled by `activate` and `deactivate` methods.
However, if retrieving the locale information is costly, it's also
possible to enable activation on demand only, by providing a callable
to `make_activable`.
"""
def __init__(self):
self._current = ThreadLocal(args=None, translations=None)
self._null_translations = NullTranslationsBabel()
self._plugin_domains = {}
self._plugin_domains_lock = threading.RLock()
self._activate_failed = False
# Public API
def add_domain(self, domain, env_path, locales_dir):
with self._plugin_domains_lock:
domains = self._plugin_domains.setdefault(env_path, {})
domains[domain] = locales_dir
def make_activable(self, get_locale, env_path=None):
self._current.args = (get_locale, env_path)
def activate(self, locale, env_path=None):
try:
locale_dir = pkg_resources.resource_filename('trac', 'locale')
except Exception:
self._activate_failed = True
return
t = Translations.load(locale_dir, locale or 'en_US')
if not t or t.__class__ is NullTranslations:
t = self._null_translations
else:
t.add(Translations.load(locale_dir, locale or 'en_US',
'tracini'))
if env_path:
with self._plugin_domains_lock:
domains = self._plugin_domains.get(env_path, {})
domains = domains.items()
for domain, dirname in domains:
t.add(Translations.load(dirname, locale, domain))
self._current.translations = t
self._activate_failed = False
def deactivate(self):
self._current.args = None
t, self._current.translations = self._current.translations, None
return t
def reactivate(self, t):
if t:
self._current.translations = t
@property
def active(self):
return self._current.translations or self._null_translations
@property
def isactive(self):
if self._current.args is not None:
get_locale, env_path = self._current.args
self._current.args = None
self.activate(get_locale(), env_path)
# FIXME: The following always returns True: either a translation is
# active, or activation has failed.
return self._current.translations is not None \
or self._activate_failed
# Delegated methods
def __getattr__(self, name):
return getattr(self.active, name)
def gettext(self, string, **kwargs):
def _gettext():
return safefmt(self.active.ugettext(string), kwargs)
if not self.isactive:
return LazyProxy(_gettext)
return _gettext()
def dgettext(self, domain, string, **kwargs):
def _dgettext():
return safefmt(self.active.dugettext(domain, string), kwargs)
if not self.isactive:
return LazyProxy(_dgettext)
return _dgettext()
def ngettext(self, singular, plural, num, **kwargs):
kwargs = kwargs.copy()
kwargs.setdefault('num', num)
def _ngettext():
trans = self.active.ungettext(singular, plural, num)
return safefmt(trans, kwargs)
if not self.isactive:
return LazyProxy(_ngettext)
return _ngettext()
def dngettext(self, domain, singular, plural, num, **kwargs):
kwargs = kwargs.copy()
kwargs.setdefault('num', num)
def _dngettext():
trans = self.active.dungettext(domain, singular, plural, num)
return safefmt(trans, kwargs)
if not self.isactive:
return LazyProxy(_dngettext)
return _dngettext()
def tgettext(self, string, **kwargs):
def _tgettext():
trans = self.active.ugettext(string)
return _tag_kwargs(trans, kwargs) if kwargs else trans
if not self.isactive:
return LazyProxy(_tgettext)
return _tgettext()
def dtgettext(self, domain, string, **kwargs):
def _dtgettext():
trans = self.active.dugettext(domain, string)
return _tag_kwargs(trans, kwargs) if kwargs else trans
if not self.isactive:
return LazyProxy(_dtgettext)
return _dtgettext()
def tngettext(self, singular, plural, num, **kwargs):
kwargs = kwargs.copy()
kwargs.setdefault('num', num)
def _tngettext():
trans = self.active.ungettext(singular, plural, num)
return _tag_kwargs(trans, kwargs)
if not self.isactive:
return LazyProxy(_tngettext)
return _tngettext()
def dtngettext(self, domain, singular, plural, num, **kwargs):
kwargs = kwargs.copy()
def _dtngettext():
trans = self.active.dungettext(domain, singular, plural, num)
if '%(num)' in trans:
kwargs.update(num=num)
return _tag_kwargs(trans, kwargs) if kwargs else trans
if not self.isactive:
return LazyProxy(_dtngettext)
return _dtngettext()
translations = TranslationsProxy()
def domain_functions(domain, *symbols):
"""Prepare partial instantiations of domain translation functions.
:param domain: domain used for partial instantiation
:param symbols: remaining parameters are the name of commonly used
translation function which will be bound to the domain
Note: the symbols can also be given as an iterable in the 2nd argument.
"""
if symbols and not isinstance(symbols[0], basestring):
symbols = symbols[0]
_functions = {
'gettext': translations.dgettext,
'_': translations.dgettext,
'ngettext': translations.dngettext,
'tgettext': translations.dtgettext,
'tag_': translations.dtgettext,
'tngettext': translations.dtngettext,
'tagn_': translations.dtngettext,
'add_domain': translations.add_domain,
}
def wrapdomain(symbol):
if symbol == 'N_':
return _noop
return lambda *args, **kw: _functions[symbol](domain, *args, **kw)
return [wrapdomain(s) for s in symbols]
gettext = translations.gettext
_ = gettext
dgettext = translations.dgettext
ngettext = translations.ngettext
dngettext = translations.dngettext
tgettext = translations.tgettext
tag_ = tgettext
dtgettext = translations.dtgettext
tngettext = translations.tngettext
tagn_ = tngettext
dtngettext = translations.dtngettext
def deactivate():
"""Deactivate translations.
:return: the current Translations, if any
"""
return translations.deactivate()
def reactivate(t):
"""Reactivate previously deactivated translations.
:param t: the Translations, as returned by `deactivate`
"""
return translations.reactivate(t)
def make_activable(get_locale, env_path=None):
"""Defer activation of translations.
:param get_locale: a callable returning a Babel Locale object
:param env_path: the environment to use for looking up catalogs
"""
translations.make_activable(get_locale, env_path)
def activate(locale, env_path=None):
translations.activate(locale, env_path)
def add_domain(domain, env_path, locale_dir):
translations.add_domain(domain, env_path, locale_dir)
def get_translations():
return translations
def get_available_locales():
"""Return a list of locale identifiers of the locales for which
translations are available.
"""
try:
return [dirname for dirname
in pkg_resources.resource_listdir('trac', 'locale')
if '.' not in dirname]
except Exception:
return []
def get_negotiated_locale(preferred_locales):
def normalize(locale_ids):
return [id.replace('-', '_') for id in locale_ids if id]
return Locale.negotiate(normalize(preferred_locales),
normalize(get_available_locales()))
has_babel = True
except ImportError: # fall back on 0.11 behavior, i18n functions are no-ops
gettext = _ = gettext_noop
dgettext = dgettext_noop
ngettext = ngettext_noop
dngettext = dngettext_noop
tgettext = tag_ = tgettext_noop
dtgettext = dtgettext_noop
tngettext = tagn_ = tngettext_noop
dtngettext = dtngettext_noop
translations = NullTranslationsBabel()
def activate(locale, env_path=None):
pass
def deactivate():
pass
def reactivate(t):
pass
def make_activable(get_locale, env_path=None):
pass
def get_translations():
return translations
def get_available_locales():
return []
def get_negotiated_locale(preferred=None, default=None):
return None
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/dist.py | trac/trac/util/dist.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
# Imports for backward compatibility
from trac.dist import get_l10n_cmdclass, get_l10n_js_cmdclass
try:
from trac.dist import extract_javascript_script
except ImportError:
pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/presentation.py | trac/trac/util/presentation.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# Copyright (C) 2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
"""Various utility functions and classes that support common presentation
tasks such as grouping or pagination.
"""
from math import ceil
import re
__all__ = ['captioned_button', 'classes', 'first_last', 'group', 'istext',
'prepared_paginate', 'paginate', 'Paginator']
__no_apidoc__ = 'prepared_paginate'
def captioned_button(req, symbol, text):
"""Return symbol and text or only symbol, according to user preferences."""
return symbol if req.session.get('ui.use_symbols') \
else u'%s %s' % (symbol, text)
def classes(*args, **kwargs):
"""Helper function for dynamically assembling a list of CSS class names
in templates.
Any positional arguments are added to the list of class names. All
positional arguments must be strings:
>>> classes('foo', 'bar')
u'foo bar'
In addition, the names of any supplied keyword arguments are added if they
have a truth value:
>>> classes('foo', bar=True)
u'foo bar'
>>> classes('foo', bar=False)
u'foo'
If none of the arguments are added to the list, this function returns
`None`:
>>> classes(bar=False)
"""
classes = list(filter(None, args)) + [k for k, v in kwargs.items() if v]
if not classes:
return None
return u' '.join(classes)
def first_last(idx, seq):
"""Generate ``first`` or ``last`` or both, according to the
position `idx` in sequence `seq`.
"""
return classes(first=idx == 0, last=idx == len(seq) - 1)
def group(iterable, num, predicate=None):
"""Combines the elements produced by the given iterable so that every `n`
items are returned as a tuple.
>>> items = [1, 2, 3, 4]
>>> for item in group(items, 2):
... print item
(1, 2)
(3, 4)
The last tuple is padded with `None` values if its' length is smaller than
`num`.
>>> items = [1, 2, 3, 4, 5]
>>> for item in group(items, 2):
... print item
(1, 2)
(3, 4)
(5, None)
The optional `predicate` parameter can be used to flag elements that should
not be packed together with other items. Only those elements where the
predicate function returns True are grouped with other elements, otherwise
they are returned as a tuple of length 1:
>>> items = [1, 2, 3, 4]
>>> for item in group(items, 2, lambda x: x != 3):
... print item
(1, 2)
(3,)
(4, None)
"""
buf = []
for item in iterable:
flush = predicate and not predicate(item)
if buf and flush:
buf += [None] * (num - len(buf))
yield tuple(buf)
del buf[:]
buf.append(item)
if flush or len(buf) == num:
yield tuple(buf)
del buf[:]
if buf:
buf += [None] * (num - len(buf))
yield tuple(buf)
def istext(text):
"""`True` for text (`unicode` and `str`), but `False` for `Markup`."""
from genshi.core import Markup
return isinstance(text, basestring) and not isinstance(text, Markup)
def prepared_paginate(items, num_items, max_per_page):
if max_per_page == 0:
num_pages = 1
else:
num_pages = int(ceil(float(num_items) / max_per_page))
return items, num_items, num_pages
def paginate(items, page=0, max_per_page=10):
"""Simple generic pagination.
Given an iterable, this function returns:
* the slice of objects on the requested page,
* the total number of items, and
* the total number of pages.
The `items` parameter can be a list, tuple, or iterator:
>>> items = range(12)
>>> items
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
>>> paginate(items)
([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
>>> paginate(items, page=1)
([10, 11], 12, 2)
>>> paginate(iter(items))
([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
>>> paginate(iter(items), page=1)
([10, 11], 12, 2)
This function also works with generators:
>>> def generate():
... for idx in range(12):
... yield idx
>>> paginate(generate())
([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 12, 2)
>>> paginate(generate(), page=1)
([10, 11], 12, 2)
The `max_per_page` parameter can be used to set the number of items that
should be displayed per page:
>>> items = range(12)
>>> paginate(items, page=0, max_per_page=6)
([0, 1, 2, 3, 4, 5], 12, 2)
>>> paginate(items, page=1, max_per_page=6)
([6, 7, 8, 9, 10, 11], 12, 2)
"""
if not page:
page = 0
start = page * max_per_page
stop = start + max_per_page
count = None
if hasattr(items, '__len__'):
count = len(items)
if count:
assert start < count, 'Page %d out of range' % page
try: # Try slicing first for better performance
retval = items[start:stop]
except TypeError: # Slicing not supported, so iterate through the whole list
retval = []
idx = -1 # Needed if items = []
for idx, item in enumerate(items):
if start <= idx < stop:
retval.append(item)
# If we already obtained the total number of items via `len()`,
# we can break out of the loop as soon as we've got the last item
# for the requested page
if count is not None and idx >= stop:
break
if count is None:
count = idx + 1
return retval, count, int(ceil(float(count) / max_per_page))
class Paginator(object):
"""Pagination controller"""
def __init__(self, items, page=0, max_per_page=10, num_items=None):
if not page:
page = 0
if num_items is None:
items, num_items, num_pages = paginate(items, page, max_per_page)
else:
items, num_items, num_pages = prepared_paginate(items, num_items,
max_per_page)
offset = page * max_per_page
self.page = page
self.max_per_page = max_per_page
self.items = items
self.num_items = num_items
self.num_pages = num_pages
self.span = offset, offset + len(items)
self.show_index = True
def __iter__(self):
return iter(self.items)
def __len__(self):
return len(self.items)
def __nonzero__(self):
return len(self.items) > 0
def __setitem__(self, idx, value):
self.items[idx] = value
@property
def has_more_pages(self):
return self.num_pages > 1
@property
def has_next_page(self):
return self.page + 1 < self.num_pages
@property
def has_previous_page(self):
return self.page > 0
def get_shown_pages(self, page_index_count = 11):
if self.has_more_pages == False:
return range(1, 2)
min_page = 1
max_page = int(ceil(float(self.num_items) / self.max_per_page))
current_page = self.page + 1
start_page = current_page - page_index_count / 2
end_page = current_page + page_index_count / 2 + \
(page_index_count % 2 - 1)
if start_page < min_page:
start_page = min_page
if end_page > max_page:
end_page = max_page
return range(start_page, end_page + 1)
def displayed_items(self):
from trac.util.translation import _
start, stop = self.span
total = self.num_items
if start + 1 == stop:
return _("%(last)d of %(total)d", last=stop, total=total)
else:
return _("%(start)d - %(stop)d of %(total)d",
start=self.span[0] + 1, stop=self.span[1], total=total)
def separated(items, sep=','):
"""Yield `(item, sep)` tuples, one for each element in `items`.
`sep` will be `None` for the last item.
>>> list(separated([1, 2]))
[(1, ','), (2, None)]
>>> list(separated([1]))
[(1, None)]
>>> list(separated("abc", ':'))
[('a', ':'), ('b', ':'), ('c', None)]
"""
items = iter(items)
last = items.next()
for i in items:
yield last, sep
last = i
yield last, None
try:
from json import dumps
_js_quote = dict((c, '\\u%04x' % ord(c)) for c in '&<>')
_js_quote_re = re.compile('[' + ''.join(_js_quote) + ']')
def to_json(value):
"""Encode `value` to JSON."""
def replace(match):
return _js_quote[match.group(0)]
text = dumps(value, sort_keys=True, separators=(',', ':'))
return _js_quote_re.sub(replace, text)
except ImportError:
from trac.util.text import to_js_string
def to_json(value):
"""Encode `value` to JSON."""
if isinstance(value, basestring):
return to_js_string(value)
elif value is None:
return 'null'
elif value is False:
return 'false'
elif value is True:
return 'true'
elif isinstance(value, (int, long)):
return str(value)
elif isinstance(value, float):
return repr(value)
elif isinstance(value, (list, tuple)):
return '[%s]' % ','.join(to_json(each) for each in value)
elif isinstance(value, dict):
return '{%s}' % ','.join('%s:%s' % (to_json(k), to_json(v))
for k, v in sorted(value.iteritems()))
else:
raise TypeError('Cannot encode type %s' % value.__class__.__name__)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/daemon.py | trac/trac/util/daemon.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import atexit
import errno
import os
import signal
import sys
def daemonize(pidfile=None, progname=None, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null', umask=022):
"""Fork a daemon process."""
if pidfile:
# Check whether the pid file already exists and refers to a still
# process running
pidfile = os.path.abspath(pidfile)
if os.path.exists(pidfile):
with open(pidfile) as fileobj:
try:
pid = int(fileobj.read())
except ValueError:
sys.exit('Invalid pid in file %s\nPlease remove it to '
'proceed' % pidfile)
try: # signal the process to see if it is still running
os.kill(pid, 0)
if not progname:
progname = os.path.basename(sys.argv[0])
sys.exit('%s is already running with pid %s' % (progname, pid))
except OSError, e:
if e.errno != errno.ESRCH:
raise
# The pid file must be writable
try:
fileobj = open(pidfile, 'a+')
fileobj.close()
except IOError, e:
from trac.util.text import exception_to_unicode
sys.exit('Error writing to pid file: %s' % exception_to_unicode(e))
# Perform first fork
pid = os.fork()
if pid > 0:
sys.exit(0) # exit first parent
# Decouple from parent environment
os.chdir('/')
os.umask(umask)
os.setsid()
# Perform second fork
pid = os.fork()
if pid > 0:
sys.exit(0) # exit second parent
# The process is now daemonized, redirect standard file descriptors
for stream in sys.stdout, sys.stderr:
stream.flush()
stdin = open(stdin, 'r')
stdout = open(stdout, 'a+')
stderr = open(stderr, 'a+', 0)
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
if pidfile:
# Register signal handlers to ensure atexit hooks are called on exit
for signum in [signal.SIGTERM, signal.SIGHUP]:
signal.signal(signum, handle_signal)
# Create/update the pid file, and register a hook to remove it when the
# process exits
def remove_pidfile():
if os.path.exists(pidfile):
os.remove(pidfile)
atexit.register(remove_pidfile)
with open(pidfile, 'w') as fileobj:
fileobj.write(str(os.getpid()))
def handle_signal(signum, frame):
"""Handle signals sent to the daemonized process."""
sys.exit()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/__init__.py | trac/trac/util/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
from __future__ import with_statement
import errno
import inspect
from itertools import izip, tee
import locale
import os.path
from pkg_resources import find_distributions
import random
import re
import shutil
import sys
import tempfile
import time
from urllib import quote, unquote, urlencode
from .compat import any, md5, sha1, sorted
from .text import exception_to_unicode, to_unicode, getpreferredencoding
# -- req, session and web utils
def get_reporter_id(req, arg_name=None):
"""Get most informative "reporter" identity out of a request.
That's the `Request`'s authname if not 'anonymous', or a `Request`
argument, or the session name and e-mail, or only the name or only
the e-mail, or 'anonymous' as last resort.
:param req: a `trac.web.api.Request`
:param arg_name: if given, a `Request` argument which may contain
the id for non-authentified users
"""
if req.authname != 'anonymous':
return req.authname
if arg_name:
r = req.args.get(arg_name)
if r:
return r
name = req.session.get('name', None)
email = req.session.get('email', None)
if name and email:
return '%s <%s>' % (name, email)
return name or email or req.authname # == 'anonymous'
def content_disposition(type=None, filename=None):
"""Generate a properly escaped Content-Disposition header."""
type = type or ''
if filename is not None:
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
if type:
type += '; '
type += 'filename=' + quote(filename, safe='')
return type
# -- os utilities
if os.name == 'nt':
from getpass import getuser
else:
import pwd
def getuser():
"""Retrieve the identity of the process owner"""
try:
return pwd.getpwuid(os.geteuid())[0]
except KeyError:
return 'unknown'
try:
WindowsError = WindowsError
except NameError:
class WindowsError(OSError):
"""Dummy exception replacing WindowsError on non-Windows platforms"""
can_rename_open_file = False
if os.name == 'nt':
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
MOVEFILE_REPLACE_EXISTING = 0x1
MOVEFILE_WRITE_THROUGH = 0x8
MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
if not isinstance(src, unicode):
src = unicode(src, sys.getfilesystemencoding())
if not isinstance(dst, unicode):
dst = unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
return MoveFileEx(src, dst, MOVEFILE_REPLACE_EXISTING
| MOVEFILE_WRITE_THROUGH)
CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = CreateTransaction(None, 0, 0, 0, 0, 10000, 'Trac rename')
if ta == -1:
return False
try:
return (MoveFileTransacted(src, dst, None, None,
MOVEFILE_REPLACE_EXISTING
| MOVEFILE_WRITE_THROUGH, ta)
and CommitTransaction(ta))
finally:
CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError, e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
class AtomicFile(object):
"""A file that appears atomically with its full content.
This file-like object writes to a temporary file in the same directory
as the final file. If the file is committed, the temporary file is renamed
atomically (on Unix, at least) to its final name. If it is rolled back,
the temporary file is removed.
"""
def __init__(self, path, mode='w', bufsize=-1):
self._file = None
self._path = path
(dir, name) = os.path.split(path)
(fd, self._temp) = tempfile.mkstemp(prefix=name + '-', dir=dir)
self._file = os.fdopen(fd, mode, bufsize)
# Try to preserve permissions and group ownership, but failure
# should not be fatal
try:
st = os.stat(path)
if hasattr(os, 'chmod'):
os.chmod(self._temp, st.st_mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
os.chflags(self._temp, st.st_flags)
if hasattr(os, 'chown'):
os.chown(self._temp, -1, st.st_gid)
except OSError:
pass
def __getattr__(self, name):
return getattr(self._file, name)
def commit(self):
if self._file is None:
return
try:
f, self._file = self._file, None
f.close()
rename(self._temp, self._path)
except Exception:
os.unlink(self._temp)
raise
def rollback(self):
if self._file is None:
return
try:
f, self._file = self._file, None
f.close()
finally:
try:
os.unlink(self._temp)
except Exception:
pass
close = commit
__del__ = rollback
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
closed = property(lambda self: self._file is None or self._file.closed)
def read_file(path, mode='r'):
"""Read a file and return its content."""
with open(path, mode) as f:
return f.read()
def create_file(path, data='', mode='w'):
"""Create a new file with the given data."""
with open(path, mode) as f:
if data:
f.write(data)
def create_unique_file(path):
"""Create a new file. An index is added if the path exists"""
parts = os.path.splitext(path)
idx = 1
while 1:
try:
flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
if hasattr(os, 'O_BINARY'):
flags += os.O_BINARY
return path, os.fdopen(os.open(path, flags, 0666), 'w')
except OSError, e:
if e.errno != errno.EEXIST:
raise
idx += 1
# A sanity check
if idx > 100:
raise Exception('Failed to create unique name: ' + path)
path = '%s.%d%s' % (parts[0], idx, parts[1])
class NaivePopen:
    """This is a deadlock-safe version of popen that returns an object with
    errorlevel, out (a string) and err (a string).

    The optional `input`, which must be a `str` object, is first written
    to a temporary file from which the process will read.

    (`capturestderr` may not work under Windows 9x.)

    Example::

      print Popen3('grep spam','\\n\\nhere spam\\n\\n').out
    """
    # NOTE(review): `command` is interpolated into a shell command line and
    # run via os.system(); callers must never pass untrusted input here.
    # tempfile.mktemp() is also subject to a tmpfile race; kept as-is for
    # compatibility.
    def __init__(self, command, input=None, capturestderr=None):
        outfile = tempfile.mktemp()
        # Redirect stdout (and optionally stdin/stderr) through temp files
        # instead of pipes, which avoids pipe-buffer deadlocks.
        command = '( %s ) > %s' % (command, outfile)
        if input is not None:
            infile = tempfile.mktemp()
            tmp = open(infile, 'w')
            tmp.write(input)
            tmp.close()
            command = command + ' <' + infile
        if capturestderr:
            errfile = tempfile.mktemp()
            command = command + ' 2>' + errfile
        try:
            self.err = None
            # os.system returns the wait status; >> 8 extracts the exit code
            self.errorlevel = os.system(command) >> 8
            outfd = file(outfile, 'r')
            self.out = outfd.read()
            outfd.close()
            if capturestderr:
                errfd = file(errfile,'r')
                self.err = errfd.read()
                errfd.close()
        finally:
            # Always clean up the temporary redirection files
            if os.path.isfile(outfile):
                os.remove(outfile)
            if input and os.path.isfile(infile):
                os.remove(infile)
            if capturestderr and os.path.isfile(errfile):
                os.remove(errfile)
def makedirs(path, overwrite=False):
    """Create as many directories as necessary to make `path` exist.

    If `overwrite` is `True`, don't raise an exception in case `path`
    already exists.
    """
    exists = os.path.exists(path)
    if not (overwrite and exists):
        os.makedirs(path)
def copytree(src, dst, symlinks=False, skip=[], overwrite=False):
    """Recursively copy a directory tree using copy2() (from shutil.copytree.)

    Added a `skip` parameter consisting of absolute paths
    which we don't want to copy.
    """
    # Note: the mutable default `skip=[]` is safe here, it is only read
    # and immediately rebound below, never mutated in place.
    def str_path(path):
        # Paths are listed and compared as encoded `str`, so normalize
        # `unicode` arguments to the filesystem encoding first.
        if isinstance(path, unicode):
            path = path.encode(sys.getfilesystemencoding() or
                               getpreferredencoding())
        return path
    def remove_if_overwriting(path):
        if overwrite and os.path.exists(path):
            os.unlink(path)
    skip = [str_path(f) for f in skip]
    def copytree_rec(src, dst):
        names = os.listdir(src)
        makedirs(dst, overwrite=overwrite)
        # Failures are collected and raised at the end, like
        # shutil.copytree does, so one bad file doesn't stop the copy.
        errors = []
        for name in names:
            srcname = os.path.join(src, name)
            if srcname in skip:
                continue
            dstname = os.path.join(dst, name)
            try:
                if symlinks and os.path.islink(srcname):
                    remove_if_overwriting(dstname)
                    linkto = os.readlink(srcname)
                    os.symlink(linkto, dstname)
                elif os.path.isdir(srcname):
                    copytree_rec(srcname, dstname)
                else:
                    remove_if_overwriting(dstname)
                    shutil.copy2(srcname, dstname)
                # XXX What about devices, sockets etc.?
            except (IOError, OSError), why:
                errors.append((srcname, dstname, str(why)))
            # catch the Error from the recursive copytree so that we can
            # continue with other files
            except shutil.Error, err:
                errors.extend(err.args[0])
        try:
            shutil.copystat(src, dst)
        except WindowsError, why:
            pass # Ignore errors due to limited Windows copystat support
        except OSError, why:
            errors.append((src, dst, str(why)))
        if errors:
            raise shutil.Error(errors)
    copytree_rec(str_path(src), str_path(dst))
def is_path_below(path, parent):
    """Return True iff `path` is equal to parent or is located below `parent`
    at any level.
    """
    path, parent = os.path.abspath(path), os.path.abspath(parent)
    if path == parent:
        return True
    return path.startswith(parent + os.sep)
class file_or_std(object):
    """Context manager for opening a file or using a standard stream

    If `filename` is non-empty, open the file and close it when exiting the
    block. Otherwise, use `sys.stdin` if opening for reading, or `sys.stdout`
    if opening for writing or appending."""

    # Only set when we actually opened a file ourselves; the standard
    # streams are never closed by this manager.
    file = None

    def __init__(self, filename, mode='r', bufsize=-1):
        self.filename = filename
        self.mode = mode
        self.bufsize = bufsize

    def __enter__(self):
        if self.filename:
            self.file = open(self.filename, self.mode, self.bufsize)
            return self.file
        return sys.stdin if 'r' in self.mode else sys.stdout

    def __exit__(self, et, ev, tb):
        if self.file is not None:
            self.file.close()
# -- sys utils
def fq_class_name(obj):
    """Return the fully qualified class name of given object."""
    cls = type(obj)
    # Builtins are shown without their module prefix
    if cls.__module__ == '__builtin__':
        return cls.__name__
    return '%s.%s' % (cls.__module__, cls.__name__)
def arity(f):
    """Return the number of arguments expected by the given function, unbound
    or bound method.
    """
    # Python 2 only: `func_code` / `im_self`.  For a bound method,
    # `im_self` is set and `self` is subtracted from the count.
    return f.func_code.co_argcount - bool(getattr(f, 'im_self', False))
def get_last_traceback():
    """Retrieve the last traceback as an `unicode` string."""
    import traceback
    from StringIO import StringIO
    # Render the traceback of the exception currently being handled
    tb = StringIO()
    traceback.print_exc(file=tb)
    return to_unicode(tb.getvalue())
# Matches source paths of modules loaded from zipped eggs
_egg_path_re = re.compile(r'build/bdist\.[^/]+/egg/(.*)')

def get_lines_from_file(filename, lineno, context=0, globals=None):
    """Return `context` number of lines before and after the specified
    `lineno` from the (source code) file identified by `filename`.

    Returns a `(lines_before, line, lines_after)` tuple.
    """
    # The linecache module can load source code from eggs since Python 2.6.
    # Prior versions return lines from the wrong file, so we try locating
    # the file in eggs manually first.
    lines = []
    match = _egg_path_re.match(filename)
    if match:
        import zipfile
        for path in sys.path:
            try:
                zip = zipfile.ZipFile(path, 'r')
                try:
                    lines = zip.read(match.group(1)).splitlines()
                    break
                finally:
                    zip.close()
            except Exception:
                # Not a zip file, or the member is missing: try next path
                pass
    if not lines:
        import linecache
        linecache.checkcache(filename)
        lines = linecache.getlines(filename, globals)
    if not 0 <= lineno < len(lines):
        return (), None, ()
    lbound = max(0, lineno - context)
    ubound = lineno + 1 + context
    charset = None
    # Raw string: `\s` / `\w` are regex escapes, not string escapes
    # (PEP 263 coding declaration in the first two lines)
    rep = re.compile(r'coding[=:]\s*([-\w.]+)')
    for linestr in lines[:2]:
        match = rep.search(linestr)
        if match:
            charset = match.group(1)
            break
    before = [to_unicode(l.rstrip('\n'), charset)
              for l in lines[lbound:lineno]]
    line = to_unicode(lines[lineno].rstrip('\n'), charset)
    after = [to_unicode(l.rstrip('\n'), charset)
             for l in lines[lineno + 1:ubound]]
    return before, line, after
def get_frame_info(tb):
    """Return frame information for a traceback."""
    frames = []
    while tb:
        # Frames can opt out of display by setting a local
        # `__traceback_hide__`; 'before'/'before_and_this' also discard
        # all frames collected so far.
        tb_hide = tb.tb_frame.f_locals.get('__traceback_hide__')
        if tb_hide in ('before', 'before_and_this'):
            del frames[:]
            tb_hide = tb_hide[6:]
        if not tb_hide:
            filename = tb.tb_frame.f_code.co_filename
            filename = filename.replace('\\', '/')
            # 0-based line number, as expected by get_lines_from_file
            lineno = tb.tb_lineno - 1
            before, line, after = get_lines_from_file(filename, lineno, 5,
                                                      tb.tb_frame.f_globals)
            frames.append({'traceback': tb, 'filename': filename,
                           'lineno': lineno, 'line': line,
                           'lines_before': before, 'lines_after': after,
                           'function': tb.tb_frame.f_code.co_name,
                           'vars': tb.tb_frame.f_locals})
        tb = tb.tb_next
    return frames
def safe__import__(module_name):
    """
    Safe imports: rollback after a failed import.

    Initially inspired from the RollbackImporter in PyUnit,
    but it's now much simpler and works better for our needs.

    See http://pyunit.sourceforge.net/notes/reloading.html

    :return: the imported (top-level) module
    :raises: whatever the failed import raised, with its original traceback
    """
    already_imported = sys.modules.copy()
    try:
        return __import__(module_name, globals(), locals(), [])
    except Exception:
        # Unregister every module the failed import left behind, so that
        # a later retry starts from a clean state.
        for modname in sys.modules.copy():
            if modname not in already_imported:
                del sys.modules[modname]
        # Bare `raise` keeps the original traceback (`raise e` would
        # have replaced it with this frame).
        raise
def safe_repr(x):
    """`repr` replacement which "never" breaks.

    Make sure we always get a representation of the input `x`
    without risking to trigger an exception (e.g. from a buggy
    `x.__repr__`).

    .. versionadded :: 1.0
    """
    try:
        return to_unicode(repr(x))
    except Exception, e:
        # Fall back to a default-repr-like form that also reports the
        # error raised by the object's own __repr__
        return "<%s object at 0x%X (repr() error: %s)>" % (
            fq_class_name(x), id(x), exception_to_unicode(e))
def get_doc(obj):
    """Return the docstring of an object as a tuple `(summary, description)`,
    where `summary` is the first paragraph and `description` is the remaining
    text.
    """
    doc = inspect.getdoc(obj)
    if not doc:
        # Both parts are None when the object has no docstring at all
        return (None, None)
    # First blank line separates the summary from the description
    doc = to_unicode(doc).split('\n\n', 1)
    summary = doc[0].replace('\n', ' ')
    description = doc[1] if len(doc) > 1 else None
    return (summary, description)
# Module attributes that must never leak into the stub's namespace
_dont_import = frozenset(['__file__', '__name__', '__package__'])

def import_namespace(globals_dict, module_name):
    """Import the namespace of a module into a globals dict.

    This function is used in stub modules to import all symbols defined in
    another module into the global namespace of the stub, usually for
    backward compatibility.
    """
    __import__(module_name)
    module = sys.modules[module_name]
    globals_dict.update(item for item in module.__dict__.iteritems()
                        if item[0] not in _dont_import)
    # The helper itself should not remain visible in the stub
    globals_dict.pop('import_namespace', None)
# -- setuptools utils
def get_module_path(module):
    """Return the base path the given module is imported from"""
    path = module.__file__
    module_name = module.__name__
    # Point at the source file rather than the compiled one
    if path.endswith(('.pyc', '.pyo')):
        path = path[:-1]
    if os.path.basename(path) == '__init__.py':
        path = os.path.dirname(path)
    base_path = os.path.splitext(path)[0]
    # Strip one package component per iteration until only the
    # sys.path entry the module was loaded from remains
    while base_path.replace(os.sep, '.').endswith(module_name):
        base_path = os.path.dirname(base_path)
        module_name = '.'.join(module_name.split('.')[:-1])
        if not module_name:
            break
    return base_path
def get_sources(path):
    """Return a dictionary mapping Python module source paths to the
    distributions that contain them.
    """
    sources = {}
    for dist in find_distributions(path, only=True):
        try:
            # Only keep sources that live under one of the distribution's
            # declared top-level packages
            toplevels = dist.get_metadata('top_level.txt').splitlines()
            toplevels = [each + '/' for each in toplevels]
            files = dist.get_metadata('SOURCES.txt').splitlines()
            sources.update((src, dist) for src in files
                           if any(src.startswith(toplevel)
                                  for toplevel in toplevels))
        except (KeyError, IOError):
            pass # Metadata not found
    return sources
def get_pkginfo(dist):
    """Get a dictionary containing package information for a package

    `dist` can be either a Distribution instance or, as a shortcut,
    directly the module instance, if one can safely infer a Distribution
    instance from it.

    Always returns a dictionary but it will be empty if no Distribution
    instance can be created for the given module.
    """
    import types
    if isinstance(dist, types.ModuleType):
        # Resolve the module to the distribution that provides it
        module = dist
        module_path = get_module_path(module)
        for dist in find_distributions(module_path, only=True):
            if os.path.isfile(module_path) or \
                    dist.key == module.__name__.lower():
                break
        else:
            return {}
    import email
    attrs = ('author', 'author-email', 'license', 'home-page', 'summary',
             'description', 'version')
    info = {}
    # PKG-INFO header names use dashes; we expose snake_case keys
    def normalize(attr):
        return attr.lower().replace('-', '_')
    try:
        # PKG-INFO is in RFC 822 format, so the email parser handles it
        pkginfo = email.message_from_string(dist.get_metadata('PKG-INFO'))
        for attr in [key for key in attrs if key in pkginfo]:
            info[normalize(attr)] = pkginfo[attr]
    except IOError, e:
        # Report the failure in every field rather than raising
        err = 'Failed to read PKG-INFO file for %s: %s' % (dist, e)
        for attr in attrs:
            info[normalize(attr)] = err
    except email.Errors.MessageError, e:
        err = 'Failed to parse PKG-INFO file for %s: %s' % (dist, e)
        for attr in attrs:
            info[normalize(attr)] = err
    return info
# -- crypto utils
# Prefer the operating system's CSPRNG; fall back to a pseudo-random
# SHA1-based generator on platforms where os.urandom is unavailable.
try:
    os.urandom(16)
    urandom = os.urandom
except NotImplementedError:
    _entropy = random.Random()
    def urandom(n):
        # NOTE(review): this fallback is NOT cryptographically strong --
        # it is seeded from pid/time and random.Random only.
        result = []
        hasher = sha1(str(os.getpid()) + str(time.time()))
        while len(result) * hasher.digest_size < n:
            hasher.update(str(_entropy.random()))
            result.append(hasher.digest())
        result = ''.join(result)
        # Trim to exactly n bytes
        return result[:n] if len(result) > n else result
def hex_entropy(digits=32):
    """Generate `digits` number of hex digits of entropy."""
    # Each random byte yields two hex digits; round up, then trim any
    # excess digit for odd `digits` values.
    result = ''.join('%.2x' % ord(v) for v in urandom((digits + 1) // 2))
    return result[:digits] if len(result) > digits else result
# Original license for md5crypt:
# Based on FreeBSD src/lib/libcrypt/crypt.c 1.2
#
# "THE BEER-WARE LICENSE" (Revision 42):
# <phk@login.dknet.dk> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
def md5crypt(password, salt, magic='$1$'):
    """Based on FreeBSD src/lib/libcrypt/crypt.c 1.2

    :param password: the plain text password to crypt
    :param salt: the raw salt
    :param magic: our magic string
    :return: the crypted password, `magic + salt + '$' + <22 chars>`
    """
    # /* The password first, since that is what is most unknown */
    # /* Then our magic string */
    # /* Then the raw salt */
    m = md5(password + magic + salt)
    # /* Then just as many characters of the MD5(pw,salt,pw) */
    mixin = md5(password + salt + password).digest()
    for i in range(0, len(password)):
        m.update(mixin[i % 16])
    # /* Then something really weird... */
    # Also really broken, as far as I can tell.  -m
    i = len(password)
    while i:
        if i & 1:
            m.update('\x00')
        else:
            m.update(password[0])
        i >>= 1
    final = m.digest()
    # /* and now, just to make sure things don't run too fast */
    # 1000 rounds of key stretching, alternating the inputs per round
    for i in range(1000):
        m2 = md5()
        if i & 1:
            m2.update(password)
        else:
            m2.update(final)
        if i % 3:
            m2.update(salt)
        if i % 7:
            m2.update(password)
        if i & 1:
            m2.update(final)
        else:
            m2.update(password)
        final = m2.digest()
    # This is the bit that uses to64() in the original code.
    # Base64-like encoding with the crypt(3) alphabet, 3 bytes -> 4 chars
    itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    rearranged = ''
    for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):
        v = ord(final[a]) << 16 | ord(final[b]) << 8 | ord(final[c])
        for i in range(4):
            rearranged += itoa64[v & 0x3f]
            v >>= 6
    # The 16th digest byte is encoded alone into the last two characters
    v = ord(final[11])
    for i in range(2):
        rearranged += itoa64[v & 0x3f]
        v >>= 6
    return magic + salt + '$' + rearranged
# -- data structures
class Ranges(object):
    """Holds information about ranges parsed from a string

    :author: Tim Hatch

    >>> x = Ranges("1,2,9-15")
    >>> 1 in x
    True
    >>> 5 in x
    False
    >>> 10 in x
    True
    >>> 16 in x
    False
    >>> [i for i in range(20) if i in x]
    [1, 2, 9, 10, 11, 12, 13, 14, 15]

    Also supports iteration, which makes that last example a bit simpler:

    >>> list(x)
    [1, 2, 9, 10, 11, 12, 13, 14, 15]

    Note that it automatically reduces the list and short-circuits when the
    desired ranges are a relatively small portion of the entire set:

    >>> x = Ranges("99")
    >>> 1 in x # really fast
    False
    >>> x = Ranges("1, 2, 1-2, 2") # reduces this to 1-2
    >>> x.pairs
    [(1, 2)]
    >>> x = Ranges("1-9,2-4") # handle ranges that completely overlap
    >>> list(x)
    [1, 2, 3, 4, 5, 6, 7, 8, 9]

    The members 'a' and 'b' refer to the min and max value of the range, and
    are None if the range is empty:

    >>> x.a
    1
    >>> x.b
    9
    >>> e = Ranges()
    >>> e.a, e.b
    (None, None)

    Empty ranges are ok, and ranges can be constructed in pieces, if you
    so choose:

    >>> x = Ranges()
    >>> x.appendrange("1, 2, 3")
    >>> x.appendrange("5-9")
    >>> x.appendrange("2-3") # reduce'd away
    >>> list(x)
    [1, 2, 3, 5, 6, 7, 8, 9]

    Reversed ranges are ignored, unless the Ranges has the `reorder` property
    set.

    >>> str(Ranges("20-10"))
    ''
    >>> str(Ranges("20-10", reorder=True))
    '10-20'

    As rendered ranges are often using u',\u200b' (comma + Zero-width
    space) to enable wrapping, we also support reading such ranges, as
    they can be copy/pasted back.

    >>> str(Ranges(u'1,\u200b3,\u200b5,\u200b6,\u200b7,\u200b9'))
    '1,3,5-7,9'
    """
    # Pattern matching a whole ranges specification (also accepts the
    # zero-width space used when rendering ranges for wrapping)
    RE_STR = ur'[0-9]+(?:[-:][0-9]+)?(?:,\u200b?[0-9]+(?:[-:][0-9]+)?)*'
    def __init__(self, r=None, reorder=False):
        # pairs: sorted, non-overlapping (low, high) inclusive tuples
        self.pairs = []
        self.a = self.b = None
        self.reorder = reorder
        self.appendrange(r)
    def appendrange(self, r):
        """Add ranges to the current one.

        A range is specified as a string of the form "low-high", and
        `r` can be a list of such strings, a string containing comma-separated
        ranges, or `None`.
        """
        if not r:
            return
        p = self.pairs
        if isinstance(r, basestring):
            r = re.split(u',\u200b?', r)
        for x in r:
            try:
                a, b = map(int, x.split('-', 1))
            except ValueError:
                # single number: a degenerate one-element range
                a, b = int(x), int(x)
            if b >= a:
                p.append((a, b))
            elif self.reorder:
                p.append((b, a))
        self._reduce()
    def _reduce(self):
        """Come up with the minimal representation of the ranges"""
        p = self.pairs
        p.sort()
        i = 0
        while i + 1 < len(p):
            if p[i+1][0]-1 <= p[i][1]: # this item overlaps with the next
                # make the first include the second
                p[i] = (p[i][0], max(p[i][1], p[i+1][1]))
                del p[i+1] # delete the second, after adjusting my endpoint
            else:
                i += 1
        if p:
            self.a = p[0][0] # min value
            self.b = p[-1][1] # max value
        else:
            self.a = self.b = None
    def __iter__(self):
        """
        This is another way I came up with to do it.  Is it faster?

        from itertools import chain
        return chain(*[xrange(a, b+1) for a, b in self.pairs])
        """
        for a, b in self.pairs:
            for i in range(a, b+1):
                yield i
    def __contains__(self, x):
        """
        >>> 55 in Ranges()
        False
        """
        # short-circuit if outside the possible range
        if self.a is not None and self.a <= x <= self.b:
            for a, b in self.pairs:
                if a <= x <= b:
                    return True
                if b > x: # short-circuit if we've gone too far
                    break
        return False
    def __str__(self):
        """Provide a compact string representation of the range.

        >>> (str(Ranges("1,2,3,5")), str(Ranges()), str(Ranges('2')))
        ('1-3,5', '', '2')
        >>> str(Ranges('99-1')) # only nondecreasing ranges allowed
        ''
        """
        r = []
        for a, b in self.pairs:
            if a == b:
                r.append(str(a))
            else:
                r.append("%d-%d" % (a, b))
        return ",".join(r)
    def __len__(self):
        """The length of the entire span, ignoring holes.

        >>> (len(Ranges('99')), len(Ranges('1-2')), len(Ranges('')))
        (1, 2, 0)
        """
        if self.a is None or self.b is None:
            return 0
        # Result must fit an int
        return min(self.b - self.a + 1, sys.maxint)
    def __nonzero__(self):
        """Return True iff the range is not empty.

        >>> (bool(Ranges()), bool(Ranges('1-2')))
        (False, True)
        """
        return self.a is not None and self.b is not None
    def truncate(self, max):
        """Truncate the Ranges by setting a maximal allowed value.

        Note that this `max` can be a value in a gap, so the only guarantee
        is that `self.b` will be lesser than or equal to `max`.

        >>> r = Ranges("10-20,25-45")
        >>> str(r.truncate(30))
        '10-20,25-30'
        >>> str(r.truncate(22))
        '10-20'
        >>> str(r.truncate(10))
        '10'
        """
        # NOTE(review): when trailing pairs are dropped without clipping
        # one (e.g. truncate(22) above), r.b keeps self.b and may exceed
        # `max`, which seems to contradict the docstring -- confirm.
        r = Ranges()
        r.a, r.b, r.reorder = self.a, self.b, self.reorder
        r.pairs = []
        for a, b in self.pairs:
            if a <= max:
                if b > max:
                    r.pairs.append((a, max))
                    r.b = max
                    break
                r.pairs.append((a, b))
            else:
                break
        return r
def to_ranges(revs):
    """Converts a list of revisions to a minimal set of ranges.

    >>> to_ranges([2, 12, 3, 6, 9, 1, 5, 11])
    '1-3,5-6,9,11-12'
    >>> to_ranges([])
    ''
    """
    ranges = []
    begin = end = None

    def flush():
        # Render the pending [begin, end] run
        ranges.append(str(begin) if begin == end
                      else '%d-%d' % (begin, end))

    for rev in sorted(revs):
        if begin is None:
            begin = end = rev
        elif rev == end + 1:
            # Contiguous: extend the current run
            end = rev
        else:
            flush()
            begin = end = rev
    if begin is not None:
        flush()
    return ','.join(ranges)
class lazy(object):
    """A lazily-evaluated attribute"""

    def __init__(self, fn):
        self.fn = fn

    def __get__(self, instance, owner):
        # Accessed on the class itself: expose the descriptor
        if instance is None:
            return self
        value = self.fn(instance)
        # Cache on the instance: the instance attribute shadows this
        # non-data descriptor, so `fn` runs at most once per instance.
        setattr(instance, self.fn.__name__, value)
        return value
# -- algorithmic utilities
DIGITS = re.compile(r'(\d+)')

def embedded_numbers(s):
    """Comparison function for natural order sorting based on
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/214202."""
    pieces = DIGITS.split(s)
    # Odd positions hold the captured digit runs; compare those as ints
    pieces[1::2] = [int(run) for run in pieces[1::2]]
    return pieces
def pairwise(iterable):
    """
    >>> list(pairwise([0, 1, 2, 3]))
    [(0, 1), (1, 2), (2, 3)]

    .. deprecated :: 0.11
       if this really needs to be used, rewrite it without izip
    """
    # Duplicate the iterator, advance the second copy by one element,
    # then zip the two back together to get consecutive pairs.
    a, b = tee(iterable)
    try:
        b.next()
    except StopIteration:
        pass
    return izip(a, b)
def partition(iterable, order=None):
    """
    >>> partition([(1, "a"), (2, "b"), (3, "a")])
    {'a': [1, 3], 'b': [2]}
    >>> partition([(1, "a"), (2, "b"), (3, "a")], "ab")
    [[1, 3], [2]]
    """
    # Pre-seed the buckets when an explicit category order is given, so
    # empty categories still show up in the result.
    buckets = {} if order is None else dict((key, []) for key in order)
    for item, category in iterable:
        buckets.setdefault(category, []).append(item)
    if order is None:
        return buckets
    return [buckets[key] for key in order]
def as_int(s, default, min=None, max=None):
    """Convert s to an int and limit it to the given range, or return default
    if unsuccessful."""
    try:
        value = int(s)
    except (TypeError, ValueError):
        return default
    # `min`/`max` shadow the builtins (part of the public signature),
    # so clamp with explicit comparisons, lower bound first.
    lo, hi = min, max
    if lo is not None and value < lo:
        value = lo
    if hi is not None and value > hi:
        value = hi
    return value
def as_bool(value):
"""Convert the given value to a `bool`.
If `value` is a string, return `True` for any of "yes", "true", "enabled",
"on" or non-zero numbers, ignoring case. For non-string arguments, return
the argument converted to a `bool`, or `False` if the conversion fails.
"""
if isinstance(value, basestring):
try:
return bool(float(value))
except ValueError:
return value.strip().lower() in ('yes', 'true', 'enabled', 'on')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
# Christian Boos <cboos@edgewall.org>
import __builtin__
import locale
import os
import re
import sys
import textwrap
from urllib import quote, quote_plus, unquote
from unicodedata import east_asian_width
from trac.util.translation import _
# Canonical network/Windows line ending
CRLF = '\r\n'

class Empty(unicode):
    """A special tag object evaluating to the empty string"""
    __slots__ = []

# Singleton marker; compare with `is empty` (see unicode_urlencode)
empty = Empty()

del Empty # shouldn't be used outside of Trac core
# -- Unicode
def to_unicode(text, charset=None):
    """Convert input to an `unicode` object.

    For a `str` object, we'll first try to decode the bytes using the given
    `charset` encoding (or UTF-8 if none is specified), then we fall back to
    the latin1 encoding which might be correct or not, but at least preserves
    the original byte sequence by mapping each byte to the corresponding
    unicode code point in the range U+0000 to U+00FF.

    For anything else, a simple `unicode()` conversion is attempted,
    with special care taken with `Exception` objects.
    """
    if isinstance(text, str):
        try:
            return unicode(text, charset or 'utf-8')
        except UnicodeDecodeError:
            # latin1 never fails: every byte maps to U+00..U+FF
            return unicode(text, 'latin1')
    elif isinstance(text, Exception):
        # two possibilities for storing unicode strings in exception data:
        try:
            #  custom __str__ method on the exception (e.g. PermissionError)
            return unicode(text)
        except UnicodeError:
            #  unicode arguments given to the exception (e.g. parse_date)
            return ' '.join([to_unicode(arg) for arg in text.args])
    return unicode(text)
def exception_to_unicode(e, traceback=False):
    """Convert an `Exception` to an `unicode` object.

    In addition to `to_unicode`, this representation of the exception
    also contains the class name and optionally the traceback.
    """
    message = '%s: %s' % (e.__class__.__name__, to_unicode(e))
    if traceback:
        # Imported lazily to avoid a circular import with trac.util
        from trac.util import get_last_traceback
        # Drop the final "ExceptionClass: message" lines, which would
        # duplicate `message` below
        traceback_only = get_last_traceback().split('\n')[:-2]
        message = '\n%s\n%s' % (to_unicode('\n'.join(traceback_only)), message)
    return message
def path_to_unicode(path):
    """Convert a filesystem path to unicode, using the filesystem encoding."""
    if isinstance(path, str):
        try:
            return unicode(path, sys.getfilesystemencoding())
        except UnicodeDecodeError:
            # latin1 always succeeds and preserves the byte values
            return unicode(path, 'latin1')
    return unicode(path)
# Whitespace plus the zero-width space (U+200B) at the start/end of a value
_ws_leading_re = re.compile(ur'\A[\s\u200b]+', re.UNICODE)
_ws_trailing_re = re.compile(ur'[\s\u200b]+\Z', re.UNICODE)

def stripws(text, leading=True, trailing=True):
    """Strips unicode white-spaces and ZWSPs from ``text``.

    :param leading: strips leading spaces from ``text`` unless ``leading`` is
                    `False`.
    :param trailing: strips trailing spaces from ``text`` unless ``trailing``
                     is `False`.
    """
    if leading:
        text = _ws_leading_re.sub('', text)
    if trailing:
        text = _ws_trailing_re.sub('', text)
    return text
def strip_line_ws(text, leading=True, trailing=True):
    """Strips unicode white-spaces and ZWSPs from each line of ``text``.

    :param leading: strips leading spaces from ``text`` unless ``leading`` is
                    `False`.
    :param trailing: strips trailing spaces from ``text`` unless ``trailing``
                     is `False`.
    """
    # Capturing split keeps the line separators at odd indices, so the
    # original line endings survive the join below
    lines = re.compile(r'(\n|\r\n|\r)').split(text)
    if leading:
        lines[::2] = (_ws_leading_re.sub('', line) for line in lines[::2])
    if trailing:
        lines[::2] = (_ws_trailing_re.sub('', line) for line in lines[::2])
    return ''.join(lines)
# Escape table for embedding text in Javascript string literals
_js_quote = {'\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f',
             '\n': '\\n', '\r': '\\r', '\t': '\\t', "'": "\\'"}
# Also escape all C0 control characters and the HTML-significant '&<>'
# as \uXXXX, so quoted strings are safe inside <script> blocks
for i in range(0x20) + [ord(c) for c in '&<>']:
    _js_quote.setdefault(chr(i), '\\u%04x' % i)
_js_quote_re = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t\'&<>]')
# Same as _js_quote_re, but leaves single quotes alone (JSON strings)
_js_string_re = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t&<>]')
def javascript_quote(text):
    """Quote strings for inclusion in single or double quote delimited
    Javascript strings
    """
    if not text:
        # covers both '' and None
        return ''
    def replace(match):
        return _js_quote[match.group(0)]
    return _js_quote_re.sub(replace, text)
def to_js_string(text):
    """Embed the given string in a double quote delimited Javascript string
    (conform to the JSON spec)
    """
    if not text:
        # empty or None becomes an empty JSON string literal
        return '""'
    def replace(match):
        return _js_quote[match.group(0)]
    return '"%s"' % _js_string_re.sub(replace, text)
def unicode_quote(value, safe='/'):
    """A unicode aware version of `urllib.quote`

    :param value: anything that converts to a `str`. If `unicode`
                  input is given, it will be UTF-8 encoded.
    :param safe: as in `quote`, the characters that would otherwise be
                 quoted but shouldn't here (defaults to '/')
    """
    # urllib.quote only handles bytes, so encode unicode as UTF-8 first
    return quote(value.encode('utf-8') if isinstance(value, unicode)
                 else str(value), safe)
def unicode_quote_plus(value, safe=''):
    """A unicode aware version of `urllib.quote_plus`.

    :param value: anything that converts to a `str`. If `unicode`
                  input is given, it will be UTF-8 encoded.
    :param safe: as in `quote_plus`, the characters that would
                 otherwise be quoted but shouldn't here (defaults to
                 '/')
    """
    # quote_plus additionally encodes spaces as '+'
    return quote_plus(value.encode('utf-8') if isinstance(value, unicode)
                      else str(value), safe)
def unicode_unquote(value):
    """A unicode aware version of `urllib.unquote`.

    :param value: UTF-8 encoded `str` value (for example, as obtained by
                  `unicode_quote`).
    :rtype: `unicode`
    """
    return unquote(value).decode('utf-8')
def unicode_urlencode(params, safe=''):
    """A unicode aware version of `urllib.urlencode`.

    Values set to `empty` are converted to the key alone, without the
    equal sign.
    """
    if isinstance(params, dict):
        params = params.iteritems()
    l = []
    for k, v in params:
        if v is empty:
            # valueless parameter: just the encoded key
            l.append(unicode_quote_plus(k, safe))
        else:
            l.append(unicode_quote_plus(k, safe) + '=' +
                     unicode_quote_plus(v, safe))
    return '&'.join(l)
# All printable ASCII (0x21-0x7e) is considered safe in query strings,
# so only spaces (as '+') and non-ASCII bytes get percent-encoded
_qs_quote_safe = ''.join(chr(c) for c in xrange(0x21, 0x7f))

def quote_query_string(text):
    """Quote strings for query string
    """
    return unicode_quote_plus(text, _qs_quote_safe)
def to_utf8(text, charset='latin1'):
    """Convert a string to UTF-8, assuming the encoding is either UTF-8, ISO
    Latin-1, or as specified by the optional `charset` parameter.

    .. deprecated :: 0.10
       You should use `unicode` strings only.
    """
    try:
        # Do nothing if it's already utf-8
        u = unicode(text, 'utf-8')
        return text
    except UnicodeError:
        try:
            # Use the user supplied charset if possible
            u = unicode(text, charset)
        except UnicodeError:
            # This should always work
            u = unicode(text, 'latin1')
        # Re-encode the decoded text as UTF-8 bytes
        return u.encode('utf-8')
class unicode_passwd(unicode):
    """Conceal the actual content of the string when `repr` is called."""
    # Keeps passwords out of logs and debug output; str()/unicode()
    # still return the real value.
    def __repr__(self):
        return '*******'
def stream_encoding(stream):
    """Return the appropriate encoding for the given stream."""
    encoding = getattr(stream, 'encoding', None)
    # Windows returns 'cp0' to indicate no encoding
    if encoding is None or encoding == 'cp0':
        return 'utf-8'
    return encoding
def console_print(out, *args, **kwargs):
    """Output the given arguments to the console, encoding the output
    as appropriate.

    :param kwargs: ``newline`` controls whether a newline will be appended
                   (defaults to `True`)
    """
    cons_charset = stream_encoding(out)
    # Encode to the console charset, replacing unencodable characters
    # rather than raising
    out.write(' '.join([to_unicode(a).encode(cons_charset, 'replace')
                        for a in args]))
    if kwargs.get('newline', True):
        out.write('\n')
def printout(*args, **kwargs):
    """Do a `console_print` on `sys.stdout`."""
    console_print(sys.stdout, *args, **kwargs)
def printerr(*args, **kwargs):
    """Do a `console_print` on `sys.stderr`."""
    console_print(sys.stderr, *args, **kwargs)
def raw_input(prompt):
    """Input one line from the console and converts it to unicode as
    appropriate.
    """
    # Intentionally shadows the builtin: prompts via printout (console
    # charset aware) and decodes the reply with the stdin encoding
    printout(prompt, newline=False)
    return to_unicode(__builtin__.raw_input(), sys.stdin.encoding)
# Captured once at import time; see getpreferredencoding() below
_preferredencoding = locale.getpreferredencoding()

def getpreferredencoding():
    """Return the encoding, which is retrieved on ahead, according to user
    preference.

    We should use this instead of `locale.getpreferredencoding()` which
    is not thread-safe."""
    return _preferredencoding
# -- Plain text formatting
def text_width(text, ambiwidth=1):
    """Determine the column width of `text` in Unicode characters.

    The characters in the East Asian Fullwidth (F) or East Asian Wide (W)
    have a column width of 2. The other characters in the East Asian
    Halfwidth (H) or East Asian Narrow (Na) have a column width of 1.

    That `ambiwidth` parameter is used for the column width of the East
    Asian Ambiguous (A). If `1`, the same width as characters in US-ASCII.
    This is expected by most users. If `2`, twice the width of US-ASCII
    characters. This is expected by CJK users.

    cf. http://www.unicode.org/reports/tr11/.
    """
    twice = 'FWA' if ambiwidth == 2 else 'FW'
    # Generator instead of a list; `ch` avoids shadowing the `chr` builtin
    return sum(2 if east_asian_width(ch) in twice else 1
               for ch in to_unicode(text))
_default_ambiwidth = 1  # Default width of East Asian Ambiguous (A)
if os.name == 'nt':
    try:
        # `ctypes` is available since Python 2.5
        import ctypes
        codepage = ctypes.windll.kernel32.GetConsoleOutputCP()
    except ImportError:
        # Try to retrieve the codepage from stderr and stdout
        # (strip the leading 'cp' from e.g. 'cp932')
        codepage = (sys.stderr.encoding or sys.stdout.encoding or '')[2:]
        codepage = codepage.isdigit() and int(codepage) or 0
    if codepage in (932,   # Japanese (Shift-JIS)
                    936,   # Chinese Simplified (GB2312)
                    949,   # Korean (Unified Hangul Code)
                    950):  # Chinese Traditional (Big5)
        _default_ambiwidth = 2
    del codepage
else:
    # CJK locales render East Asian Ambiguous characters double-width.
    # The ISO 639-1 code for Korean is 'ko' (LANG=ko_KR...); the old
    # 'kr' alternative never matched it and is kept only for safety.
    if re.match(r'zh|ja|ko|kr', os.environ.get('LANG') or '', re.IGNORECASE):
        _default_ambiwidth = 2
def print_table(data, headers=None, sep='  ', out=None, ambiwidth=None):
    """Print data according to a tabular layout.

    :param data: a sequence of rows; assume all rows are of equal length.
    :param headers: an optional row containing column headers; must be of
                    the same length as each row in `data`.
    :param sep: column separator
    :param out: output file descriptor (`None` means use `sys.stdout`)
    :param ambiwidth: column width of the East Asian Ambiguous (A). If None,
                      detect ambiwidth with the locale settings. If others,
                      pass to the `ambiwidth` parameter of `text_width`.
    """
    if out is None:
        out = sys.stdout
    charset = getattr(out, 'encoding', None) or 'utf-8'
    if ambiwidth is None:
        ambiwidth = _default_ambiwidth
    data = list(data)
    if headers:
        # Headers are laid out like a regular first row
        data.insert(0, headers)
    elif not data:
        return

    # Convert to an unicode object with `to_unicode`. If None, convert to a
    # empty string.
    def to_text(val):
        if val is None:
            return u''
        return to_unicode(val)

    def tw(text):
        # Display width in columns, not in characters
        return text_width(text, ambiwidth=ambiwidth)

    # Convert each cell to an unicode object
    data = [[to_text(cell) for cell in row] for row in data]

    num_cols = len(data[0])
    col_width = [max(tw(row[idx]) for row in data)
                 for idx in xrange(num_cols)]

    out.write('\n')
    for ridx, row in enumerate(data):
        for cidx, cell in enumerate(row):
            if headers and ridx == 0:
                sp = '%*s' % (tw(sep), ' ')  # No separator in header
            else:
                sp = sep
            if cidx + 1 == num_cols:
                sp = ''  # No separator after last column
            # Pad by display width: %-*s counts characters, so widen the
            # field by the difference between len() and column width
            line = u'%-*s%s' % (col_width[cidx] - tw(cell) + len(cell),
                                cell, sp)
            line = line.encode(charset, 'replace')
            out.write(line)

        out.write('\n')
        if ridx == 0 and headers:
            # Underline the header row across the full table width
            out.write('-' * (tw(sep) * cidx + sum(col_width)))
            out.write('\n')
    out.write('\n')
def shorten_line(text, maxlen=75):
    """Truncates content to at most `maxlen` characters.

    This tries to be (a bit) clever and attempts to find a proper word
    boundary (last space or newline before `maxlen`) for doing so.

    :param text: the text to shorten; falsy values are returned unchanged
    :param maxlen: length above which truncation kicks in
    """
    # `<=` so that text of exactly `maxlen` characters is left alone:
    # it already fits and needs no ' ...' marker (the previous `<`
    # truncated such text needlessly).
    if len(text or '') <= maxlen:
        return text
    cut = max(text.rfind(' ', 0, maxlen), text.rfind('\n', 0, maxlen))
    if cut < 0:
        cut = maxlen
    return text[:cut] + ' ...'
class UnicodeTextWrapper(textwrap.TextWrapper):
    """`textwrap.TextWrapper` subclass that measures text in display
    columns (via `text_width`) and treats runs of characters from the
    ranges below as breakable at any character boundary.
    """

    # (start, end[, surrogate_pair_regex]) code point ranges after which a
    # line break may be inserted.  The optional third element is a regexp
    # for the UTF-16 surrogate-pair form, used on narrow Python builds
    # where `unichr` cannot produce characters >= 0x10000.
    breakable_char_ranges = [
        (0x1100, 0x11FF), # Hangul Jamo
        (0x2E80, 0x2EFF), # CJK Radicals Supplement
        (0x3000, 0x303F), # CJK Symbols and Punctuation
        (0x3040, 0x309F), # Hiragana
        (0x30A0, 0x30FF), # Katakana
        (0x3130, 0x318F), # Hangul Compatibility Jamo
        (0x3190, 0x319F), # Kanbun
        (0x31C0, 0x31EF), # CJK Strokes
        (0x3200, 0x32FF), # Enclosed CJK Letters and Months
        (0x3300, 0x33FF), # CJK Compatibility
        (0x3400, 0x4DBF), # CJK Unified Ideographs Extension A
        (0x4E00, 0x9FFF), # CJK Unified Ideographs
        (0xA960, 0xA97F), # Hangul Jamo Extended-A
        (0xAC00, 0xD7AF), # Hangul Syllables
        (0xD7B0, 0xD7FF), # Hangul Jamo Extended-B
        (0xF900, 0xFAFF), # CJK Compatibility Ideographs
        (0xFE30, 0xFE4F), # CJK Compatibility Forms
        (0xFF00, 0xFFEF), # Halfwidth and Fullwidth Forms
        (0x20000, 0x2FFFF, u'[\uD840-\uD87F][\uDC00-\uDFFF]'), # Plane 2
        (0x30000, 0x3FFFF, u'[\uD880-\uD8BF][\uDC00-\uDFFF]'), # Plane 3
    ]

    split_re = None      # lazily compiled, shared by all instances
    breakable_re = None  # matches a leading run of breakable characters

    @classmethod
    def _init_patterns(cls):
        """Compile `split_re` and `breakable_re` from
        `breakable_char_ranges` (done once, on first instantiation).
        """
        char_ranges = []
        surrogate_pairs = []
        for val in cls.breakable_char_ranges:
            try:
                high = unichr(val[0])
                low = unichr(val[1])
                char_ranges.append(u'%s-%s' % (high, low))
            except ValueError:
                # Narrow build, `re` cannot use characters >= 0x10000
                surrogate_pairs.append(val[2])
        char_ranges = u''.join(char_ranges)
        if surrogate_pairs:
            pattern = u'(?:[%s]|%s)+' % (char_ranges,
                                         u'|'.join(surrogate_pairs))
        else:
            pattern = u'[%s]+' % char_ranges
        cls.split_re = re.compile(
            ur'(\s+|' + # any whitespace
            pattern + u'|' + # breakable text
            ur'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' + # hyphenated words
            ur'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))', # em-dash
            re.UNICODE)
        cls.breakable_re = re.compile(ur'\A' + pattern, re.UNICODE)

    def __init__(self, cols, replace_whitespace=0, break_long_words=0,
                 initial_indent='', subsequent_indent='', ambiwidth=1):
        # Note: `replace_whitespace` and `break_long_words` are accepted
        # for signature compatibility but always forced off below.
        textwrap.TextWrapper.__init__(
            self, cols, replace_whitespace=0, break_long_words=0,
            initial_indent=initial_indent,
            subsequent_indent=subsequent_indent)
        self.ambiwidth = ambiwidth
        if self.split_re is None:
            self._init_patterns()

    def _split(self, text):
        """Split `text` into wrappable chunks using `split_re`."""
        chunks = self.split_re.split(to_unicode(text))
        chunks = filter(None, chunks)
        return chunks

    def _text_width(self, text):
        # Display width of `text`, honoring East Asian Ambiguous setting.
        return text_width(text, ambiwidth=self.ambiwidth)

    def _wrap_chunks(self, chunks):
        """Override of `TextWrapper._wrap_chunks` that measures chunks in
        display columns and may split a breakable (CJK) chunk in the
        middle to fill the current line.
        """
        lines = []
        chunks.reverse()
        text_width = self._text_width
        while chunks:
            cur_line = []
            cur_width = 0
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            width = self.width - text_width(indent)
            # Drop leading whitespace on continuation lines.
            if chunks[-1].strip() == '' and lines:
                del chunks[-1]
            while chunks:
                chunk = chunks[-1]
                w = text_width(chunk)
                if cur_width + w <= width:
                    cur_line.append(chunks.pop())
                    cur_width += w
                elif self.breakable_re.match(chunk):
                    # Chunk is breakable anywhere: take as many characters
                    # as fit in the remaining space, leave the rest queued.
                    left_space = width - cur_width
                    for i in xrange(len(chunk)):
                        w = text_width(chunk[i])
                        if left_space < w:
                            break
                        left_space -= w
                    if i > 0:
                        cur_line.append(chunk[:i])
                        chunk = chunk[i:]
                        chunks[-1] = chunk
                        w = text_width(chunk)
                    break
                else:
                    break
            if chunks and w > width:
                # Single chunk wider than the line: defer to base class.
                self._handle_long_word(chunks, cur_line, cur_width, width)
            if cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines
def wrap(t, cols=75, initial_indent='', subsequent_indent='',
         linesep=os.linesep, ambiwidth=1):
    """Wrap the paragraph(s) in `t`, which may contain unicode characters.

    Each wrapped line is at most `cols` display columns long.  The
    `ambiwidth` parameter sets the column width of East Asian Ambiguous
    characters: `1` (same width as US-ASCII, expected by most users) or
    `2` (double width, expected by CJK users).
    """
    normalized = t.strip().replace('\r\n', '\n').replace('\r', '\n')
    wrapper = UnicodeTextWrapper(cols, replace_whitespace=0,
                                 break_long_words=0,
                                 initial_indent=initial_indent,
                                 subsequent_indent=subsequent_indent,
                                 ambiwidth=ambiwidth)
    wrapped = []
    for paragraph in normalized.split('\n'):
        # An empty paragraph still produces one (empty) output line.
        wrapped.extend(wrapper.wrap(paragraph.rstrip()) or [''])
    return linesep.join(wrapped)
def obfuscate_email_address(address):
    """Replace anything looking like an e-mail address (``'@something'``)
    with a trailing ellipsis (``'@…'``)

    A closing ``'>'`` (as in ``'Name <user@host>'``) is preserved.
    """
    if not address:
        return address
    at = address.find('@')
    if at == -1:
        return address
    closing = '>' if address.endswith('>') else ''
    return address[:at] + u'@\u2026' + closing
def breakable_path(path):
    """Make a path breakable after path separators, and conversely, avoid
    breaking at spaces.

    A zero-width space (U+200B) is inserted after every ``/`` and ``\\``
    except a leading ``/``, and every space becomes a no-break space
    (U+00A0).
    """
    if not path:
        return path
    head = ''
    rest = path
    if rest.startswith('/'):  # Avoid breaking after a leading /
        head, rest = '/', rest[1:]
    rest = rest.replace('/', u'/\u200b')
    rest = rest.replace('\\', u'\\\u200b')
    rest = rest.replace(' ', u'\u00a0')
    return head + rest
def normalize_whitespace(text, to_space=u'\u00a0', remove=u'\u200b'):
    """Normalize whitespace in a string, by replacing special spaces by normal
    spaces and removing zero-width spaces."""
    normalized = text
    if normalized:
        # First turn special spaces into plain ones, then strip the
        # zero-width characters.
        for spacer in to_space:
            normalized = normalized.replace(spacer, ' ')
        for zero_width in remove:
            normalized = normalized.replace(zero_width, '')
    return normalized
def unquote_label(txt):
    """Remove (one level of) enclosing single or double quotes.

    .. versionadded :: 1.0
    """
    if not txt:
        return txt
    first, last = txt[0], txt[-1]
    if first in ('"', "'") and first == last:
        return txt[1:-1]
    return txt
# -- Conversion
def pretty_size(size, format='%.1f'):
    """Pretty print content size information with appropriate unit.

    :param size: number of bytes
    :param format: can be used to adjust the precision shown
    """
    if size is None:
        return ''
    if size < 1024:
        return _('%(size)s bytes', size=size)
    value = float(size)
    # Divide down until the value fits the unit, stopping at terabytes.
    for unit in ('KB', 'MB', 'GB', 'TB'):
        value /= 1024.0
        if value < 1024:
            break
    return (format + ' %s') % (value, unit)
def expandtabs(s, tabstop=8, ignoring=None):
    """Expand tab characters `'\\\\t'` into spaces.

    :param tabstop: number of space characters per tab
                    (defaults to the canonical 8)
    :param ignoring: if not `None`, the expansion will be "smart" and
                     go from one tabstop to the next. In addition,
                     this parameter lists characters which can be
                     ignored when computing the indent.
    """
    if '\t' not in s:
        return s
    if ignoring is None:
        return s.expandtabs(tabstop)

    expanded = []
    for line in s.split('\n'):
        if '\t' not in line:
            expanded.append(line)
            continue
        out = []
        pos = 0  # current display position, not counting ignored chars
        for c in line:
            if c == '\t':
                pad = tabstop - pos % tabstop
                out.append(' ' * pad)
                pos += pad
            else:
                out.append(c)
                if not ignoring or c not in ignoring:
                    pos += 1
        expanded.append(''.join(out))
    return '\n'.join(expanded)
def fix_eol(text, eol):
    """Normalize every line ending in `text` to `eol`, ensuring the result
    ends with a final `eol` (empty input stays empty)."""
    return eol.join(text.splitlines() + [''])
def unicode_to_base64(text, strip_newlines=True):
    """Safe conversion of ``text`` to base64 representation using
    utf-8 bytes.

    Strips newlines from output unless ``strip_newlines`` is `False`.

    .. note:: relies on the Python 2 ``'base64'`` codec, which wraps long
       output across lines and appends a trailing newline — those are the
       newlines stripped here.
    """
    text = to_unicode(text)
    if strip_newlines:
        return text.encode('utf-8').encode('base64').replace('\n', '')
    return text.encode('utf-8').encode('base64')
def unicode_from_base64(text):
    """Safe conversion of ``text`` to unicode based on utf-8 bytes.

    Inverse of `unicode_to_base64` (uses the Python 2 ``'base64'`` codec).
    """
    return text.decode('base64').decode('utf-8')
def levenshtein_distance(lhs, rhs):
    """Return the Levenshtein distance between two strings, where a
    substitution counts as two edits (i.e. as a deletion plus an
    insertion)."""
    # Iterate over the shorter string so the DP rows stay small.
    if len(lhs) > len(rhs):
        lhs, rhs = rhs, lhs
    if not lhs:
        return len(rhs)
    previous = list(range(len(rhs) + 1))
    for i, a in enumerate(lhs):
        current = [i + 1]
        for j, b in enumerate(rhs):
            substitution = previous[j] + (0 if a == b else 2)
            deletion = previous[j + 1] + 1
            insertion = current[j] + 1
            current.append(min(deletion, insertion, substitution))
        previous = current
    return previous[-1]
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/concurrency.py | trac/trac/util/concurrency.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
# Use the real `threading` module when available; otherwise fall back to
# `dummy_threading` and stub out `_get_ident` so `get_thread_id` below
# always returns 0 in that single-threaded environment.
try:
    import threading
except ImportError:
    import dummy_threading as threading
    threading._get_ident = lambda: 0
class ThreadLocal(threading.local):
    """Thread-local storage whose attributes receive initial values from
    the constructor's keyword arguments.
    """

    def __init__(self, **kwargs):
        # `threading.local` subclasses re-run __init__ (with the original
        # arguments) in each thread that touches the object, so every
        # thread starts from the same defaults.
        super(ThreadLocal, self).__init__()
        for name, value in kwargs.items():
            setattr(self, name, value)
def get_thread_id():
    """Return the identifier of the currently executing thread (0 when
    only the `dummy_threading` fallback is available)."""
    return threading._get_ident()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/tests/html.py | trac/trac/util/tests/html.py | # -*- coding: utf-8 -*-
import unittest
from genshi.input import HTML
from trac.util.html import TracHTMLSanitizer
class TracHTMLSanitizerTestCase(unittest.TestCase):
    """Tests for `TracHTMLSanitizer`, mostly covering CSS-based XSS
    vectors hidden in `style` attributes.

    Note: the "Fullwidth" literals in `test_unicode_expression` use actual
    fullwidth characters (U+FF21..) — a normalization had previously
    reduced them to plain ASCII, duplicating `test_expression` and
    `test_capital_expression`.
    """
    def test_expression(self):
        html = HTML('<div style="top:expression(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_capital_expression(self):
        html = HTML('<div style="top:EXPRESSION(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_expression_with_comments(self):
        html = HTML(r'<div style="top:exp/**/ression(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div style="top:exp ression(alert())">XSS</div>',
                         unicode(html | TracHTMLSanitizer()))
        html = HTML(r'<div style="top:exp//**/**/ression(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual(
            '<div style="top:exp/ **/ression(alert())">XSS</div>',
            unicode(html | TracHTMLSanitizer()))
        html = HTML(r'<div style="top:ex/*p*/ression(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div style="top:ex ression(alert())">XSS</div>',
                         unicode(html | TracHTMLSanitizer()))

    def test_url_with_javascript(self):
        html = HTML('<div style="background-image:url(javascript:alert())">'
                    'XSS</div>', encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_capital_url_with_javascript(self):
        html = HTML('<div style="background-image:URL(javascript:alert())">'
                    'XSS</div>', encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_unicode_escapes(self):
        html = HTML(r'<div style="top:exp\72 ess\000069 on(alert())">'
                    r'XSS</div>', encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        # escaped backslash
        html = HTML(r'<div style="top:exp\5c ression(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual(r'<div style="top:exp\\ression(alert())">XSS</div>',
                         unicode(html | TracHTMLSanitizer()))
        html = HTML(r'<div style="top:exp\5c 72 ession(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual(r'<div style="top:exp\\72 ession(alert())">XSS</div>',
                         unicode(html | TracHTMLSanitizer()))
        # escaped control characters
        html = HTML(r'<div style="top:exp\000000res\1f sion(alert())">'
                    r'XSS</div>', encoding='utf-8')
        self.assertEqual('<div style="top:exp res sion(alert())">XSS</div>',
                         unicode(html | TracHTMLSanitizer()))

    def test_backslash_without_hex(self):
        html = HTML(r'<div style="top:e\xp\ression(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        html = HTML(r'<div style="top:e\\xp\\ression(alert())">XSS</div>',
                    encoding='utf-8')
        self.assertEqual(r'<div style="top:e\\xp\\ression(alert())">'
                         'XSS</div>',
                         unicode(html | TracHTMLSanitizer()))

    def test_unsafe_props(self):
        html = HTML('<div style="POSITION:RELATIVE">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        html = HTML('<div style="position:STATIC">safe</div>',
                    encoding='utf-8')
        self.assertEqual('<div style="position:STATIC">safe</div>',
                         unicode(html | TracHTMLSanitizer()))
        html = HTML('<div style="behavior:url(test.htc)">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        html = HTML('<div style="-ms-behavior:url(test.htc) url(#obj)">'
                    'XSS</div>', encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        html = HTML("""<div style="-o-link:'javascript:alert(1)';"""
                    """-o-link-source:current">XSS</div>""", encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        html = HTML("""<div style="-moz-binding:url(xss.xbl)">XSS</div>""",
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_negative_margin(self):
        html = HTML('<div style="margin-top:-9999px">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        html = HTML('<div style="margin:0 -9999px">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_css_hack(self):
        html = HTML('<div style="*position:static">XSS</div>',
                    encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        html = HTML('<div style="_margin:-10px">XSS</div>', encoding='utf-8')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_property_name(self):
        html = HTML('<div style="display:none;border-left-color:red;'
                    'user_defined:1;-moz-user-selct:-moz-all">prop</div>',
                    encoding='utf-8')
        self.assertEqual('<div style="display:none; border-left-color:red'
                         '">prop</div>',
                         unicode(html | TracHTMLSanitizer()))

    def test_unicode_expression(self):
        # Fullwidth small letters
        html = HTML(u'<div style="top:ｅｘｐｒｅｓｓｉｏｎ(alert())">'
                    u'XSS</div>')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        # Fullwidth capital letters
        html = HTML(u'<div style="top:ＥＸＰＲＥＳＳＩＯＮ(alert())">'
                    u'XSS</div>')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
        # IPA extensions
        html = HTML(u'<div style="top:expʀessɪoɴ(alert())">'
                    u'XSS</div>')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))

    def test_unicode_url(self):
        # IPA extensions
        html = HTML(u'<div style="background-image:uʀʟ(javascript:alert())">'
                    u'XSS</div>')
        self.assertEqual('<div>XSS</div>', unicode(html | TracHTMLSanitizer()))
def suite():
    """Return the test suite for this module."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(unittest.makeSuite(TracHTMLSanitizerTestCase, 'test'))
    return all_tests
# Run the suite when this test module is executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/tests/datefmt.py | trac/trac/util/tests/datefmt.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# Copyright (C) 2007 Matt Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Matt Good <trac@matt-good.net>
import datetime
import os
import time
import unittest
from trac.core import TracError
from trac.util import datefmt, translation
try:
import pytz
except ImportError:
pytz = None
try:
from babel import Locale
except ImportError:
Locale = None
# Timezone tests that require the `pytz` package; skipped entirely (the
# class is replaced by None) when pytz is not installed.
if pytz is None:
    PytzTestCase = None
else:
    class PytzTestCase(unittest.TestCase):
        """Tests of timezone conversion and DST handling backed by pytz.

        A second, stripped-down definition of
        `test_parse_date_across_dst_boundary` used to appear later in
        this class, silently shadowing the full version below and its
        Babel/i18n assertions; the duplicate has been removed.
        """
        def test_pytz_conversion(self):
            tz = datefmt.get_timezone('GMT +3:00')
            self.assertEqual(datetime.timedelta(hours=3),
                             tz.utcoffset(None))

        def test_posix_conversion(self):
            tz = datefmt.get_timezone('Etc/GMT-4')
            self.assertEqual(datetime.timedelta(hours=4),
                             tz.utcoffset(None))
            self.assertEqual('GMT +4:00', tz.zone)

        def test_unicode_input(self):
            tz = datefmt.get_timezone(u'Etc/GMT-4')
            self.assertEqual(datetime.timedelta(hours=4),
                             tz.utcoffset(None))
            self.assertEqual('GMT +4:00', tz.zone)

        def test_parse_date(self):
            tz = datefmt.get_timezone('Europe/Zurich')
            t_utc = datetime.datetime(2009, 12, 1, 11, 0, 0, 0, datefmt.utc)
            self.assertEqual(t_utc,
                             datefmt.parse_date('2009-12-01T12:00:00', tz))
            self.assertEqual(t_utc,
                             datefmt.parse_date('2009-12-01 12:00:00', tz))

        def test_parse_date_dst(self):
            tz = datefmt.get_timezone('Europe/Zurich')
            t_utc = datetime.datetime(2009, 8, 1, 10, 0, 0, 0, datefmt.utc)
            self.assertEqual(t_utc,
                             datefmt.parse_date('2009-08-01T12:00:00', tz))
            self.assertEqual(t_utc,
                             datefmt.parse_date('2009-08-01 12:00:00', tz))

        def test_parse_date_across_dst_boundary(self):
            tz = datefmt.get_timezone('Europe/Zurich')
            # DST start - 31 March, 02:00
            format = '%Y-%m-%d %H:%M:%S %Z%z'
            expected = '2002-03-31 03:30:00 CEST+0200'
            # iso8601
            t = datefmt.parse_date('2002-03-31T02:30:00', tz)
            self.assertEqual(expected, t.strftime(format))
            # strptime
            t = datetime.datetime(2002, 3, 31, 2, 30)
            t = datefmt.parse_date(t.strftime('%x %X'), tz)
            self.assertEqual(expected, t.strftime(format))
            # i18n datetime
            if Locale:
                en_US = Locale.parse('en_US')
                t = datefmt.parse_date('Mar 31, 2002 02:30', tz, en_US)
                self.assertEqual(expected, t.strftime(format))

        def test_to_datetime_pytz_normalize(self):
            tz = datefmt.get_timezone('Europe/Zurich')
            date = datefmt.to_datetime(datetime.date(2002, 3, 31), tz)
            format = '%Y-%m-%d %H:%M:%S %Z%z'
            expected = '2002-03-31 00:00:00 CET+0100'
            self.assertEqual(expected, date.strftime(format))

        def test_to_datetime_normalized(self):
            tz = datefmt.get_timezone('Europe/Paris')
            t = datetime.datetime(2012, 3, 25, 2, 15)
            dt = datefmt.to_datetime(t, tz)
            self.assertEqual(datetime.timedelta(0, 7200), dt.utcoffset())

        def test_to_datetime_astimezone(self):
            tz = datefmt.get_timezone('Europe/Paris')
            t = datetime.datetime(2012, 3, 25, 2, 15, tzinfo=datefmt.utc)
            dt = datefmt.to_datetime(t, tz)
            self.assertEqual(datetime.timedelta(0, 7200), dt.utcoffset())

        def test_to_datetime_tz_from_naive_datetime_is_localtz(self):
            t = datetime.datetime(2012, 3, 25, 2, 15)
            dt = datefmt.to_datetime(t)
            self.assert_(isinstance(dt.tzinfo, datefmt.LocalTimezone))

        def test_to_datetime_tz_from_now_is_localtz(self):
            dt = datefmt.to_datetime(None)
            self.assert_(isinstance(dt.tzinfo, datefmt.LocalTimezone))
class ParseISO8601TestCase(unittest.TestCase):
    """Tests for `datefmt.parse_date` with ISO 8601 input at every
    supported precision (second down to year), in both extended
    (``2012-10-11T02:40:57Z``) and basic (``20121011T024057Z``) forms,
    and with the various timezone designators.
    """
    def test_iso8601_second(self):
        t = datetime.datetime(2012, 10, 11, 2, 40, 57, 0, datefmt.utc)
        self.assertEqual(t, datefmt.parse_date('2012-10-11T02:40:57Z'))
        self.assertEqual(t, datefmt.parse_date('2012-10-10T14:40:57-12:00'))
        self.assertEqual(t, datefmt.parse_date('2012-10-11T02:40:57+00:00'))
        self.assertEqual(t, datefmt.parse_date('2012-10-11T02:40:57-00:00'))
        self.assertEqual(t, datefmt.parse_date('2012-10-11T08:25:57+05:45'))
        self.assertEqual(t, datefmt.parse_date('2012-10-11T16:40:57+14:00'))
        self.assertEqual(t, datefmt.parse_date('20121011T024057Z'))
        self.assertEqual(t, datefmt.parse_date('20121010T144057-1200'))
        self.assertEqual(t, datefmt.parse_date('20121011T024057+0000'))
        self.assertEqual(t, datefmt.parse_date('20121011T024057-0000'))
        self.assertEqual(t, datefmt.parse_date('20121011T082557+0545'))
        self.assertEqual(t, datefmt.parse_date('20121011T164057+1400'))
    def test_iso8601_minute(self):
        t = datetime.datetime(2012, 10, 11, 2, 40, 0, 0, datefmt.utc)
        self.assertEqual(t, datefmt.parse_date('2012-10-11T02:40Z'))
        self.assertEqual(t, datefmt.parse_date('2012-10-10T14:40-12:00'))
        self.assertEqual(t, datefmt.parse_date('2012-10-11T16:40+14:00'))
        self.assertEqual(t, datefmt.parse_date('20121011T0240Z'))
        self.assertEqual(t, datefmt.parse_date('20121010T1440-1200'))
        self.assertEqual(t, datefmt.parse_date('20121011T1640+1400'))
    def test_iso8601_hour(self):
        t = datetime.datetime(2012, 10, 11, 2, 0, 0, 0, datefmt.utc)
        self.assertEqual(t, datefmt.parse_date('2012-10-11T02Z'))
        self.assertEqual(t, datefmt.parse_date('2012-10-10T14-12'))
        self.assertEqual(t, datefmt.parse_date('2012-10-10T14-12:00'))
        self.assertEqual(t, datefmt.parse_date('2012-10-11T16+14'))
        self.assertEqual(t, datefmt.parse_date('2012-10-11T16+14:00'))
        self.assertEqual(t, datefmt.parse_date('20121011T02Z'))
        self.assertEqual(t, datefmt.parse_date('20121010T14-12'))
        self.assertEqual(t, datefmt.parse_date('20121010T14-1200'))
        self.assertEqual(t, datefmt.parse_date('20121011T16+1400'))
        self.assertEqual(t, datefmt.parse_date('20121011T16+14'))
    # Date-only forms are interpreted in the local timezone.
    def test_iso8601_day(self):
        t = datetime.datetime(2012, 10, 11, 0, 0, 0, 0, datefmt.localtz)
        self.assertEqual(t, datefmt.parse_date('2012-10-11'))
        self.assertEqual(t, datefmt.parse_date('20121011'))
    def test_iso8601_month(self):
        t = datetime.datetime(2012, 10, 1, 0, 0, 0, 0, datefmt.localtz)
        self.assertEqual(t, datefmt.parse_date('2012-10'))
        self.assertEqual(t, datefmt.parse_date('201210'))
    def test_iso8601_year(self):
        t = datetime.datetime(2012, 1, 1, 0, 0, 0, 0, datefmt.localtz)
        self.assertEqual(t, datefmt.parse_date('2012'))
    def test_iso8601_tz(self):
        self.assertEqual(
            datetime.timedelta(),
            datefmt.parse_date('2012-10-11T02:40:57Z').utcoffset())
        self.assertEqual(
            datetime.timedelta(hours=-12),
            datefmt.parse_date('2012-10-10T14:40:57-12').utcoffset())
        self.assertEqual(
            datetime.timedelta(hours=-9, minutes=-30),
            datefmt.parse_date('2012-10-10T17:10:57-09:30').utcoffset())
        self.assertEqual(
            datetime.timedelta(),
            datefmt.parse_date('2012-10-11T02:40:57+00:00').utcoffset())
        self.assertEqual(
            datetime.timedelta(),
            datefmt.parse_date('2012-10-11T02:40:57-00:00').utcoffset())
        self.assertEqual(
            datetime.timedelta(hours=5, minutes=45),
            datefmt.parse_date('2012-10-11T08:25:57+05:45').utcoffset())
    def test_iso8601_naive_tz_is_localtz(self):
        t = datetime.datetime(2012, 10, 11, 2, 40, 57, 0, datefmt.localtz)
        dt = datefmt.parse_date('2012-10-11T02:40:57')
        self.assertEqual(t, dt)
        self.assert_(isinstance(dt.tzinfo, datefmt.LocalTimezone))
    def test_iso8601_naive_tz_used_tzinfo_arg(self):
        tz = datefmt.timezone('GMT +1:00')
        t = datetime.datetime(2012, 10, 11, 2, 40, 57, 0, tz)
        dt = datefmt.parse_date('2012-10-11T02:40:57', tz)
        self.assertEqual(t, dt)
        self.assertEqual(tz, dt.tzinfo)
        self.assertEqual(datetime.timedelta(hours=1), dt.utcoffset())
    def test_iso8601_tz_not_used_tzinfo_arg(self):
        tz = datefmt.timezone('GMT +1:00')
        dt = datefmt.parse_date('2012-10-10T17:10:57-09:30', tz)
        self.assertEqual(datetime.timedelta(hours=-9, minutes=-30),
                         dt.utcoffset())
    # DST-normalization checks need pytz's timezone database.
    if pytz:
        def test_iso8601_naive_tz_normalize_non_existent_time(self):
            t = datetime.datetime(2012, 3, 25, 1, 15, 57, 0, datefmt.utc)
            tz = datefmt.timezone('Europe/Paris')
            dt = datefmt.parse_date('2012-03-25T02:15:57', tz)
            self.assertEqual(t, dt)
            self.assertEqual(3, dt.hour)
            self.assertEqual(datetime.timedelta(hours=2), dt.utcoffset())
        def test_iso8601_naive_tz_normalize_ambiguous_time(self):
            t = datetime.datetime(2011, 10, 31, 1, 15, 57, 0, datefmt.utc)
            tz = datefmt.timezone('Europe/Paris')
            dt = datefmt.parse_date('2011-10-31T02:15:57', tz)
            self.assertEqual(t, dt)
            self.assertEqual(2, dt.hour)
            self.assertEqual(datetime.timedelta(hours=1), dt.utcoffset())
class ParseRelativeDateTestCase(unittest.TestCase):
    """Tests for `datefmt._parse_relative_time`: interval expressions
    ("42 seconds ago", "3w", "12 months") and period-start expressions
    ("now", "today", "yesterday", "this/last <unit>").
    """
    def test_time_interval_seconds(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        past_42s = datetime.datetime(2012, 3, 25, 3, 14, 39, 987654, tzinfo=tz)
        self.assertEqual(
            past_42s,
            datefmt._parse_relative_time('42second ago', tz, now))
        self.assertEqual(
            past_42s,
            datefmt._parse_relative_time('42 secondsago', tz, now))
        self.assertEqual(
            past_42s,
            datefmt._parse_relative_time('42 second', tz, now))
        self.assertEqual(
            past_42s,
            datefmt._parse_relative_time('42seconds', tz, now))
        # The one-letter abbreviation 's' is not accepted for seconds.
        self.assertEqual(
            None,
            datefmt._parse_relative_time('42s ago', tz, now))
        self.assertEqual(
            None,
            datefmt._parse_relative_time('42s', tz, now))
    def test_time_interval_minutes(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(
            datetime.datetime(2012, 3, 25, 2, 54, 21, 987654, tzinfo=tz),
            datefmt._parse_relative_time('21minute', tz, now))
        # Fractional amounts are accepted.
        self.assertEqual(
            datetime.datetime(2012, 3, 25, 2, 54, 6, 987654, tzinfo=tz),
            datefmt._parse_relative_time('21.25 minutes', tz, now))
    def test_time_interval_hours(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        past_42h = datetime.datetime(2012, 3, 23, 9, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(
            past_42h,
            datefmt._parse_relative_time('42 hours', tz, now))
        self.assertEqual(
            past_42h,
            datefmt._parse_relative_time('42h ago', tz, now))
    def test_time_interval_days(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        past_24d = datetime.datetime(2012, 3, 1, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(
            past_24d,
            datefmt._parse_relative_time('24day', tz, now))
        self.assertEqual(
            past_24d,
            datefmt._parse_relative_time('24ds', tz, now))
    def test_time_interval_weeks(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        past_3w = datetime.datetime(2012, 3, 4, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(past_3w,
                         datefmt._parse_relative_time('3 weeks', tz, now))
        self.assertEqual(past_3w,
                         datefmt._parse_relative_time('3w', tz, now))
    def test_time_interval_months(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 1, 1, 3, 15, 21, 987654, tzinfo=tz)
        past_12m = datetime.datetime(2011, 1, 6, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(
            past_12m,
            datefmt._parse_relative_time('12 months', tz, now))
        self.assertEqual(
            past_12m,
            datefmt._parse_relative_time('12 ms ago', tz, now))
    def test_time_interval_years(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        past_2y = datetime.datetime(2010, 3, 26, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(past_2y,
                         datefmt._parse_relative_time('2 years', tz, now))
        self.assertEqual(past_2y, datefmt._parse_relative_time('2y', tz, now))
    def test_time_start_now(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(now, datefmt._parse_relative_time('now', tz, now))
    def test_time_start_today(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        today = datefmt.to_datetime(datetime.datetime(2012, 3, 25), tzinfo=tz)
        self.assertEqual(today,
                         datefmt._parse_relative_time('today', tz, now))
        self.assertEqual(today,
                         datefmt._parse_relative_time('this day', tz, now))
    def test_time_start_yesterday(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        yesterday = datefmt.to_datetime(datetime.datetime(2012, 3, 24), tz)
        self.assertEqual(yesterday,
                         datefmt._parse_relative_time('yesterday', tz, now))
        self.assertEqual(yesterday,
                         datefmt._parse_relative_time('last day', tz, now))
    def test_time_start_year(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2012, 1, 1, tzinfo=tz),
                         datefmt._parse_relative_time('this year', tz, now))
        self.assertEqual(datetime.datetime(2011, 1, 1, tzinfo=tz),
                         datefmt._parse_relative_time('last year', tz, now))
        now = datetime.datetime(2009, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2009, 1, 1, tzinfo=tz),
                         datefmt._parse_relative_time('this year', tz, now))
        self.assertEqual(datetime.datetime(2008, 1, 1, tzinfo=tz),
                         datefmt._parse_relative_time('last year', tz, now))
    def test_time_start_month(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 1, 23, 3, 15, 42, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2012, 1, 1, tzinfo=tz),
                         datefmt._parse_relative_time('this month', tz, now))
        self.assertEqual(datetime.datetime(2011, 12, 1, tzinfo=tz),
                         datefmt._parse_relative_time('last month', tz, now))
    def test_time_start_week(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2012, 3, 19, tzinfo=tz),
                         datefmt._parse_relative_time('this week', tz, now))
        self.assertEqual(datetime.datetime(2012, 3, 12, tzinfo=tz),
                         datefmt._parse_relative_time('last week', tz, now))
    def test_time_start_day(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 1, 3, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2012, 3, 1, tzinfo=tz),
                         datefmt._parse_relative_time('this day', tz, now))
        self.assertEqual(datetime.datetime(2012, 2, 29, tzinfo=tz),
                         datefmt._parse_relative_time('last day', tz, now))
    def test_time_start_hour(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 0, 15, 21, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2012, 3, 25, 0, tzinfo=tz),
                         datefmt._parse_relative_time('this hour', tz, now))
        self.assertEqual(datetime.datetime(2012, 3, 24, 23, tzinfo=tz),
                         datefmt._parse_relative_time('last hour', tz, now))
    def test_time_start_minute(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 0, 21, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2012, 3, 25, 3, 0, tzinfo=tz),
                         datefmt._parse_relative_time('this minute', tz, now))
        self.assertEqual(datetime.datetime(2012, 3, 25, 2, 59, tzinfo=tz),
                         datefmt._parse_relative_time('last minute', tz, now))
    def test_time_start_second(self):
        tz = datefmt.timezone('GMT +1:00')
        now = datetime.datetime(2012, 3, 25, 3, 15, 0, 987654, tzinfo=tz)
        self.assertEqual(datetime.datetime(2012, 3, 25, 3, 15, 0, tzinfo=tz),
                         datefmt._parse_relative_time('this second', tz, now))
        self.assertEqual(datetime.datetime(2012, 3, 25, 3, 14, 59, tzinfo=tz),
                         datefmt._parse_relative_time('last second', tz, now))
    # Checks around DST transitions need pytz's timezone database.
    if pytz:
        def test_time_interval_across_dst(self):
            tz = datefmt.timezone('Europe/Paris')
            now = datefmt.to_datetime(datetime.datetime(2012, 3, 25, 3, 0, 41),
                                      tz)
            dt = datefmt._parse_relative_time('41 seconds', tz, now)
            self.assertEqual('2012-03-25T03:00:00+02:00', dt.isoformat())
            dt = datefmt._parse_relative_time('42 seconds', tz, now)
            self.assertEqual('2012-03-25T01:59:59+01:00', dt.isoformat())
        def test_this_time_start_across_dst(self):
            tz = datefmt.timezone('Europe/Paris')
            now = datefmt.to_datetime(
                datetime.datetime(2012, 3, 25, 3, 15, 21, 987654), tz)
            dt = datefmt._parse_relative_time('this hour', tz, now)
            self.assertEqual('2012-03-25T03:00:00+02:00', dt.isoformat())
            dt = datefmt._parse_relative_time('today', tz, now)
            self.assertEqual('2012-03-25T00:00:00+01:00', dt.isoformat())
            dt = datefmt._parse_relative_time('this day', tz, now)
            self.assertEqual('2012-03-25T00:00:00+01:00', dt.isoformat())
        def test_last_time_start_across_dst(self):
            tz = datefmt.timezone('Europe/Paris')
            now = datefmt.to_datetime(datetime.datetime(2012, 3, 26, 3, 0, 41),
                                      tz)
            dt = datefmt._parse_relative_time('this day', tz, now)
            self.assertEqual('2012-03-26T00:00:00+02:00', dt.isoformat())
            dt = datefmt._parse_relative_time('yesterday', tz, now)
            self.assertEqual('2012-03-25T00:00:00+01:00', dt.isoformat())
            dt = datefmt._parse_relative_time('last day', tz, now)
            self.assertEqual('2012-03-25T00:00:00+01:00', dt.isoformat())
class ParseDateValidRangeTestCase(unittest.TestCase):
    """Check that `datefmt.parse_date` accepts the timestamp range every
    supported platform can represent and raises `TracError` outside it.
    """
    def test_max_timestamp(self):
        # At least all platforms support maximal signed 32 bits integer,
        # 2**31 - 1, INT32_MAX.
        datefmt.parse_date('2038-01-19T03:14:07Z')
        try:
            datefmt.parse_date('9999-12-31T23:59:59-12:00')
            raise AssertionError('TracError not raised')
        except TracError, e:
            self.assert_('is outside valid range' in unicode(e))
    def test_min_timestamp(self):
        if os.name != 'nt':
            # At least all Unix support minimal signed 32 bits integer,
            # -(2**31), INT32_MIN
            datefmt.parse_date('1901-12-13T20:45:52Z')
        else:
            # At least All VC run-times support 0 as time_t
            datefmt.parse_date('1970-01-01T00:00:00Z')
        try:
            datefmt.parse_date('0001-01-01T00:00:00+14:00')
            raise AssertionError('TracError not raised')
        except TracError, e:
            self.assert_('is outside valid range' in unicode(e))
class DateFormatTestCase(unittest.TestCase):
    """Tests for to_datetime() and the datetime/date/time formatters."""
    def test_to_datetime(self):
        # int, long and float second-timestamps map to the same datetime
        expected = datetime.datetime.fromtimestamp(23, datefmt.localtz)
        self.assertEqual(datefmt.to_datetime(23), expected)
        self.assertEqual(datefmt.to_datetime(23L), expected)
        self.assertEqual(datefmt.to_datetime(23.0), expected)
    def test_to_datetime_microsecond_timestamps(self):
        # large magnitudes are interpreted as microsecond timestamps
        expected = datetime.datetime.fromtimestamp(2345.678912,
                                                   datefmt.localtz)
        self.assertEqual(datefmt.to_datetime(2345678912), expected)
        self.assertEqual(datefmt.to_datetime(2345678912L), expected)
        self.assertEqual(datefmt.to_datetime(2345678912.0), expected)
    def test_to_datetime_microsecond_negative_timestamps(self):
        # Work around issue1646728 in Python 2.4
        expected = datetime.datetime.fromtimestamp(-2345, datefmt.localtz) \
                   - datetime.timedelta(seconds=.678912)
        self.assertEqual(datefmt.to_datetime(-2345678912).microsecond,
                         321088) # 1000000 - 678912
        self.assertEqual(datefmt.to_datetime(-2345678912), expected)
        self.assertEqual(datefmt.to_datetime(-2345678912L), expected)
        self.assertEqual(datefmt.to_datetime(-2345678912.0), expected)
    if os.name == 'nt':
        del test_to_datetime_microsecond_negative_timestamps
        # negative timestamps not supported on Windows:
        # ValueError: timestamp out of range for platform localtime()/gmtime()
    def test_to_datetime_can_convert_dates(self):
        # date objects are accepted and promoted to datetime
        expected = datetime.datetime(2009, 5, 2, tzinfo=datefmt.localtz)
        self.assertEqual(datefmt.to_datetime(expected.date()), expected)
    def test_to_datetime_tz(self):
        # an explicit tzinfo shifts the resulting wall-clock time
        tz = datefmt.timezone('GMT +1:00')
        expected = datetime.datetime(1970, 1, 1, 1, 0, 23, 0, tz)
        self.assertEqual(datefmt.to_datetime(23, tz), expected)
        self.assertEqual(datefmt.to_datetime(23L, tz), expected)
        self.assertEqual(datefmt.to_datetime(23.0, tz), expected)
        tz = datefmt.timezone('GMT +4:00')
        expected = datetime.datetime(1970, 1, 1, 4, 0, 23, 0, tz)
        self.assertEqual(datefmt.to_datetime(23, tz), expected)
        self.assertEqual(datefmt.to_datetime(23L, tz), expected)
        self.assertEqual(datefmt.to_datetime(23.0, tz), expected)
    def test_format_datetime_utc(self):
        # 'iso8601' and its date/time-only variants, rendered in UTC
        t = datetime.datetime(1970, 1, 1, 1, 0, 23, 0, datefmt.utc)
        expected = '1970-01-01T01:00:23Z'
        self.assertEqual(datefmt.format_datetime(t, '%Y-%m-%dT%H:%M:%SZ',
                                                 datefmt.utc), expected)
        self.assertEqual(datefmt.format_datetime(t, 'iso8601',
                                                 datefmt.utc), expected)
        self.assertEqual(datefmt.format_datetime(t, 'iso8601date',
                                                 datefmt.utc),
                         expected.split('T')[0])
        self.assertEqual(datefmt.format_datetime(t, 'iso8601time',
                                                 datefmt.utc),
                         expected.split('T')[1])
        self.assertEqual(datefmt.format_date(t, 'iso8601', datefmt.utc),
                         expected.split('T')[0])
        self.assertEqual(datefmt.format_time(t, 'iso8601', datefmt.utc),
                         expected.split('T')[1])
    def test_format_datetime_gmt01(self):
        # same checks against a fixed +01:00 offset timezone
        gmt01 = datefmt.FixedOffset(60, 'GMT +1:00')
        t = datetime.datetime(1970, 1, 1, 1, 0, 23, 0, gmt01)
        self.assertEqual('1970-01-01T01:00:23+0100',
                         datefmt.format_datetime(t, '%Y-%m-%dT%H:%M:%S%z',
                                                 gmt01))
        expected = '1970-01-01T01:00:23+01:00'
        self.assertEqual(datefmt.format_datetime(t, 'iso8601',
                                                 gmt01), expected)
        self.assertEqual(datefmt.format_datetime(t, 'iso8601date', gmt01),
                         expected.split('T')[0])
        self.assertEqual(datefmt.format_datetime(t, 'iso8601time', gmt01),
                         expected.split('T')[1])
        self.assertEqual(datefmt.format_date(t, 'iso8601', gmt01),
                         expected.split('T')[0])
        self.assertEqual(datefmt.format_time(t, 'iso8601', gmt01),
                         expected.split('T')[1])
    def test_format_date_accepts_date_instances(self):
        a_date = datetime.date(2009, 8, 20)
        self.assertEqual('2009-08-20',
                         datefmt.format_date(a_date, format='%Y-%m-%d'))
    def test_format_compatibility(self):
        # Without Babel installed, the Babel-style format names fall back
        # to the locale-dependent strftime directives checked here.
        tz = datefmt.timezone('GMT +2:00')
        t = datetime.datetime(2010, 8, 28, 11, 45, 56, 123456, datefmt.utc)
        tz_t = datetime.datetime(2010, 8, 28, 13, 45, 56, 123456, tz)
        # Converting babel's format to strftime format
        self.assertEqual(tz_t.strftime('%x %H:%M').decode('utf-8'),
                         datefmt.format_datetime(t, 'short', tz))
        self.assertEqual(tz_t.strftime('%x').decode('utf-8'),
                         datefmt.format_date(t, 'short', tz))
        self.assertEqual(tz_t.strftime('%H:%M').decode('utf-8'),
                         datefmt.format_time(t, 'short', tz))
        for f in ('medium', 'long', 'full'):
            self.assertEqual(tz_t.strftime('%x %X').decode('utf-8'),
                             datefmt.format_datetime(t, f, tz))
            self.assertEqual(tz_t.strftime('%x').decode('utf-8'),
                             datefmt.format_date(t, f, tz))
            self.assertEqual(tz_t.strftime('%X').decode('utf-8'),
                             datefmt.format_time(t, f, tz))
class UTimestampTestCase(unittest.TestCase):
    """Tests for the microsecond-precision timestamp round-trip."""
    def test_sub_second(self):
        t = datetime.datetime(2001, 2, 3, 4, 5, 6, 123456, datefmt.utc)
        ts = datefmt.to_utimestamp(t)
        # microseconds since the epoch, sub-second part preserved
        self.assertEqual(981173106123456L, ts)
        self.assertEqual(t, datefmt.from_utimestamp(ts))
class ISO8601TestCase(unittest.TestCase):
    """Tests for the pseudo-locale 'iso8601' (Babel-independent)."""
    def test_default(self):
        tz = datefmt.timezone('GMT +2:00')
        t = datetime.datetime(2010, 8, 28, 11, 45, 56, 123456, tz)
        self.assertEqual('2010-08-28',
                         datefmt.format_date(t, tzinfo=tz, locale='iso8601'))
        self.assertEqual('11:45:56+02:00',
                         datefmt.format_time(t, tzinfo=tz, locale='iso8601'))
        self.assertEqual('2010-08-28T11:45:56+02:00',
                         datefmt.format_datetime(t, tzinfo=tz,
                                                 locale='iso8601'))
    def test_with_iso8601(self):
        # explicit 'iso8601' format combined with the iso8601 locale
        tz = datefmt.timezone('GMT +2:00')
        t = datetime.datetime(2010, 8, 28, 11, 45, 56, 123456, tz)
        self.assertEqual('2010-08-28',
                         datefmt.format_date(t, 'iso8601', tz, 'iso8601'))
        self.assertEqual('11:45:56+02:00',
                         datefmt.format_time(t, 'iso8601', tz, 'iso8601'))
        self.assertEqual('2010-08-28T11:45:56+02:00',
                         datefmt.format_datetime(t, 'iso8601', tz, 'iso8601'))
    def test_parse_date_offset(self):
        # every representation of 11:00 UTC parses to the same moment
        t_utc = datetime.datetime(2009, 12, 1, 11, 0, 0, 0, datefmt.utc)
        self.assertEqual(t_utc,
                         datefmt.parse_date('2009-12-01T11:00:00Z'))
        self.assertEqual(t_utc,
                         datefmt.parse_date('2009-12-01T11:00:00+00:00'))
        self.assertEqual(t_utc,
                         datefmt.parse_date('2009-12-01T11:00:00-00:00'))
        self.assertEqual(t_utc,
                         datefmt.parse_date('2009-12-01T09:00:00-02:00'))
        self.assertEqual(t_utc,
                         datefmt.parse_date('2009-12-01T11:30:00+00:30'))
    def test_parse_date_usec(self):
        # fractional seconds accept '.' or ',' and shorter fractions are
        # padded out to microseconds
        tz = datefmt.get_timezone('GMT +1:00')
        t_utc = datetime.datetime(2009, 12, 1, 11, 0, 0, 98765, datefmt.utc)
        self.assertEqual(t_utc,
                         datefmt.parse_date('2009-12-01T12:00:00.098765', tz))
        self.assertEqual(t_utc,
                         datefmt.parse_date('2009-12-01T12:00:00,098765', tz))
        self.assertEqual(datetime.datetime(2009, 12, 1, 11, 0, 0, 98700,
                                           datefmt.utc),
                         datefmt.parse_date('2009-12-01T12:00:00.0987', tz))
        self.assertEqual(datetime.datetime(2009, 12, 1, 11, 0, 0, 90000,
                                           datefmt.utc),
                         datefmt.parse_date('2009-12-01T12:00:00.09', tz))
        self.assertEqual(datetime.datetime(2009, 12, 1, 11, 0, 0, 0,
                                           datefmt.utc),
                         datefmt.parse_date('2009-12-01T12:00:00.0', tz))
    def test_with_babel_format(self):
        # Babel-style format names are honoured by the iso8601 locale,
        # trimming precision for 'short'/'medium'
        tz = datefmt.timezone('GMT +2:00')
        t = datetime.datetime(2010, 8, 28, 11, 45, 56, 123456, tz)
        for f in ('short', 'medium', 'long', 'full'):
            self.assertEqual('2010-08-28',
                             datefmt.format_date(t, f, tz, 'iso8601'))
        self.assertEqual('11:45',
                         datefmt.format_time(t, 'short', tz, 'iso8601'))
        self.assertEqual('2010-08-28T11:45',
                         datefmt.format_datetime(t, 'short', tz, 'iso8601'))
        self.assertEqual('11:45:56',
                         datefmt.format_time(t, 'medium', tz, 'iso8601'))
        self.assertEqual('2010-08-28T11:45:56',
                         datefmt.format_datetime(t, 'medium', tz, 'iso8601'))
        for f in ('long', 'full'):
            self.assertEqual('11:45:56+02:00',
                             datefmt.format_time(t, f, tz, 'iso8601'))
            self.assertEqual('2010-08-28T11:45:56+02:00',
                             datefmt.format_datetime(t, f, tz, 'iso8601'))
    def test_hint(self):
        # parse errors carry a hint describing the expected input format
        try:
            datefmt.parse_date('***', locale='iso8601', hint='date')
        except TracError, e:
            self.assert_('"YYYY-MM-DD"' in unicode(e))
        try:
            datefmt.parse_date('***', locale='iso8601', hint='datetime')
        except TracError, e:
            self.assert_(u'"YYYY-MM-DDThh:mm:ss±hh:mm"' in unicode(e))
        try:
            datefmt.parse_date('***', locale='iso8601', hint='foobar')
        except TracError, e:
            self.assert_('"foobar"' in unicode(e))
if Locale is None:
I18nDateFormatTestCase = None
else:
class I18nDateFormatTestCase(unittest.TestCase):
def test_i18n_format_datetime(self):
tz = datefmt.timezone('GMT +2:00')
t = datetime.datetime(2010, 8, 28, 11, 45, 56, 123456, datefmt.utc)
en_US = Locale.parse('en_US')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/tests/presentation.py | trac/trac/util/tests/presentation.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# Copyright (C) 2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import doctest
import unittest
from trac.util import presentation
class ToJsonTestCase(unittest.TestCase):
    """Tests for presentation.to_json() serialization."""

    def test_simple_types(self):
        # Scalars serialize compactly; HTML-sensitive characters are
        # escaped as \uXXXX sequences.
        for expected, value in [('42', 42),
                                ('123.456', 123.456),
                                ('true', True),
                                ('false', False),
                                ('null', None),
                                ('"String"', 'String')]:
            self.assertEqual(expected, presentation.to_json(value))
        self.assertEqual(r'"a \" quote"', presentation.to_json('a " quote'))
        self.assertEqual('''"a ' single quote"''',
                         presentation.to_json("a ' single quote"))
        self.assertEqual(r'"\u003cb\u003e\u0026\u003c/b\u003e"',
                         presentation.to_json('<b>&</b>'))

    def test_compound_types(self):
        # Lists and dicts nest; the expected output shows dict keys in
        # sorted order.
        self.assertEqual('[1,2,[true,false]]',
                         presentation.to_json([1, 2, [True, False]]))
        self.assertEqual(r'{"one":1,"other":[null,0],'
                         r'''"three":[3,"\u0026\u003c\u003e'"],'''
                         r'"two":2}',
                         presentation.to_json({"one": 1, "two": 2,
                                               "other": [None, 0],
                                               "three": [3, "&<>'"]}))
def suite():
    """Build the test suite, including the module's doctests."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(presentation))
    tests.addTest(unittest.makeSuite(ToJsonTestCase))
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/tests/__init__.py | trac/trac/util/tests/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import doctest
import os.path
import random
import re
import tempfile
import unittest
from trac import util
from trac.util.tests import concurrency, datefmt, presentation, text, html
class AtomicFileTestCase(unittest.TestCase):
    """Tests for util.AtomicFile, a write-then-replace file wrapper."""
    def setUp(self):
        self.path = os.path.join(tempfile.gettempdir(), 'trac-tempfile')
    def tearDown(self):
        try:
            os.unlink(self.path)
        except OSError:
            # the test may not have created the file
            pass
    def test_non_existing(self):
        with util.AtomicFile(self.path) as f:
            f.write('test content')
        self.assertEqual(True, f.closed)
        self.assertEqual('test content', util.read_file(self.path))
    def test_existing(self):
        # an existing file is replaced atomically with the new content
        util.create_file(self.path, 'Some content')
        self.assertEqual('Some content', util.read_file(self.path))
        with util.AtomicFile(self.path) as f:
            f.write('Some new content')
        self.assertEqual(True, f.closed)
        self.assertEqual('Some new content', util.read_file(self.path))
    if util.can_rename_open_file:
        # only runs on platforms where an open file can be renamed
        def test_existing_open_for_reading(self):
            util.create_file(self.path, 'Initial file content')
            self.assertEqual('Initial file content', util.read_file(self.path))
            with open(self.path) as rf:
                with util.AtomicFile(self.path) as f:
                    f.write('Replaced content')
            self.assertEqual(True, rf.closed)
            self.assertEqual(True, f.closed)
            self.assertEqual('Replaced content', util.read_file(self.path))
    # FIXME: It is currently not possible to make this test pass on all
    # platforms and with all locales. Typically, it will fail on Linux with
    # LC_ALL=C.
    # Python 3 adds sys.setfilesystemencoding(), which could be used here
    # to remove the dependency on the locale. So the test is disabled until
    # we require Python 3.
    def _test_unicode_path(self):
        self.path = os.path.join(tempfile.gettempdir(), u'träc-témpfilè')
        with util.AtomicFile(self.path) as f:
            f.write('test content')
        self.assertEqual(True, f.closed)
        self.assertEqual('test content', util.read_file(self.path))
class PathTestCase(unittest.TestCase):
    """Tests for util.is_path_below()."""

    def assert_below(self, path, parent):
        # Assert `path` is below `parent`, using native path separators.
        self.assert_(util.is_path_below(path.replace('/', os.sep),
                                        parent.replace('/', os.sep)))

    def assert_not_below(self, path, parent):
        # Assert `path` is NOT below `parent`.
        self.assert_(not util.is_path_below(path.replace('/', os.sep),
                                            parent.replace('/', os.sep)))

    def test_is_path_below(self):
        self.assert_below('/svn/project1', '/svn/project1')
        self.assert_below('/svn/project1/repos', '/svn/project1')
        self.assert_below('/svn/project1/sub/repos', '/svn/project1')
        self.assert_below('/svn/project1/sub/../repos', '/svn/project1')
        self.assert_not_below('/svn/project2/repos', '/svn/project1')
        self.assert_not_below('/svn/project2/sub/repos', '/svn/project1')
        self.assert_not_below('/svn/project1/../project2/repos',
                              '/svn/project1')
        # Relative paths are resolved against the current directory.
        # (Fixed: os.path.join(os.getcwd()) with a single argument was a
        # no-op wrapper around os.getcwd().)
        self.assert_(util.is_path_below('repos', os.getcwd()))
        self.assert_(not util.is_path_below('../sub/repos', os.getcwd()))
class RandomTestCase(unittest.TestCase):
    """Tests for util.urandom() and util.hex_entropy()."""
    def setUp(self):
        # preserve the global PRNG state; restored in tearDown
        self.state = random.getstate()
    def tearDown(self):
        random.setstate(self.state)
    def test_urandom(self):
        """urandom() returns random bytes"""
        for i in xrange(129):
            self.assertEqual(i, len(util.urandom(i)))
        # For a large enough sample, each value should appear at least once
        entropy = util.urandom(65536)
        values = set(ord(c) for c in entropy)
        self.assertEqual(256, len(values))
    def test_hex_entropy(self):
        """hex_entropy() returns random hex digits"""
        hex_digits = set('0123456789abcdef')
        for i in xrange(129):
            entropy = util.hex_entropy(i)
            self.assertEqual(i, len(entropy))
            self.assertEqual(set(), set(entropy) - hex_digits)
    def test_hex_entropy_global_state(self):
        """hex_entropy() not affected by global random generator state"""
        # seeding the module-level PRNG identically must NOT make two
        # hex_entropy() results identical
        random.seed(0)
        data = util.hex_entropy(64)
        random.seed(0)
        self.assertNotEqual(data, util.hex_entropy(64))
class ContentDispositionTestCase(unittest.TestCase):
    """Tests for util.content_disposition() header value formatting."""

    def test_filename(self):
        # Filenames are percent-quoted and appended to the disposition type.
        for filename, quoted in [('myfile.txt', 'myfile.txt'),
                                 ('a file.txt', 'a%20file.txt')]:
            self.assertEqual('attachment; filename=%s' % quoted,
                             util.content_disposition('attachment', filename))

    def test_no_filename(self):
        # Without a filename only the disposition type is returned.
        for disposition in ('inline', 'attachment'):
            self.assertEqual(disposition,
                             util.content_disposition(disposition))

    def test_no_type(self):
        # Without a type only the filename parameter is returned.
        for filename, quoted in [('myfile.txt', 'myfile.txt'),
                                 ('a file.txt', 'a%20file.txt')]:
            self.assertEqual('filename=%s' % quoted,
                             util.content_disposition(filename=filename))
class SafeReprTestCase(unittest.TestCase):
    """Tests for util.safe_repr(), which must never raise."""
    def test_normal_repr(self):
        # for well-behaved objects safe_repr() matches repr()
        for x in ([1, 2, 3], "été", u"été"):
            self.assertEqual(repr(x), util.safe_repr(x))
    def test_buggy_repr(self):
        # a __repr__ that raises is reported in the output, not propagated
        class eh_ix(object):
            def __repr__(self):
                return 1 + "2"
        self.assertRaises(Exception, repr, eh_ix())
        sr = util.safe_repr(eh_ix())
        # normalize the memory address and module name for comparison
        sr = re.sub('[A-F0-9]{4,}', 'ADDRESS', sr)
        sr = re.sub(r'__main__|trac\.util\.tests', 'MODULE', sr)
        self.assertEqual("<MODULE.eh_ix object at 0xADDRESS "
                         "(repr() error: TypeError: unsupported operand "
                         "type(s) for +: 'int' and 'str')>", sr)
def suite():
    # Aggregate all trac.util test suites: local test cases, sub-module
    # suites, and the doctests embedded in trac.util itself.
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(AtomicFileTestCase, 'test'))
    suite.addTest(unittest.makeSuite(PathTestCase, 'test'))
    suite.addTest(unittest.makeSuite(RandomTestCase, 'test'))
    suite.addTest(unittest.makeSuite(ContentDispositionTestCase, 'test'))
    suite.addTest(unittest.makeSuite(SafeReprTestCase, 'test'))
    suite.addTest(concurrency.suite())
    suite.addTest(datefmt.suite())
    suite.addTest(presentation.suite())
    suite.addTest(doctest.DocTestSuite(util))
    suite.addTest(text.suite())
    suite.addTest(html.suite())
    return suite
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/tests/text.py | trac/trac/util/tests/text.py | # -*- coding: utf-8 -*-
import unittest
from StringIO import StringIO
from trac.util.text import empty, expandtabs, fix_eol, javascript_quote, \
to_js_string, normalize_whitespace, to_unicode, \
text_width, print_table, unicode_quote, \
unicode_quote_plus, unicode_unquote, \
unicode_urlencode, wrap, quote_query_string, \
unicode_to_base64, unicode_from_base64, \
strip_line_ws, stripws, levenshtein_distance
class ToUnicodeTestCase(unittest.TestCase):
    """Tests for to_unicode(), including decoding of exception messages."""
    def test_explicit_charset(self):
        uc = to_unicode('\xc3\xa7', 'utf-8')
        assert isinstance(uc, unicode)
        self.assertEquals(u'\xe7', uc)
    def test_explicit_charset_with_replace(self):
        # an incomplete UTF-8 sequence must not raise; the bad byte is kept
        uc = to_unicode('\xc3', 'utf-8')
        assert isinstance(uc, unicode)
        self.assertEquals(u'\xc3', uc)
    def test_implicit_charset(self):
        # without an explicit charset, UTF-8 input is still decoded
        uc = to_unicode('\xc3\xa7')
        assert isinstance(uc, unicode)
        self.assertEquals(u'\xe7', uc)
    def test_from_exception_using_unicode_args(self):
        u = u'\uB144'
        try:
            raise ValueError, '%s is not a number.' % u
        except ValueError, e:
            self.assertEquals(u'\uB144 is not a number.', to_unicode(e))
    def test_from_exception_using_str_args(self):
        # byte-string exception messages are decoded back to unicode
        u = u'Das Ger\xe4t oder die Ressource ist belegt'
        try:
            raise ValueError, u.encode('utf-8')
        except ValueError, e:
            self.assertEquals(u, to_unicode(e))
class ExpandtabsTestCase(unittest.TestCase):
    """Tests for expandtabs() and its `ignoring` marker support."""

    def test_empty(self):
        x = expandtabs('', ignoring='\0')
        self.assertEquals('', x)

    def test_ignoring(self):
        # Fixed typo in the method name (was "test_ingoring"); characters
        # listed in `ignoring` are skipped when expanding.
        x = expandtabs('\0\t', ignoring='\0')
        self.assertEquals('\0 ', x)

    def test_tabstops(self):
        x = expandtabs(' \t')
        self.assertEquals(' ', x)
        x = expandtabs('\t\t')
        self.assertEquals(' ', x)
class JavascriptQuoteTestCase(unittest.TestCase):
    """Tests for javascript_quote() escaping rules."""

    def test_quoting(self):
        # (raw input, expected quoted form) pairs
        cases = [
            ('Quote " in text', r'Quote \" in text'),
            ('\\"\b\f\n\r\t\'', r'\\\"\b\f\n\r\t\''),
            ('\x02\x1e', r'\u0002\u001e'),
            ('&<>', r'\u0026\u003c\u003e'),
        ]
        for raw, quoted in cases:
            self.assertEqual(quoted, javascript_quote(raw))
class ToJsStringTestCase(unittest.TestCase):
    """Tests for to_js_string(): quotes a value and wraps it in double
    quotes; empty string and None both yield an empty JS string."""

    def test_(self):
        cases = [
            ('Quote " in text', r'"Quote \" in text"'),
            ('\\"\b\f\n\r\t\'', r'''"\\\"\b\f\n\r\t'"'''),
            ('\x02\x1e', r'"\u0002\u001e"'),
            ('&<>', r'"\u0026\u003c\u003e"'),
            ('', '""'),
            (None, '""'),
        ]
        for value, expected in cases:
            self.assertEqual(expected, to_js_string(value))
class UnicodeQuoteTestCase(unittest.TestCase):
    """Tests for the unicode-aware URL quoting/unquoting helpers."""
    def test_unicode_quote(self):
        # non-ASCII is UTF-8 encoded then percent-quoted
        self.assertEqual(u'the%20%C3%9C%20thing',
                         unicode_quote(u'the Ü thing'))
        self.assertEqual(u'%2520%C3%9C%20%2520',
                         unicode_quote(u'%20Ü %20'))
    def test_unicode_quote_plus(self):
        # spaces become '+' instead of '%20'
        self.assertEqual(u'the+%C3%9C+thing',
                         unicode_quote_plus(u'the Ü thing'))
        self.assertEqual(u'%2520%C3%9C+%2520',
                         unicode_quote_plus(u'%20Ü %20'))
    def test_unicode_unquote(self):
        # quote/unquote round-trips losslessly
        u = u'the Ü thing'
        up = u'%20Ü %20'
        self.assertEqual(u, unicode_unquote(unicode_quote(u)))
        self.assertEqual(up, unicode_unquote(unicode_quote(up)))
    def test_unicode_urlencode(self):
        # unicode keys and values are quoted; `empty` yields a bare key
        self.assertEqual('thing=%C3%9C&%C3%9C=thing&%C3%9Cthing',
                         unicode_urlencode({u'Ü': 'thing',
                                            'thing': u'Ü',
                                            u'Üthing': empty}))
class QuoteQueryStringTestCase(unittest.TestCase):
    """Tests for quote_query_string()."""

    def test_quote(self):
        # Separators are preserved; values are plus-quoted.
        query = u'type=the Ü thing&component=comp\x7fonent'
        expected = 'type=the+%C3%9C+thing&component=comp%7Fonent'
        self.assertEqual(expected, quote_query_string(query))
class WhitespaceTestCase(unittest.TestCase):
    """Tests for normalize_whitespace()."""
    def test_default(self):
        # zero-width (\u200b) and no-break (\u00a0) spaces are normalized
        # by default; to_space/remove override the character classes
        self.assertEqual(u'This is text ',
            normalize_whitespace(u'Th\u200bis\u00a0is te\u200bxt\u00a0'))
        self.assertEqual(u'Some other text',
            normalize_whitespace(u'Some\tother\ntext\r', to_space='\t\n',
                                 remove='\r'))
class TextWidthTestCase(unittest.TestCase):
    """Tests for text_width() with narrow/wide handling of East Asian
    "ambiguous" characters (e.g. the ellipsis)."""

    def _check(self, ambiwidth, expectations):
        # Each expectation is an (expected width, text) pair.
        for expected, text in expectations:
            self.assertEqual(expected, text_width(text, ambiwidth=ambiwidth))

    def test_single(self):
        # ambiguous characters count as one column
        self._check(1, [(8, u'Alphabet'),
                        (16, 'east asian width'),
                        (16, u'ひらがなカタカナ'),
                        (21, u'色は匂えど…酔ひもせず')])

    def test_double(self):
        # ambiguous characters count as two columns
        self._check(2, [(8, u'Alphabet'),
                        (16, 'east asian width'),
                        (16, u'ひらがなカタカナ'),
                        (22, u'色は匂えど…酔ひもせず')])
class PrintTableTestCase(unittest.TestCase):
    """Tests for print_table() column layout, including wide characters."""
    def test_single_bytes(self):
        data = (
            ('Trac 0.12', '2010-06-13', 'Babel'),
            ('Trac 0.11', '2008-06-22', 'Genshi'),
            ('Trac 0.10', '2006-09-28', 'Zengia'),
            ('Trac 0.9', '2005-10-31', 'Vodun'),
            ('Trac 0.8', '2004-11-15', 'Qualia'),
            ('Trac 0.7', '2004-05-18', 'Fulci'),
            ('Trac 0.6', '2004-03-23', 'Solanum'),
            ('Trac 0.5', '2004-02-23', 'Incognito'),
        )
        headers = ('Version', 'Date', 'Name')
        expected = """\
Version Date Name
----------------------------------
Trac 0.12 | 2010-06-13 | Babel
Trac 0.11 | 2008-06-22 | Genshi
Trac 0.10 | 2006-09-28 | Zengia
Trac 0.9 | 2005-10-31 | Vodun
Trac 0.8 | 2004-11-15 | Qualia
Trac 0.7 | 2004-05-18 | Fulci
Trac 0.6 | 2004-03-23 | Solanum
Trac 0.5 | 2004-02-23 | Incognito
"""
        self._validate_print_table(expected, data, headers=headers, sep=' | ',
                                   ambiwidth=1)
    def test_various_types(self):
        # None renders empty; bools/numbers use their str() form
        data = (
            ('NoneType', 'None', None),
            ('bool', 'True', True),
            ('bool', 'False', False),
            ('int', '0', 0),
            ('float', '0.0', 0.0),
        )
        expected = u"""\
NoneType | None |
bool | True | True
bool | False | False
int | 0 | 0
float | 0.0 | 0.0
"""
        self._validate_print_table(expected, data, sep=' | ', ambiwidth=1)
    def test_ambiwidth_1(self):
        # the obfuscated address contains an ambiguous-width ellipsis
        data = (
            ('foo@localhost', 'foo@localhost'),
            (u'bar@….com', 'bar@example.com'),
        )
        headers = ('Obfuscated', 'Email')
        expected = u"""\
Obfuscated Email
-------------------------------
foo@localhost | foo@localhost
bar@….com | bar@example.com
"""
        self._validate_print_table(expected, data, headers=headers, sep=' | ',
                                   ambiwidth=1)
    def test_ambiwidth_2(self):
        data = (
            ('foo@localhost', 'foo@localhost'),
            (u'bar@….com', 'bar@example.com'),
        )
        headers = ('Obfuscated', 'Email')
        expected = u"""\
Obfuscated Email
-------------------------------
foo@localhost | foo@localhost
bar@….com | bar@example.com
"""
        self._validate_print_table(expected, data, headers=headers, sep=' | ',
                                   ambiwidth=2)
    def _validate_print_table(self, expected, data, **kwargs):
        # Render `data` into a buffer and compare, ignoring trailing
        # whitespace on each output line.
        out = StringIO()
        kwargs['out'] = out
        print_table(data, **kwargs)
        self.assertEqual(expected.encode('utf-8'),
                         strip_line_ws(out.getvalue(), leading=False))
class WrapTestCase(unittest.TestCase):
    """Tests for wrap() with initial/subsequent indents and wide chars."""
    def test_wrap_ambiwidth_single(self):
        text = u'Lorem ipsum dolor sit amet, consectetur adipisicing ' + \
               u'elit, sed do eiusmod tempor incididunt ut labore et ' + \
               u'dolore magna aliqua. Ut enim ad minim veniam, quis ' + \
               u'nostrud exercitation ullamco laboris nisi ut aliquip ex ' + \
               u'ea commodo consequat. Duis aute irure dolor in ' + \
               u'reprehenderit in voluptate velit esse cillum dolore eu ' + \
               u'fugiat nulla pariatur. Excepteur sint occaecat ' + \
               u'cupidatat non proident, sunt in culpa qui officia ' + \
               u'deserunt mollit anim id est laborum.'
        wrapped = u"""\
> Lorem ipsum dolor sit amet, consectetur adipisicing elit,
| sed do eiusmod tempor incididunt ut labore et dolore
| magna aliqua. Ut enim ad minim veniam, quis nostrud
| exercitation ullamco laboris nisi ut aliquip ex ea
| commodo consequat. Duis aute irure dolor in reprehenderit
| in voluptate velit esse cillum dolore eu fugiat nulla
| pariatur. Excepteur sint occaecat cupidatat non proident,
| sunt in culpa qui officia deserunt mollit anim id est
| laborum."""
        # wrap at 59 columns with '> ' initial and '| ' subsequent indent
        self.assertEqual(wrapped, wrap(text, 59, '> ', '| ', '\n'))
    def test_wrap_ambiwidth_double(self):
        # wide/ambiguous characters count as two columns here
        text = u'Trac は BSD ライセンスのもとで配布されて' + \
               u'います。[1:]このライセンスの全文は、𠀋' + \
               u'配布ファイルに含まれている[3:CОPYING]ファ' + \
               u'イルと同じものが[2:オンライン]で参照でき' \
               u'ます。'
        wrapped = u"""\
> Trac は BSD ライセンスのもとで配布されています。[1:]この
| ライセンスの全文は、𠀋配布ファイルに含まれている
| [3:CОPYING]ファイルと同じものが[2:オンライン]で参照でき
| ます。"""
        self.assertEqual(wrapped, wrap(text, 59, '> ', '| ', '\n',
                                       ambiwidth=2))
class FixEolTestCase(unittest.TestCase):
    """Tests for fix_eol() normalizing mixed line endings."""

    def test_mixed_eol(self):
        # The same mixed-EOL input is normalized to each target EOL.
        text = u'\nLine 2\rLine 3\r\nLine 4\n\r'
        expectations = [
            ('\n', u'\nLine 2\nLine 3\nLine 4\n\n'),
            ('\r', u'\rLine 2\rLine 3\rLine 4\r\r'),
            ('\r\n', u'\r\nLine 2\r\nLine 3\r\nLine 4\r\n\r\n'),
        ]
        for eol, expected in expectations:
            self.assertEqual(expected, fix_eol(text, eol))
class UnicodeBase64TestCase(unittest.TestCase):
    """Tests for unicode_to_base64()/unicode_from_base64() round-trips."""
    def test_to_and_from_base64_unicode(self):
        # unicode input is UTF-8 encoded before base64
        text = u'Trac は ØÆÅ'
        text_base64 = unicode_to_base64(text)
        self.assertEqual('VHJhYyDjga8gw5jDhsOF', text_base64)
        self.assertEqual(text, unicode_from_base64(text_base64))
    def test_to_and_from_base64_whitespace(self):
        # test that removing whitespace does not affect conversion
        text = 'a space: '
        text_base64 = unicode_to_base64(text)
        self.assertEqual('YSBzcGFjZTog', text_base64)
        self.assertEqual(text, unicode_from_base64(text_base64))
        text = 'two newlines: \n\n'
        text_base64 = unicode_to_base64(text)
        self.assertEqual('dHdvIG5ld2xpbmVzOiAKCg==', text_base64)
        self.assertEqual(text, unicode_from_base64(text_base64))
        # long input: strip_newlines changes the encoded form (newlines
        # inserted by base64 line-wrapping) but not the decoded round-trip
        text = 'a test string ' * 10000
        text_base64_strip = unicode_to_base64(text)
        text_base64_no_strip = unicode_to_base64(text, strip_newlines=False)
        self.assertNotEqual(text_base64_strip, text_base64_no_strip)
        self.assertEqual(text, unicode_from_base64(text_base64_strip))
        self.assertEqual(text, unicode_from_base64(text_base64_no_strip))
class StripwsTestCase(unittest.TestCase):
    """Tests for stripws(), which strips unicode whitespace variants
    (zero-width space, ideographic space, punctuation space, ...)."""
    def test_stripws(self):
        # default: strip both ends
        self.assertEquals(u'stripws',
                          stripws(u' \u200b\t\u3000stripws \u200b\t\u2008'))
        # leading only
        self.assertEquals(u'stripws \u3000\t',
                          stripws(u'\u200b\t\u2008 stripws \u3000\t',
                                  trailing=False))
        # trailing only
        self.assertEquals(u' \t\u3000stripws',
                          stripws(u' \t\u3000stripws \u200b\t\u2008',
                                  leading=False))
        # neither: input returned unchanged
        self.assertEquals(u' \t\u3000stripws \u200b\t\u2008',
                          stripws(u' \t\u3000stripws \u200b\t\u2008',
                                  leading=False, trailing=False))
class LevenshteinDistanceTestCase(unittest.TestCase):
    """Tests for levenshtein_distance()."""

    def test_distance(self):
        # NOTE(review): the expected values suggest the implementation
        # weights substitutions more heavily than plain edit distance
        # (e.g. kitten/sitting yields 5, not 3) -- confirm against
        # trac.util.text.levenshtein_distance before changing them.
        cases = [(5, 'kitten', 'sitting'),
                 (1, 'wii', 'wiki'),
                 (2, 'comfig', 'config'),
                 (5, 'update', 'upgrade'),
                 (0, 'milestone', 'milestone')]
        for expected, a, b in cases:
            self.assertEqual(expected, levenshtein_distance(a, b))
def suite():
    """Build the test suite for trac.util.text."""
    tests = unittest.TestSuite()
    # same registration order as before
    for case in (ToUnicodeTestCase, ExpandtabsTestCase,
                 UnicodeQuoteTestCase, JavascriptQuoteTestCase,
                 ToJsStringTestCase, QuoteQueryStringTestCase,
                 WhitespaceTestCase, TextWidthTestCase,
                 PrintTableTestCase, WrapTestCase, FixEolTestCase,
                 UnicodeBase64TestCase, StripwsTestCase,
                 LevenshteinDistanceTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/util/tests/concurrency.py | trac/trac/util/tests/concurrency.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import threading
import unittest
from trac.util.concurrency import ThreadLocal
class ThreadLocalTestCase(unittest.TestCase):
    """Tests for ThreadLocal: constructor defaults are visible in every
    thread, while assignments stay local to the assigning thread."""

    def test_thread_local(self):
        local = ThreadLocal(a=1, b=2)
        local.b = 3
        local.c = 4
        snapshots = [local.__dict__.copy()]

        def worker():
            # runs in a second thread; must not see b=3 / c=4 from above
            local.b = 5
            local.d = 6
            snapshots.append(local.__dict__.copy())

        thread = threading.Thread(target=worker)
        thread.start()
        thread.join()
        self.assertEqual({'a': 1, 'b': 3, 'c': 4}, snapshots[0])
        self.assertEqual({'a': 1, 'b': 5, 'd': 6}, snapshots[1])
def suite():
    """Build the test suite for trac.util.concurrency."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(ThreadLocalTestCase, 'test'))
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/sqlite_backend.py | trac/trac/db/sqlite_backend.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2010 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import os
import re
import weakref
from trac.config import ListOption
from trac.core import *
from trac.db.api import IDatabaseConnector
from trac.db.util import ConnectionWrapper, IterableCursor
from trac.util import get_pkginfo, getuser
from trac.util.translation import _
_like_escape_re = re.compile(r'([/_%])')
try:
import pysqlite2.dbapi2 as sqlite
have_pysqlite = 2
except ImportError:
try:
import sqlite3 as sqlite
have_pysqlite = 2
except ImportError:
have_pysqlite = 0
if have_pysqlite == 2:
    # Force values to integers because PySQLite 2.2.0 had (2, 2, '0')
    sqlite_version = tuple([int(x) for x in sqlite.sqlite_version_info])
    sqlite_version_string = sqlite.sqlite_version
    class PyFormatCursor(sqlite.Cursor):
        """Cursor accepting pyformat-style (%s) placeholders.

        Placeholders are rewritten to the qmark (?) style pysqlite
        expects, and the connection is rolled back whenever a
        DatabaseError is raised so it is left in a usable state.
        """
        def _rollback_on_error(self, function, *args, **kwargs):
            try:
                return function(self, *args, **kwargs)
            except sqlite.DatabaseError:
                # roll the whole connection back before re-raising
                self.cnx.rollback()
                raise
        def execute(self, sql, args=None):
            if args:
                # replace each %s with a ? placeholder
                sql = sql % (('?',) * len(args))
            return self._rollback_on_error(sqlite.Cursor.execute, sql,
                                           args or [])
        def executemany(self, sql, args):
            if not args:
                return
            # placeholder count is taken from the first parameter tuple
            sql = sql % (('?',) * len(args[0]))
            return self._rollback_on_error(sqlite.Cursor.executemany, sql,
                                           args)
    # EagerCursor taken from the example in pysqlite's repository:
    #
    # http://code.google.com/p/pysqlite/source/browse/misc/eager.py
    #
    # Only change is to subclass it from PyFormatCursor instead of
    # sqlite.Cursor.
    class EagerCursor(PyFormatCursor):
        """Cursor that fetches the full result set at execute() time."""
        def __init__(self, con):
            PyFormatCursor.__init__(self, con)
            self.rows = []    # eagerly fetched result rows
            self.pos = 0      # index of the next row to return
        def execute(self, *args):
            result = PyFormatCursor.execute(self, *args)
            self.rows = PyFormatCursor.fetchall(self)
            self.pos = 0
            return result
        def fetchone(self):
            try:
                row = self.rows[self.pos]
                self.pos += 1
                return row
            except IndexError:
                # past the end of the result set
                return None
        def fetchmany(self, num=None):
            if num is None:
                num = self.arraysize
            result = self.rows[self.pos:self.pos+num]
            self.pos += num
            return result
        def fetchall(self):
            result = self.rows[self.pos:]
            self.pos = len(self.rows)
            return result
# Mapping from "abstract" SQL types to DB-specific types
# (SQLite stores all integers as up-to-64-bit values, so both 'int' and
# 'int64' collapse to its "integer" affinity).
_type_map = {
    'int': 'integer',
    'int64': 'integer',
}
def _to_sql(table):
    """Yield the SQLite DDL statements -- one CREATE TABLE followed by a
    CREATE INDEX per declared index -- needed to materialize `table`.
    """
    defs = []
    for col in table.columns:
        sql_type = col.type.lower()
        sql_type = _type_map.get(sql_type, sql_type)
        if col.auto_increment:
            # SQLite auto-increments "integer PRIMARY KEY" rowid columns.
            sql_type = "integer PRIMARY KEY"
        elif len(table.key) == 1 and col.name in table.key:
            sql_type += " PRIMARY KEY"
        defs.append(" %s %s" % (col.name, sql_type))
    if len(table.key) > 1:
        # Composite keys are emulated with a UNIQUE constraint.
        defs.append(" UNIQUE (%s)" % ','.join(table.key))
    yield '\n'.join(["CREATE TABLE %s (" % table.name,
                     ',\n'.join(defs) + '\n);'])
    for index in table.indices:
        yield "CREATE %s INDEX %s_%s_idx ON %s (%s);" % (
            'UNIQUE' if index.unique else '', table.name,
            '_'.join(index.columns), table.name, ','.join(index.columns))
class SQLiteConnector(Component):
    """Database connector for SQLite.
    Database URLs should be of the form:
    {{{
    sqlite:path/to/trac.db
    }}}
    """
    implements(IDatabaseConnector)
    extensions = ListOption('sqlite', 'extensions',
        doc="""Paths to sqlite extensions, relative to Trac environment's
        directory or absolute. (''since 0.12'')""")
    memory_cnx = None  # shared connection for the ':memory:' database
    def __init__(self):
        self._version = None      # pysqlite version string, determined lazily
        self.error = None         # reason the connector is unusable, if any
        self._extensions = None   # cached absolute paths of the extensions
    def get_supported_schemes(self):
        """Yield the ('sqlite', priority) pair; a negative priority signals
        a broken binding, with the reason stored in `self.error`.
        """
        if not have_pysqlite:
            self.error = _("Cannot load Python bindings for SQLite")
        elif sqlite_version >= (3, 3, 3) and sqlite.version_info[0] == 2 and \
                sqlite.version_info < (2, 0, 7):
            self.error = _("Need at least PySqlite %(version)s or higher",
                           version='2.0.7')
        elif (2, 5, 2) <= sqlite.version_info < (2, 5, 5):
            self.error = _("PySqlite 2.5.2 - 2.5.4 break Trac, please use "
                           "2.5.5 or higher")
        yield ('sqlite', -1 if self.error else 1)
    def get_connection(self, path, log=None, params={}):
        """Return a `SQLiteConnection` for `path`.
        The shared `memory_cnx` is returned for the special ':memory:' path,
        so every user sees the same in-memory database.
        """
        if not self._version:
            self._version = get_pkginfo(sqlite).get(
                'version', '%d.%d.%s' % sqlite.version_info)
            self.env.systeminfo.extend([('SQLite', sqlite_version_string),
                                        ('pysqlite', self._version)])
            self.required = True
        # Work on a copy: the previous code wrote 'extensions' into the
        # caller's dict -- and, worse, into the shared mutable default
        # argument, leaking state between calls.
        params = dict(params)
        # construct list of sqlite extension libraries
        if self._extensions is None:
            self._extensions = []
            for extpath in self.extensions:
                if not os.path.isabs(extpath):
                    extpath = os.path.join(self.env.path, extpath)
                self._extensions.append(extpath)
        params['extensions'] = self._extensions
        if path == ':memory:':
            if not self.memory_cnx:
                self.memory_cnx = SQLiteConnection(path, log, params)
            return self.memory_cnx
        else:
            return SQLiteConnection(path, log, params)
    def get_exceptions(self):
        # The sqlite module itself carries the PEP-249 exception types.
        return sqlite
    def init_db(self, path, schema=None, log=None, params={}):
        """Create the database file (unless in-memory) and its tables.
        :raise TracError: if a database already exists at `path`
        """
        if path != ':memory:':
            # make the directory to hold the database
            if os.path.exists(path):
                raise TracError(_("Database already exists at %(path)s",
                                  path=path))
            db_dir = os.path.dirname(path)  # renamed: don't shadow builtin dir()
            if not os.path.exists(db_dir):
                os.makedirs(db_dir)
            if isinstance(path, unicode): # needed with 2.4.0
                path = path.encode('utf-8')
            # this direct connect will create the database if needed
            # NOTE(review): default timeout here is 10000 while
            # SQLiteConnection uses 10.0 -- presumably both meant seconds;
            # kept as-is to preserve behavior.
            cnx = sqlite.connect(path,
                                 timeout=int(params.get('timeout', 10000)))
        else:
            cnx = self.get_connection(path, log, params)
        cursor = cnx.cursor()
        if schema is None:
            from trac.db_default import schema
        for table in schema:
            for stmt in self.to_sql(table):
                cursor.execute(stmt)
        cnx.commit()
    def to_sql(self, table):
        """Return an iterable of DDL statements creating `table`."""
        return _to_sql(table)
    def alter_column_types(self, table, columns):
        """Yield SQL statements altering the type of one or more columns of
        a table.
        Type changes are specified as a `columns` dict mapping column names
        to `(from, to)` SQL type tuples.
        """
        for name, (from_, to) in sorted(columns.iteritems()):
            if _type_map.get(to, to) != _type_map.get(from_, from_):
                raise NotImplementedError('Conversion from %s to %s is not '
                                          'implemented' % (from_, to))
        # All supported "abstract" types share SQLite's integer affinity,
        # so no ALTER statement is ever required.
        return ()
    def backup(self, dest_file):
        """Simple SQLite-specific backup of the database.
        @param dest_file: Destination file basename
        """
        import shutil
        db_str = self.config.get('trac', 'database')
        try:
            db_str = db_str[:db_str.index('?')]  # strip query parameters
        except ValueError:
            pass
        # Skip the 'sqlite:' prefix (7 characters) to get the relative path.
        db_name = os.path.join(self.env.path, db_str[7:])
        shutil.copy(db_name, dest_file)
        if not os.path.exists(dest_file):
            raise TracError(_("No destination file created"))
        return dest_file
class SQLiteConnection(ConnectionWrapper):
    """Connection wrapper for SQLite."""
    # Extra slots on top of ConnectionWrapper's ('cnx', 'log', 'readonly').
    __slots__ = ['_active_cursors', '_eager']
    # Pooling is only safe with a thread-safe SQLite and a pysqlite that
    # allows cross-thread connection use.
    poolable = have_pysqlite and sqlite_version >= (3, 3, 8) \
               and sqlite.version_info >= (2, 5, 0)
    def __init__(self, path, log=None, params={}):
        assert have_pysqlite > 0
        self.cnx = None
        if path != ':memory:':
            # Fail early with clear messages for missing or unwritable files.
            if not os.access(path, os.F_OK):
                raise TracError(_('Database "%(path)s" not found.', path=path))
            dbdir = os.path.dirname(path)
            if not os.access(path, os.R_OK + os.W_OK) or \
                    not os.access(dbdir, os.R_OK + os.W_OK):
                raise TracError(
                    _('The user %(user)s requires read _and_ write '
                      'permissions to the database file %(path)s '
                      'and the directory it is located in.',
                      user=getuser(), path=path))
        # Cursors opened on this connection, closed en masse on rollback.
        self._active_cursors = weakref.WeakKeyDictionary()
        timeout = int(params.get('timeout', 10.0))
        self._eager = params.get('cursor', 'eager') == 'eager'
        # eager is default, can be turned off by specifying ?cursor=
        if isinstance(path, unicode): # needed with 2.4.0
            path = path.encode('utf-8')
        cnx = sqlite.connect(path, detect_types=sqlite.PARSE_DECLTYPES,
                             check_same_thread=sqlite_version < (3, 3, 1),
                             timeout=timeout)
        # load extensions
        extensions = params.get('extensions', [])
        if len(extensions) > 0:
            # Only enable extension loading for the duration of the loads.
            cnx.enable_load_extension(True)
            for ext in extensions:
                cnx.load_extension(ext)
            cnx.enable_load_extension(False)
        ConnectionWrapper.__init__(self, cnx, log)
    def cursor(self):
        # Select the prefetching (eager) or plain pyformat cursor class.
        cursor = self.cnx.cursor((PyFormatCursor, EagerCursor)[self._eager])
        self._active_cursors[cursor] = True
        # Give the cursor a backlink for PyFormatCursor._rollback_on_error.
        cursor.cnx = self
        return IterableCursor(cursor, self.log)
    def rollback(self):
        # Close every open cursor first; pysqlite would otherwise leave
        # them referencing rolled-back state.
        for cursor in self._active_cursors.keys():
            cursor.close()
        self.cnx.rollback()
    def cast(self, column, type):
        if sqlite_version >= (3, 2, 3):
            return 'CAST(%s AS %s)' % (column, _type_map.get(type, type))
        elif type == 'int':
            # hack to force older SQLite versions to convert column to an int
            return '1*' + column
        else:
            return column
    def concat(self, *args):
        # SQLite uses the standard || operator for string concatenation.
        return '||'.join(args)
    def like(self):
        """Return a case-insensitive LIKE clause."""
        if sqlite_version >= (3, 1, 0):
            # ESCAPE support appeared in SQLite 3.1.0.
            return "LIKE %s ESCAPE '/'"
        else:
            return 'LIKE %s'
    def like_escape(self, text):
        if sqlite_version >= (3, 1, 0):
            return _like_escape_re.sub(r'/\1', text)
        else:
            return text
    def quote(self, identifier):
        """Return the quoted identifier."""
        # SQLite accepts MySQL-style backtick quoting.
        return "`%s`" % identifier.replace('`', '``')
    def get_last_id(self, cursor, table, column='id'):
        # SQLite exposes the last inserted rowid directly on the cursor.
        return cursor.lastrowid
    def update_sequence(self, cursor, table, column='id'):
        # SQLite handles sequence updates automagically
        # http://www.sqlite.org/autoinc.html
        pass
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/api.py | trac/trac/db/api.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import os
import time
import urllib
from trac.config import BoolOption, IntOption, Option
from trac.core import *
from trac.util.concurrency import ThreadLocal
from trac.util.text import unicode_passwd
from trac.util.translation import _
from .pool import ConnectionPool
from .util import ConnectionWrapper
def with_transaction(env, db=None):
    """Function decorator to emulate a context manager for database
    transactions.
    >>> def api_method(p1, p2):
    >>>     result[0] = value1
    >>>     @with_transaction(env)
    >>>     def implementation(db):
    >>>         # implementation
    >>>         result[0] = value2
    >>>     return result[0]
    In this example, the `implementation()` function is called
    automatically right after its definition, with a database
    connection as an argument. If the function completes, a COMMIT is
    issued on the connection. If the function raises an exception, a
    ROLLBACK is issued and the exception is re-raised. Nested
    transactions are supported, and a COMMIT will only be issued when
    the outermost transaction block in a thread exits.
    This mechanism is intended to replace the former practice of
    getting a database connection with `env.get_db_cnx()` and issuing
    an explicit commit or rollback, for mutating database
    accesses. Its automatic handling of commit, rollback and nesting
    makes it much more robust.
    The optional `db` argument is intended for legacy code and should
    not be used in new code.
    :deprecated: This decorator is in turn deprecated in favor of
                 context managers now that python 2.4 support has been
                 dropped. Use instead the new context manager,
                 `QueryContextManager` and
                 `TransactionContextManager`, which makes for much
                 simpler to write code:
    >>> def api_method(p1, p2):
    >>>     result = value1
    >>>     with env.db_transaction as db:
    >>>         # implementation
    >>>         result = value2
    >>>     return result
    """
    dbm = DatabaseManager(env)
    _transaction_local = dbm._transaction_local
    def transaction_wrapper(fn):
        # Invoked immediately by the decorator syntax: runs `fn` in the
        # appropriate transaction scope rather than returning a wrapper.
        ldb = _transaction_local.wdb
        if db is not None:
            # Legacy caller supplied its own connection: the outermost
            # level registers it; nested levels must reuse the same one.
            if ldb is None:
                _transaction_local.wdb = db
                try:
                    fn(db)
                finally:
                    _transaction_local.wdb = None
            else:
                assert ldb is db, "Invalid transaction nesting"
                fn(db)
        elif ldb:
            # Nested call: join the transaction already in progress; the
            # outermost level will commit or roll back.
            fn(ldb)
        else:
            # Outermost call: open a connection, commit on success, roll
            # back on any exception, always clearing the thread-local.
            ldb = _transaction_local.wdb = dbm.get_connection()
            try:
                fn(ldb)
                ldb.commit()
                _transaction_local.wdb = None
            except:
                _transaction_local.wdb = None
                ldb.rollback()
                ldb = None
                raise
    return transaction_wrapper
class DbContextManager(object):
    """Base class for the database context managers.
    Closing the connection is the responsibility of the outermost
    `DbContextManager` active in a thread.
    """
    db = None
    def __init__(self, env):
        self.dbmgr = DatabaseManager(env)
    def execute(self, query, params=None):
        """Shortcut for directly executing a query."""
        with self as cnx:
            return cnx.execute(query, params)
    __call__ = execute
    def executemany(self, query, params=None):
        """Shortcut for directly calling "executemany" on a query."""
        with self as cnx:
            return cnx.executemany(query, params)
class TransactionContextManager(DbContextManager):
    """Transactioned Database Context Manager for retrieving a
    `~trac.db.util.ConnectionWrapper`.
    The outermost such context manager will perform a commit upon
    normal exit or a rollback after an exception.
    """
    def __enter__(self):
        db = self.dbmgr._transaction_local.wdb # outermost writable db
        if not db:
            db = self.dbmgr._transaction_local.rdb # reuse wrapped connection
            if db:
                # A read-only context is active in this thread: rewrap the
                # same low-level connection without the readonly restriction.
                db = ConnectionWrapper(db.cnx, db.log)
            else:
                db = self.dbmgr.get_connection()
            # Record the writable connection; `self.db` marks this instance
            # as the outermost transaction in the thread.
            self.dbmgr._transaction_local.wdb = self.db = db
        return db
    def __exit__(self, et, ev, tb):
        if self.db:
            # Only the outermost context (which set `self.db`) terminates
            # the transaction: commit on success, rollback on exception.
            self.dbmgr._transaction_local.wdb = None
            if et is None:
                self.db.commit()
            else:
                self.db.rollback()
            # Keep the underlying connection open if a read-only context
            # in this thread is still using it.
            if not self.dbmgr._transaction_local.rdb:
                self.db.close()
class QueryContextManager(DbContextManager):
    """Database Context Manager for retrieving a read-only
    `~trac.db.util.ConnectionWrapper`.
    """
    def __enter__(self):
        db = self.dbmgr._transaction_local.rdb # outermost readonly db
        if not db:
            db = self.dbmgr._transaction_local.wdb # reuse wrapped connection
            if db:
                # A transaction is active in this thread: read through the
                # same low-level connection, with readonly enforcement.
                db = ConnectionWrapper(db.cnx, db.log, readonly=True)
            else:
                db = self.dbmgr.get_connection(readonly=True)
            # `self.db` marks this instance as the outermost read-only
            # context in the thread.
            self.dbmgr._transaction_local.rdb = self.db = db
        return db
    def __exit__(self, et, ev, tb):
        if self.db:
            # Only the outermost read-only context clears the thread state;
            # the connection stays open while a transaction still uses it.
            self.dbmgr._transaction_local.rdb = None
            if not self.dbmgr._transaction_local.wdb:
                self.db.close()
class IDatabaseConnector(Interface):
    """Extension point interface for components that support the
    connection to relational databases.
    """
    def get_supported_schemes():
        """Return the connection URL schemes supported by the
        connector, and their relative priorities as an iterable of
        `(scheme, priority)` tuples.
        If `priority` is a negative number, this is indicative of an
        error condition with the connector. An error message should be
        attached to the `error` attribute of the connector.
        """
    def get_connection(path, log=None, **kwargs):
        """Create a new connection to the database."""
    def get_exceptions():
        """Return an object (typically a module) containing all the
        backend-specific exception types as attributes, named
        according to the Python Database API
        (http://www.python.org/dev/peps/pep-0249/).
        """
    def init_db(path, schema=None, log=None, **kwargs):
        """Initialize the database, creating all tables from `schema`
        (defaults to `trac.db_default.schema` when not given)."""
    def to_sql(table):
        """Return the DDL statements necessary to create the specified
        table, including indices."""
    def backup(dest):
        """Backup the database to a location defined by
        trac.backup_dir, returning the path of the file written."""
class DatabaseManager(Component):
    """Component used to manage the `IDatabaseConnector` implementations."""
    connectors = ExtensionPoint(IDatabaseConnector)
    connection_uri = Option('trac', 'database', 'sqlite:db/trac.db',
        """Database connection
        [wiki:TracEnvironment#DatabaseConnectionStrings string] for this
        project""")
    backup_dir = Option('trac', 'backup_dir', 'db',
        """Database backup location""")
    timeout = IntOption('trac', 'timeout', '20',
        """Timeout value for database connection, in seconds.
        Use '0' to specify ''no timeout''. ''(Since 0.11)''""")
    debug_sql = BoolOption('trac', 'debug_sql', False,
        """Show the SQL queries in the Trac log, at DEBUG level.
        ''(Since 0.11.5)''""")
    def __init__(self):
        self._cnx_pool = None  # lazily created ConnectionPool
        # Per-thread outermost writable (wdb) / read-only (rdb) connections,
        # shared with with_transaction() and the db context managers.
        self._transaction_local = ThreadLocal(wdb=None, rdb=None)
    def init_db(self):
        # Create the database with the default Trac schema.
        connector, args = self.get_connector()
        from trac.db_default import schema
        args['schema'] = schema
        connector.init_db(**args)
    def get_connection(self, readonly=False):
        """Get a database connection from the pool.
        If `readonly` is `True`, the returned connection will purposedly
        lack the `rollback` and `commit` methods.
        """
        if not self._cnx_pool:
            connector, args = self.get_connector()
            self._cnx_pool = ConnectionPool(5, connector, **args)
        db = self._cnx_pool.get_cnx(self.timeout or None)
        if readonly:
            db = ConnectionWrapper(db, readonly=True)
        return db
    def get_exceptions(self):
        # Delegate to the active backend's PEP-249 exception container.
        return self.get_connector()[0].get_exceptions()
    def shutdown(self, tid=None):
        # Close pooled connections; without a thread id, drop the pool.
        if self._cnx_pool:
            self._cnx_pool.shutdown(tid)
            if not tid:
                self._cnx_pool = None
    def backup(self, dest=None):
        """Save a backup of the database.
        :param dest: base filename to write to.
        Returns the file actually written.
        """
        connector, args = self.get_connector()
        if not dest:
            # Default destination: <backup_dir>/<scheme>.<version>.<ts>.bak
            backup_dir = self.backup_dir
            if not os.path.isabs(backup_dir):
                backup_dir = os.path.join(self.env.path, backup_dir)
            db_str = self.config.get('trac', 'database')
            db_name, db_path = db_str.split(":", 1)
            dest_name = '%s.%i.%d.bak' % (db_name, self.env.get_version(),
                                          int(time.time()))
            dest = os.path.join(backup_dir, dest_name)
        else:
            backup_dir = os.path.dirname(dest)
        if not os.path.exists(backup_dir):
            os.makedirs(backup_dir)
        return connector.backup(dest)
    def get_connector(self):
        """Return the (connector, args) pair for the configured database.
        :raise TracError: if the scheme is unsupported, or the matching
                          connector reported an error condition
        """
        scheme, args = _parse_db_str(self.connection_uri)
        # Collect every connector claiming to support this scheme, and
        # pick the one with the highest priority.
        candidates = [
            (priority, connector)
            for connector in self.connectors
            for scheme_, priority in connector.get_supported_schemes()
            if scheme_ == scheme
        ]
        if not candidates:
            raise TracError(_('Unsupported database type "%(scheme)s"',
                              scheme=scheme))
        priority, connector = max(candidates)
        if priority < 0:
            raise TracError(connector.error)
        if scheme == 'sqlite':
            if args['path'] == ':memory:':
                # Special case for SQLite in-memory database, always get
                # the /same/ connection over
                pass
            elif not args['path'].startswith('/'):
                # Special case for SQLite to support a path relative to the
                # environment directory
                args['path'] = os.path.join(self.env.path,
                                            args['path'].lstrip('/'))
        if self.debug_sql:
            args['log'] = self.log
        return connector, args
    _get_connector = get_connector # For 0.11 compatibility
def get_column_names(cursor):
    """Return the list of column names exposed by `cursor.description`.
    Byte-string names are decoded as UTF-8; an empty list is returned for
    cursors without a description (e.g. after a non-SELECT statement).
    """
    description = cursor.description
    if not description:
        return []
    names = []
    for d in description:
        name = d[0]
        if isinstance(name, str):
            name = unicode(name, 'utf-8')
        names.append(name)
    return names
def _parse_db_str(db_str):
    """Parse a database connection URL into a `(scheme, args)` pair.
    `args` is a dict containing only the non-empty values among 'user',
    'password', 'host', 'port', 'path' and 'params' (the query string,
    itself a dict).
    :raise TracError: if the URL is malformed for the given scheme
    """
    scheme, rest = db_str.split(':', 1)
    if not rest.startswith('/'):
        if scheme == 'sqlite':
            # Support for relative and in-memory SQLite connection strings
            host = None
            path = rest
        else:
            raise TracError(_('Unknown scheme "%(scheme)s"; database '
                              'connection string must start with {scheme}:/',
                              scheme=scheme))
    else:
        # Distinguish '/path', '///path' (no host) from '//host/path'.
        if not rest.startswith('//'):
            host = None
            rest = rest[1:]
        elif rest.startswith('///'):
            host = None
            rest = rest[3:]
        else:
            rest = rest[2:]
            if '/' not in rest:
                host = rest
                rest = ''
            else:
                host, rest = rest.split('/', 1)
        path = None
    if host and '@' in host:
        # Split optional 'user[:password]@' credentials off the host part.
        user, host = host.split('@', 1)
        if ':' in user:
            user, password = user.split(':', 1)
        else:
            password = None
        if user:
            user = urllib.unquote(user)
        if password:
            # Wrap the password so it is not displayed in tracebacks.
            password = unicode_passwd(urllib.unquote(password))
    else:
        user = password = None
    if host and ':' in host:
        host, port = host.split(':')
        port = int(port)
    else:
        port = None
    if not path:
        path = '/' + rest
    if os.name == 'nt':
        # Support local paths containing drive letters on Win32
        if len(rest) > 1 and rest[1] == '|':
            path = "%s:%s" % (rest[0], rest[2:])
    # Query-string parameters, e.g. '?timeout=20&cursor='.
    params = {}
    if '?' in path:
        path, qs = path.split('?', 1)
        qs = qs.split('&')
        for param in qs:
            name, value = param.split('=', 1)
            value = urllib.unquote(value)
            params[name] = value
    # Drop all falsy components (None, '', 0, {}) from the result.
    args = zip(('user', 'password', 'host', 'port', 'path', 'params'),
               (user, password, host, port, path, params))
    return scheme, dict([(key, value) for key, value in args if value])
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/util.py | trac/trac/db/util.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
def sql_escape_percent(sql):
    """Double every '%' that occurs inside a single-quoted string literal
    of `sql`, so the statement survives pyformat parameter substitution.
    Doubled single quotes ('') inside a literal are handled.
    """
    import re
    def _double_percents(match):
        return match.group(0).replace('%', '%%')
    return re.sub("'((?:[^']|(?:''))*)'", _double_percents, sql)
class IterableCursor(object):
    """Wrapper for DB-API cursor objects that makes the cursor iterable
    and escapes all "%"s used inside literal strings with parameterized
    queries.
    Iteration will generate the rows of a SELECT query one by one.
    """
    __slots__ = ['cursor', 'log']
    def __init__(self, cursor, log=None):
        self.cursor = cursor
        self.log = log
    def __getattr__(self, name):
        # Delegate everything else to the wrapped DB-API cursor.
        return getattr(self.cursor, name)
    def __iter__(self):
        while True:
            row = self.cursor.fetchone()
            if not row:
                return
            yield row
    def execute(self, sql, args=None):
        # The logging and non-logging paths are intentionally duplicated:
        # with a logger, execution happens inside the try/except (logging
        # and re-raising failures) and returns from there; without one,
        # control falls through to the bare calls at the end.
        if self.log:
            self.log.debug('SQL: %s', sql)
            try:
                if args:
                    self.log.debug('args: %r', args)
                    r = self.cursor.execute(sql_escape_percent(sql), args)
                else:
                    r = self.cursor.execute(sql)
                # EagerCursor exposes its prefetched rows via `rows`.
                rows = getattr(self.cursor, 'rows', None)
                if rows is not None:
                    self.log.debug("prefetch: %d rows", len(rows))
                return r
            except Exception, e:
                self.log.debug('execute exception: %r', e)
                raise
        if args:
            # Literal "%"s only need escaping for parameterized queries.
            return self.cursor.execute(sql_escape_percent(sql), args)
        return self.cursor.execute(sql)
    def executemany(self, sql, args):
        # Same structure as execute(): logged path returns from within the
        # try/except; the unlogged path runs the trailing statements.
        if self.log:
            self.log.debug('SQL: %r', sql)
            self.log.debug('args: %r', args)
            if not args:
                return
            try:
                if args[0]:
                    return self.cursor.executemany(sql_escape_percent(sql),
                                                   args)
                return self.cursor.executemany(sql, args)
            except Exception, e:
                self.log.debug('executemany exception: %r', e)
                raise
        if not args:
            return
        if args[0]:
            return self.cursor.executemany(sql_escape_percent(sql), args)
        return self.cursor.executemany(sql, args)
class ConnectionWrapper(object):
    """Generic wrapper around DB-API connection objects.
    :since 0.12: This wrapper no longer makes cursors produced by the
    connection iterable using `IterableCursor`.
    :since 1.0: a 'readonly' flag hides `commit` and `rollback` and
    restricts execution to SELECT statements.
    """
    __slots__ = ('cnx', 'log', 'readonly')
    def __init__(self, cnx, log=None, readonly=False):
        self.cnx = cnx
        self.log = log
        self.readonly = readonly
    def __getattr__(self, name):
        # Hide transaction control on read-only wrappers; everything else
        # is forwarded to the underlying connection.
        if self.readonly and name in ('commit', 'rollback'):
            raise AttributeError
        return getattr(self.cnx, name)
    def execute(self, query, params=None):
        """Execute an SQL `query`, with `params` as the optional tuple of
        parameter values.
        For a SELECT, all rows are fetched and returned; otherwise `None`
        is returned. Use `cursor()` when more control is needed.
        """
        is_select = self.check_select(query)
        cursor = self.cnx.cursor()
        cursor.execute(query, params)
        result = cursor.fetchall() if is_select else None
        cursor.close()
        return result
    __call__ = execute
    def executemany(self, query, params=None):
        """Execute an SQL `query` once per tuple in the `params` sequence
        ("executemany").
        For a SELECT, all rows are fetched and returned; otherwise `None`
        is returned. Use `cursor()` when more control is needed.
        """
        is_select = self.check_select(query)
        cursor = self.cnx.cursor()
        cursor.executemany(query, params)
        result = cursor.fetchall() if is_select else None
        cursor.close()
        return result
    def check_select(self, query):
        """Verify that `query` is compatible with this connection's
        read-only status.
        :return: `True` if this is a SELECT
        :raise: `ValueError` if this is not a SELECT and the wrapped
                Connection is read-only.
        """
        is_select = query.lstrip().startswith('SELECT')
        if not is_select and self.readonly:
            raise ValueError("a 'readonly' connection can only do a SELECT")
        return is_select
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/schema.py | trac/trac/db/schema.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
class Table(object):
    """Declare a table in a database schema."""
    def __init__(self, name, key=None):
        """:param name: table name
        :param key: primary-key column name(s) -- either a single column
                    name or a sequence of names; defaults to no key.
        """
        self.name = name
        self.columns = []
        self.indices = []
        # NOTE: `key` formerly defaulted to a shared mutable `[]`, which
        # every keyless Table instance aliased; use a None sentinel instead.
        if key is None:
            key = []
        self.key = [key] if isinstance(key, basestring) else key
    def __getitem__(self, objs):
        # Populate the table via `Table('t', ...)[col, ..., idx, ...]` and
        # return the table itself so declarations can be chained.
        self.columns = [o for o in objs if isinstance(o, Column)]
        self.indices = [o for o in objs if isinstance(o, Index)]
        return self
class Column(object):
    """Declare a table column in a database schema."""
    def __init__(self, name, type='text', size=None, key_size=None,
                 auto_increment=False):
        """:param name: column name
        :param type: abstract SQL type (e.g. 'text', 'int', 'int64')
        :param size: optional column size
        :param key_size: optional size used when the column is a key
        :param auto_increment: whether the column auto-increments
        """
        (self.name, self.type, self.size, self.key_size,
         self.auto_increment) = name, type, size, key_size, auto_increment
class Index(object):
    """Declare an index for a database schema."""
    def __init__(self, columns, unique=False):
        """:param columns: list of column names covered by the index
        :param unique: whether the index enforces uniqueness
        """
        self.columns, self.unique = columns, unique
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/postgres_backend.py | trac/trac/db/postgres_backend.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import re, os
from genshi import Markup
from trac.core import *
from trac.config import Option
from trac.db.api import IDatabaseConnector, _parse_db_str
from trac.db.util import ConnectionWrapper, IterableCursor
from trac.util import get_pkginfo
from trac.util.compat import close_fds
from trac.util.text import empty, exception_to_unicode, to_unicode
from trac.util.translation import _
has_psycopg = False
try:
import psycopg2 as psycopg
import psycopg2.extensions
from psycopg2 import DataError, ProgrammingError
from psycopg2.extensions import register_type, UNICODE, \
register_adapter, AsIs, QuotedString
register_type(UNICODE)
register_adapter(Markup, lambda markup: QuotedString(unicode(markup)))
register_adapter(type(empty), lambda empty: AsIs("''"))
has_psycopg = True
except ImportError:
pass
# Matches the characters that must be escaped in a LIKE pattern: the '/'
# escape character itself plus the '_' and '%' wildcards.
_like_escape_re = re.compile(r'([/_%])')
# Mapping from "abstract" SQL types to DB-specific types
# (PostgreSQL has a native 64-bit integer type).
_type_map = {
    'int64': 'bigint',
}
def assemble_pg_dsn(path, user=None, password=None, host=None, port=None):
    """Quote the parameters and assemble the DSN.
    Values are single-quoted as required by libpq keyword/value connection
    strings; any backslash or single quote inside a value is escaped with
    a backslash, so e.g. a password containing a quote no longer produces
    a malformed (or injectable) DSN. Empty/None values are omitted.
    """
    def quote(value):
        # libpq quoting rules: within '...', escape \ and ' with a backslash.
        value = '%s' % value
        return "'%s'" % value.replace('\\', '\\\\').replace("'", "\\'")
    dsn = {'dbname': path, 'user': user, 'password': password, 'host': host,
           'port': port}
    return ' '.join("%s=%s" % (k, quote(v)) for k, v in dsn.items() if v)
class PostgreSQLConnector(Component):
    """Database connector for PostgreSQL.
    Database URLs should be of the form:
    {{{
    postgres://user[:password]@host[:port]/database[?schema=my_schema]
    }}}
    """
    implements(IDatabaseConnector)
    pg_dump_path = Option('trac', 'pg_dump_path', 'pg_dump',
        """Location of pg_dump for Postgres database backups""")
    def __init__(self):
        self._version = None   # psycopg2 version string, determined lazily
        self.error = None      # reason the connector is unusable, if any
    def get_supported_schemes(self):
        # Negative priority signals that psycopg2 could not be imported.
        if not has_psycopg:
            self.error = _("Cannot load Python bindings for PostgreSQL")
        yield ('postgres', -1 if self.error else 1)
    def get_connection(self, path, log=None, user=None, password=None,
                       host=None, port=None, params={}):
        cnx = PostgreSQLConnection(path, log, user, password, host, port,
                                   params)
        if not self._version:
            # Register the psycopg2 version in the "About Trac" system info.
            self._version = get_pkginfo(psycopg).get('version',
                                                     psycopg.__version__)
            self.env.systeminfo.append(('psycopg2', self._version))
            self.required = True
        return cnx
    def get_exceptions(self):
        # psycopg2 carries the PEP-249 exception types as attributes.
        return psycopg
    def init_db(self, path, schema=None, log=None, user=None, password=None,
                host=None, port=None, params={}):
        """Create the schema (when one is configured) and all tables."""
        cnx = self.get_connection(path, log, user, password, host, port,
                                  params)
        cursor = cnx.cursor()
        if cnx.schema:
            cursor.execute('CREATE SCHEMA "%s"' % cnx.schema)
            cursor.execute('SET search_path TO %s', (cnx.schema,))
        if schema is None:
            from trac.db_default import schema
        for table in schema:
            for stmt in self.to_sql(table):
                cursor.execute(stmt)
        cnx.commit()
    def to_sql(self, table):
        """Yield the CREATE TABLE / CREATE INDEX statements for `table`."""
        sql = ['CREATE TABLE "%s" (' % table.name]
        coldefs = []
        for column in table.columns:
            ctype = column.type
            ctype = _type_map.get(ctype, ctype)
            if column.auto_increment:
                # SERIAL creates the backing "<table>_<column>_seq" sequence
                # relied upon by get_last_id() / update_sequence().
                ctype = 'SERIAL'
            if len(table.key) == 1 and column.name in table.key:
                ctype += ' PRIMARY KEY'
            coldefs.append(' "%s" %s' % (column.name, ctype))
        if len(table.key) > 1:
            coldefs.append(' CONSTRAINT "%s_pk" PRIMARY KEY ("%s")'
                           % (table.name, '","'.join(table.key)))
        sql.append(',\n'.join(coldefs) + '\n)')
        yield '\n'.join(sql)
        for index in table.indices:
            unique = 'UNIQUE' if index.unique else ''
            yield 'CREATE %s INDEX "%s_%s_idx" ON "%s" ("%s")' % \
                  (unique, table.name,
                   '_'.join(index.columns), table.name,
                   '","'.join(index.columns))
    def alter_column_types(self, table, columns):
        """Yield SQL statements altering the type of one or more columns of
        a table.
        Type changes are specified as a `columns` dict mapping column names
        to `(from, to)` SQL type tuples.
        """
        alterations = []
        for name, (from_, to) in sorted(columns.iteritems()):
            to = _type_map.get(to, to)
            if to != _type_map.get(from_, from_):
                alterations.append((name, to))
        if alterations:
            yield "ALTER TABLE %s %s" % (table,
                ', '.join("ALTER COLUMN %s TYPE %s" % each
                          for each in alterations))
    def backup(self, dest_file):
        """Backup the database to `dest_file` + '.gz' using pg_dump.
        :raise TracError: if pg_dump cannot be run or exits with an error
        """
        from subprocess import Popen, PIPE
        db_url = self.env.config.get('trac', 'database')
        scheme, db_prop = _parse_db_str(db_url)
        db_params = db_prop.setdefault('params', {})
        db_name = os.path.basename(db_prop['path'])
        args = [self.pg_dump_path, '-C', '--inserts', '-x', '-Z', '8']
        if 'user' in db_prop:
            args.extend(['-U', db_prop['user']])
        if 'host' in db_params:
            host = db_params['host']
        else:
            host = db_prop.get('host')
        if host:
            args.extend(['-h', host])
            # A host containing '/' is a Unix-socket directory: no port.
            if '/' not in host:
                args.extend(['-p', str(db_prop.get('port', '5432'))])
        if 'schema' in db_params:
            # Probe the pg_dump version to determine -n argument quoting.
            try:
                p = Popen([self.pg_dump_path, '--version'], stdout=PIPE,
                          close_fds=close_fds)
            except OSError, e:
                raise TracError(_("Unable to run %(path)s: %(msg)s",
                                  path=self.pg_dump_path,
                                  msg=exception_to_unicode(e)))
            # Need quote for -n (--schema) option in PostgreSQL 8.2+
            version = p.communicate()[0]
            if re.search(r' 8\.[01]\.', version):
                args.extend(['-n', db_params['schema']])
            else:
                args.extend(['-n', '"%s"' % db_params['schema']])
        dest_file += ".gz"
        args.extend(['-f', dest_file, db_name])
        environ = os.environ.copy()
        if 'password' in db_prop:
            # pg_dump reads the password from the environment, not argv.
            environ['PGPASSWORD'] = str(db_prop['password'])
        try:
            p = Popen(args, env=environ, stderr=PIPE, close_fds=close_fds)
        except OSError, e:
            raise TracError(_("Unable to run %(path)s: %(msg)s",
                              path=self.pg_dump_path,
                              msg=exception_to_unicode(e)))
        errmsg = p.communicate()[1]
        if p.returncode != 0:
            raise TracError(_("pg_dump failed: %(msg)s",
                              msg=to_unicode(errmsg.strip())))
        if not os.path.exists(dest_file):
            raise TracError(_("No destination file created"))
        return dest_file
class PostgreSQLConnection(ConnectionWrapper):
    """Connection wrapper for PostgreSQL."""
    poolable = True
    def __init__(self, path, log=None, user=None, password=None, host=None,
                 port=None, params={}):
        # A leading '/' comes from the URL form postgres://host/dbname.
        if path.startswith('/'):
            path = path[1:]
        if 'host' in params:
            host = params['host']
        cnx = psycopg.connect(assemble_pg_dsn(path, user, password, host,
                                              port))
        cnx.set_client_encoding('UNICODE')
        try:
            self.schema = None
            if 'schema' in params:
                # Switch to the configured schema; roll back (and fall back
                # to the default search path) if it does not exist yet.
                self.schema = params['schema']
                cnx.cursor().execute('SET search_path TO %s', (self.schema,))
                cnx.commit()
        except (DataError, ProgrammingError):
            cnx.rollback()
        ConnectionWrapper.__init__(self, cnx, log)
    def cast(self, column, type):
        # Temporary hack needed for the union of selects in the search module
        return 'CAST(%s AS %s)' % (column, _type_map.get(type, type))
    def concat(self, *args):
        # Standard SQL || concatenation operator.
        return '||'.join(args)
    def like(self):
        """Return a case-insensitive LIKE clause."""
        return "ILIKE %s ESCAPE '/'"
    def like_escape(self, text):
        return _like_escape_re.sub(r'/\1', text)
    def quote(self, identifier):
        """Return the quoted identifier."""
        return '"%s"' % identifier.replace('"', '""')
    def get_last_id(self, cursor, table, column='id'):
        # Read the current value of the SERIAL column's backing sequence.
        cursor.execute("""SELECT CURRVAL('"%s_%s_seq"')""" % (table, column))
        return cursor.fetchone()[0]
    def update_sequence(self, cursor, table, column='id'):
        # Resynchronize the sequence after rows were inserted with explicit
        # ids (e.g. during a database upgrade or restore).
        cursor.execute("""
            SELECT setval('"%s_%s_seq"', (SELECT MAX(%s) FROM %s))
            """ % (table, column, column, table))
    def cursor(self):
        return IterableCursor(self.cnx.cursor(), self.log)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/__init__.py | trac/trac/db/__init__.py | from trac.db.api import *
from trac.db.schema import *
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/mysql_backend.py | trac/trac/db/mysql_backend.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Jeff Weiss <trac@jeffweiss.org>
# Copyright (C) 2006 Andres Salomon <dilinger@athenacr.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os, re, types
from genshi.core import Markup
from trac.core import *
from trac.config import Option
from trac.db.api import IDatabaseConnector, _parse_db_str
from trac.db.util import ConnectionWrapper, IterableCursor
from trac.util import as_int, get_pkginfo
from trac.util.compat import close_fds
from trac.util.text import exception_to_unicode, to_unicode
from trac.util.translation import _
# Characters that must be escaped in LIKE patterns; '/' is used as the
# escape character (see MySQLConnection.like()).
_like_escape_re = re.compile(r'([/_%])')

try:
    import MySQLdb
    import MySQLdb.cursors
    has_mysqldb = True

    class MySQLUnicodeCursor(MySQLdb.cursors.Cursor):
        """Cursor subclass decoding utf-8 encoded `str` cells to `unicode`."""
        def _convert_row(self, row):
            # Non-string values (ints, None, ...) pass through unchanged.
            return tuple(v.decode('utf-8') if isinstance(v, str) else v
                         for v in row)
        def fetchone(self):
            row = super(MySQLUnicodeCursor, self).fetchone()
            return self._convert_row(row) if row else None
        def fetchmany(self, num):
            rows = super(MySQLUnicodeCursor, self).fetchmany(num)
            return [self._convert_row(row) for row in rows] \
                   if rows is not None else []
        def fetchall(self):
            rows = super(MySQLUnicodeCursor, self).fetchall()
            return [self._convert_row(row) for row in rows] \
                   if rows is not None else []
except ImportError:
    # MySQL support is optional; get_supported_schemes reports the absence.
    has_mysqldb = False

# Mapping from "abstract" SQL types to DB-specific types
_type_map = {
    'int64': 'bigint',
}
class MySQLConnector(Component):
"""Database connector for MySQL version 4.1 and greater.
Database URLs should be of the form:
{{{
mysql://user[:password]@host[:port]/database[?param1=value¶m2=value]
}}}
The following parameters are supported:
* `compress`: Enable compression (0 or 1)
* `init_command`: Command to run once the connection is created
* `named_pipe`: Use a named pipe to connect on Windows (0 or 1)
* `read_default_file`: Read default client values from the given file
* `read_default_group`: Configuration group to use from the default file
* `unix_socket`: Use a Unix socket at the given path to connect
"""
implements(IDatabaseConnector)
mysqldump_path = Option('trac', 'mysqldump_path', 'mysqldump',
"""Location of mysqldump for MySQL database backups""")
def __init__(self):
self._version = None
self.error = None
def get_supported_schemes(self):
if not has_mysqldb:
self.error = _("Cannot load Python bindings for MySQL")
yield ('mysql', -1 if self.error else 1)
def get_connection(self, path, log=None, user=None, password=None,
host=None, port=None, params={}):
cnx = MySQLConnection(path, log, user, password, host, port, params)
if not self._version:
self._version = get_pkginfo(MySQLdb).get('version',
MySQLdb.__version__)
mysql_info = 'server: "%s", client: "%s", thread-safe: %s' % \
(cnx.cnx.get_server_info(),
MySQLdb.get_client_info(),
MySQLdb.thread_safe())
self.env.systeminfo.extend([('MySQL', mysql_info),
('MySQLdb', self._version)])
self.required = True
return cnx
def get_exceptions(self):
return MySQLdb
def init_db(self, path, schema=None, log=None, user=None, password=None,
host=None, port=None, params={}):
cnx = self.get_connection(path, log, user, password, host, port,
params)
cursor = cnx.cursor()
utf8_size = {'utf8': 3, 'utf8mb4': 4}.get(cnx.charset)
if schema is None:
from trac.db_default import schema
for table in schema:
for stmt in self.to_sql(table, utf8_size=utf8_size):
self.log.debug(stmt)
cursor.execute(stmt)
cnx.commit()
def _collist(self, table, columns, utf8_size=3):
"""Take a list of columns and impose limits on each so that indexing
works properly.
Some Versions of MySQL limit each index prefix to 1000 bytes total,
with a max of 767 bytes per column.
"""
cols = []
limit_col = 767 / utf8_size
limit = min(1000 / (utf8_size * len(columns)), limit_col)
for c in columns:
name = '`%s`' % c
table_col = filter((lambda x: x.name == c), table.columns)
if len(table_col) == 1 and table_col[0].type.lower() == 'text':
if table_col[0].key_size is not None:
name += '(%d)' % min(table_col[0].key_size, limit_col)
elif name == '`rev`':
name += '(20)'
elif name == '`path`':
name += '(%d)' % limit_col
elif name == '`change_type`':
name += '(2)'
else:
name += '(%s)' % limit
# For non-text columns, we simply throw away the extra bytes.
# That could certainly be optimized better, but for now let's KISS.
cols.append(name)
return ','.join(cols)
def to_sql(self, table, utf8_size=3):
sql = ['CREATE TABLE %s (' % table.name]
coldefs = []
for column in table.columns:
ctype = column.type
ctype = _type_map.get(ctype, ctype)
if column.auto_increment:
ctype = 'INT UNSIGNED NOT NULL AUTO_INCREMENT'
# Override the column type, as a text field cannot
# use auto_increment.
column.type = 'int'
coldefs.append(' `%s` %s' % (column.name, ctype))
if len(table.key) > 0:
coldefs.append(' PRIMARY KEY (%s)' %
self._collist(table, table.key,
utf8_size=utf8_size))
sql.append(',\n'.join(coldefs) + '\n)')
yield '\n'.join(sql)
for index in table.indices:
unique = 'UNIQUE' if index.unique else ''
yield 'CREATE %s INDEX %s_%s_idx ON %s (%s);' % (unique, table.name,
'_'.join(index.columns), table.name,
self._collist(table, index.columns, utf8_size=utf8_size))
def alter_column_types(self, table, columns):
"""Yield SQL statements altering the type of one or more columns of
a table.
Type changes are specified as a `columns` dict mapping column names
to `(from, to)` SQL type tuples.
"""
alterations = []
for name, (from_, to) in sorted(columns.iteritems()):
to = _type_map.get(to, to)
if to != _type_map.get(from_, from_):
alterations.append((name, to))
if alterations:
yield "ALTER TABLE %s %s" % (table,
', '.join("MODIFY %s %s" % each
for each in alterations))
def backup(self, dest_file):
from subprocess import Popen, PIPE
db_url = self.env.config.get('trac', 'database')
scheme, db_prop = _parse_db_str(db_url)
db_params = db_prop.setdefault('params', {})
db_name = os.path.basename(db_prop['path'])
args = [self.mysqldump_path]
if 'host' in db_prop:
args.extend(['-h', db_prop['host']])
if 'port' in db_prop:
args.extend(['-P', str(db_prop['port'])])
if 'user' in db_prop:
args.extend(['-u', db_prop['user']])
for name, value in db_params.iteritems():
if name == 'compress' and as_int(value, 0):
args.append('--compress')
elif name == 'named_pipe' and as_int(value, 0):
args.append('--protocol=pipe')
elif name == 'read_default_file': # Must be first
args.insert(1, '--defaults-file=' + value)
elif name == 'unix_socket':
args.extend(['--protocol=socket', '--socket=' + value])
elif name not in ('init_command', 'read_default_group'):
self.log.warning("Invalid connection string parameter '%s'",
name)
args.extend(['-r', dest_file, db_name])
environ = os.environ.copy()
if 'password' in db_prop:
environ['MYSQL_PWD'] = str(db_prop['password'])
try:
p = Popen(args, env=environ, stderr=PIPE, close_fds=close_fds)
except OSError, e:
raise TracError(_("Unable to run %(path)s: %(msg)s",
path=self.mysqldump_path,
msg=exception_to_unicode(e)))
errmsg = p.communicate()[1]
if p.returncode != 0:
raise TracError(_("mysqldump failed: %(msg)s",
msg=to_unicode(errmsg.strip())))
if not os.path.exists(dest_file):
raise TracError(_("No destination file created"))
return dest_file
class MySQLConnection(ConnectionWrapper):
    """Connection wrapper for MySQL."""

    # Instances may be kept in the process-wide connection pool.
    poolable = True

    def __init__(self, path, log, user=None, password=None, host=None,
                 port=None, params={}):
        # `params` is only read here, so the shared mutable default is
        # harmless.
        if path.startswith('/'):
            path = path[1:]
        if password is None:
            password = ''
        if port is None:
            port = 3306
        opts = {}
        for name, value in params.iteritems():
            if name in ('init_command', 'read_default_file',
                        'read_default_group', 'unix_socket'):
                opts[name] = value
            elif name in ('compress', 'named_pipe'):
                opts[name] = as_int(value, 0)
            elif log:
                # Bug fix: `self.log` is not available yet --
                # ConnectionWrapper.__init__ only runs at the end of this
                # method -- so use the `log` argument directly.
                log.warning("Invalid connection string parameter '%s'",
                            name)
        cnx = MySQLdb.connect(db=path, user=user, passwd=password, host=host,
                              port=port, charset='utf8', **opts)
        if hasattr(cnx, 'encoders'):
            # 'encoders' undocumented but present since 1.2.1 (r422)
            cnx.encoders[Markup] = cnx.encoders[types.UnicodeType]
        cursor = cnx.cursor()
        cursor.execute("SHOW VARIABLES WHERE "
                       " variable_name='character_set_database'")
        self.charset = cursor.fetchone()[1]
        if self.charset != 'utf8':
            # Talk to the server in the database's own charset.
            cnx.query("SET NAMES %s" % self.charset)
            cnx.store_result()
        ConnectionWrapper.__init__(self, cnx, log)
        self._is_closed = False

    def cast(self, column, type):
        # MySQL CAST only accepts SIGNED/CHAR/... type names.
        if type == 'int' or type == 'int64':
            type = 'signed'
        elif type == 'text':
            type = 'char'
        return 'CAST(%s AS %s)' % (column, type)

    def concat(self, *args):
        return 'concat(%s)' % ', '.join(args)

    def like(self):
        """Return a case-insensitive LIKE clause."""
        return "LIKE %%s COLLATE %s_general_ci ESCAPE '/'" % self.charset

    def like_escape(self, text):
        # Prefix LIKE metacharacters with the '/' escape character declared
        # in like().
        return _like_escape_re.sub(r'/\1', text)

    def quote(self, identifier):
        """Return the quoted identifier."""
        return "`%s`" % identifier.replace('`', '``')

    def get_last_id(self, cursor, table, column='id'):
        return cursor.lastrowid

    def update_sequence(self, cursor, table, column='id'):
        # MySQL handles sequence updates automagically
        pass

    def rollback(self):
        self.cnx.ping()
        try:
            self.cnx.rollback()
        except MySQLdb.ProgrammingError:
            # Rolling back on a dead connection: mark it unusable.
            self._is_closed = True

    def close(self):
        if not self._is_closed:
            try:
                self.cnx.close()
            except MySQLdb.ProgrammingError:
                pass  # this error would mean it's already closed. So, ignore
            self._is_closed = True

    def cursor(self):
        return IterableCursor(MySQLUnicodeCursor(self.cnx), self.log)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/pool.py | trac/trac/db/pool.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import os
import time
from trac.core import TracError
from trac.db.util import ConnectionWrapper
from trac.util.concurrency import threading
from trac.util.text import exception_to_unicode
from trac.util.translation import _
class TimeoutError(Exception):
    """Exception raised by the connection pool when no connection has become
    available after a given timeout.

    Raised from `ConnectionPoolBackend.get_cnx` when waiting on the pool
    does not yield a usable connection.
    """
class PooledConnection(ConnectionWrapper):
    """A database connection that can be pooled. When closed, it gets returned
    to the pool.
    """

    def __init__(self, pool, cnx, key, tid, log=None):
        ConnectionWrapper.__init__(self, cnx, log)
        self._pool = pool
        self._key = key
        self._tid = tid

    def close(self):
        # Detach the wrapped connection and hand it back to the pool; a
        # second close() finds self.cnx already cleared and does nothing.
        wrapped = self.cnx
        if wrapped:
            self.cnx = None
            self.log = None
            self._pool._return_cnx(wrapped, self._key, self._tid)

    def __del__(self):
        # Garbage collection counts as closing the connection.
        self.close()
class ConnectionPoolBackend(object):
    """A process-wide LRU-based connection pool.
    """
    def __init__(self, maxsize):
        # Condition guarding all pool state below; also used to wake up
        # threads waiting for a connection to become available.
        self._available = threading.Condition(threading.RLock())
        self._maxsize = maxsize
        # (tid, key) -> (cnx, refcount) for connections currently in use.
        self._active = {}
        # Idle connections, with parallel lists of their key and the time
        # they were returned (oldest first).
        self._pool = []
        self._pool_key = []
        self._pool_time = []
        self._waiters = 0

    def get_cnx(self, connector, kwargs, timeout=None):
        """Get a connection for `connector`/`kwargs`, reusing the one the
        current thread already holds when possible.

        :raises TimeoutError: when no connection becomes available.
        """
        cnx = None
        log = kwargs.get('log')
        # Connections are keyed by the full set of connection kwargs.
        key = unicode(kwargs)
        start = time.time()
        tid = threading._get_ident()
        # Get a Connection, either directly or a deferred one
        with self._available:
            # First choice: Return the same cnx already used by the thread
            if (tid, key) in self._active:
                cnx, num = self._active[(tid, key)]
                num += 1
            else:
                if self._waiters == 0:
                    cnx = self._take_cnx(connector, kwargs, key, tid)
                if not cnx:
                    self._waiters += 1
                    self._available.wait()
                    self._waiters -= 1
                    cnx = self._take_cnx(connector, kwargs, key, tid)
                num = 1
            if cnx:
                self._active[(tid, key)] = (cnx, num)

        # A tuple is an ('op', cnx) placeholder from _take_cnx, meaning
        # some work (ping/close/create) must still be done.
        deferred = num == 1 and isinstance(cnx, tuple)
        err = None
        if deferred:
            # Potentially lengthy operations must be done without lock held
            op, cnx = cnx
            try:
                if op == 'ping':
                    cnx.ping()
                elif op == 'close':
                    cnx.close()
                if op in ('close', 'create'):
                    cnx = connector.get_connection(**kwargs)
            except TracError, e:
                err = e
                cnx = None
            except Exception, e:
                if log:
                    log.error('Exception caught on %s', op, exc_info=True)
                err = e
                cnx = None

        if cnx:
            if deferred:
                # replace placeholder with real Connection
                with self._available:
                    self._active[(tid, key)] = (cnx, num)
            return PooledConnection(self, cnx, key, tid, log)

        if deferred:
            # cnx couldn't be reused, clear placeholder
            with self._available:
                del self._active[(tid, key)]
            if op == 'ping':  # retry
                return self.get_cnx(connector, kwargs)

        # if we didn't get a cnx after wait(), something's fishy...
        timeout = time.time() - start
        errmsg = _("Unable to get database connection within %(time)d seconds.",
                   time=timeout)
        if err:
            errmsg += " (%s)" % exception_to_unicode(err)
        raise TimeoutError(errmsg)

    def _take_cnx(self, connector, kwargs, key, tid):
        """Note: _available lock must be held when calling this method.

        Returns a ready connection, an ('op', cnx) placeholder describing
        deferred work, or None when the pool is exhausted.
        """
        # Second best option: Reuse a live pooled connection
        if key in self._pool_key:
            idx = self._pool_key.index(key)
            self._pool_key.pop(idx)
            self._pool_time.pop(idx)
            cnx = self._pool.pop(idx)
            # If possible, verify that the pooled connection is
            # still available and working.
            if hasattr(cnx, 'ping'):
                return ('ping', cnx)
            return cnx
        # Third best option: Create a new connection
        elif len(self._active) + len(self._pool) < self._maxsize:
            return ('create', None)
        # Forth best option: Replace a pooled connection with a new one
        elif len(self._active) < self._maxsize:
            # Remove the LRU connection in the pool
            cnx = self._pool.pop(0)
            self._pool_key.pop(0)
            self._pool_time.pop(0)
            return ('close', cnx)

    def _return_cnx(self, cnx, key, tid):
        # Decrement active refcount, clear slot if 1
        with self._available:
            assert (tid, key) in self._active
            cnx, num = self._active[(tid, key)]
            if num == 1:
                del self._active[(tid, key)]
            else:
                self._active[(tid, key)] = (cnx, num - 1)
        if num == 1:
            # Reset connection outside of critical section
            try:
                cnx.rollback()  # resets the connection
            except Exception:
                cnx.close()
                cnx = None
            # Connection available, from reuse or from creation of a new one
            with self._available:
                if cnx and cnx.poolable:
                    self._pool.append(cnx)
                    self._pool_key.append(key)
                    self._pool_time.append(time.time())
                self._available.notify()

    def shutdown(self, tid=None):
        """Close pooled connections not used in a while"""
        # With no tid this is a global shutdown: close everything now.
        delay = 120
        if tid is None:
            delay = 0
        when = time.time() - delay
        with self._available:
            if tid is None:  # global shutdown, also close active connections
                for db, num in self._active.values():
                    db.close()
                self._active = {}
            # Pool lists are ordered oldest-first, so stop at the first
            # entry younger than the cutoff.
            while self._pool_time and self._pool_time[0] <= when:
                db = self._pool.pop(0)
                db.close()
                self._pool_key.pop(0)
                self._pool_time.pop(0)
# Single process-wide pool backend shared by all ConnectionPool facades;
# the size can be tuned through the TRAC_DB_POOL_SIZE environment variable.
_pool_size = int(os.environ.get('TRAC_DB_POOL_SIZE', 10))
_backend = ConnectionPoolBackend(_pool_size)
class ConnectionPool(object):
    """Thin facade delegating to the process-wide pool backend."""

    def __init__(self, maxsize, connector, **kwargs):
        # maxsize not used right now but kept for api compatibility
        self._connector, self._kwargs = connector, kwargs

    def get_cnx(self, timeout=None):
        """Fetch a pooled connection for this pool's connector settings."""
        return _backend.get_cnx(self._connector, self._kwargs, timeout)

    def shutdown(self, tid=None):
        """Delegate shutdown of idle (or, globally, all) connections."""
        _backend.shutdown(tid)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/tests/api.py | trac/trac/db/tests/api.py | # -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import unittest
from trac.db.api import DatabaseManager, _parse_db_str, get_column_names, \
with_transaction
from trac.test import EnvironmentStub, Mock
from trac.util.concurrency import ThreadLocal
class Connection(object):
    """Fake database connection recording whether commit() or rollback()
    was called (the class attributes serve as per-instance defaults)."""

    committed = False
    rolledback = False

    def commit(self):
        # Shadow the class-level default with an instance attribute.
        self.committed = True

    def rollback(self):
        self.rolledback = True
class Error(Exception):
    """Marker exception used to abort transactions in these tests."""
def make_env(get_cnx):
    """Create a mock environment whose `DatabaseManager` hands out
    connections via `get_cnx` and starts with empty transaction-local
    read/write connection slots."""
    return Mock(components={DatabaseManager:
             Mock(get_connection=get_cnx,
                  _transaction_local=ThreadLocal(wdb=None, rdb=None))})
class WithTransactionTest(unittest.TestCase):
    """Behavior of the `with_transaction` decorator: commit on success,
    rollback on failure, and nesting of implicit (pool-acquired) and
    explicit (caller-supplied db) transactions.  Note that the decorator
    runs the decorated function immediately, so assertions placed after
    the `def` observe the post-transaction state."""

    def test_successful_transaction(self):
        db = Connection()
        env = make_env(lambda: db)
        @with_transaction(env)
        def do_transaction(db):
            self.assertTrue(not db.committed and not db.rolledback)
        # Implicit transaction commits once the function returns.
        self.assertTrue(db.committed and not db.rolledback)

    def test_failed_transaction(self):
        db = Connection()
        env = make_env(lambda: db)
        try:
            @with_transaction(env)
            def do_transaction(db):
                self.assertTrue(not db.committed and not db.rolledback)
                raise Error()
            self.fail()
        except Error:
            pass
        # Implicit transaction rolls back and propagates the exception.
        self.assertTrue(not db.committed and db.rolledback)

    def test_implicit_nesting_success(self):
        env = make_env(Connection)
        dbs = [None, None]
        @with_transaction(env)
        def level0(db):
            dbs[0] = db
            @with_transaction(env)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        # Both levels share the same connection; only the outermost level
        # commits.
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(dbs[0].committed and not dbs[0].rolledback)

    def test_implicit_nesting_failure(self):
        env = make_env(Connection)
        dbs = [None, None]
        try:
            @with_transaction(env)
            def level0(db):
                dbs[0] = db
                try:
                    @with_transaction(env)
                    def level1(db):
                        dbs[1] = db
                        self.assertTrue(not db.committed and not db.rolledback)
                        raise Error()
                    self.fail()
                except Error:
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise
            self.fail()
        except Error:
            pass
        # Only the outermost level rolls back.
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and dbs[0].rolledback)

    def test_explicit_success(self):
        db = Connection()
        env = make_env(lambda: None)
        @with_transaction(env, db)
        def do_transaction(idb):
            self.assertTrue(idb is db)
            self.assertTrue(not db.committed and not db.rolledback)
        # An explicitly passed db is never committed by the decorator.
        self.assertTrue(not db.committed and not db.rolledback)

    def test_explicit_failure(self):
        db = Connection()
        env = make_env(lambda: None)
        try:
            @with_transaction(env, db)
            def do_transaction(idb):
                self.assertTrue(idb is db)
                self.assertTrue(not db.committed and not db.rolledback)
                raise Error()
            self.fail()
        except Error:
            pass
        # ... and never rolled back either.
        self.assertTrue(not db.committed and not db.rolledback)

    def test_implicit_in_explicit_success(self):
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        @with_transaction(env, db)
        def level0(db):
            dbs[0] = db
            @with_transaction(env)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        # The explicit outer transaction owns the connection: no commit.
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and not dbs[0].rolledback)

    def test_implicit_in_explicit_failure(self):
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        try:
            @with_transaction(env, db)
            def level0(db):
                dbs[0] = db
                @with_transaction(env)
                def level1(db):
                    dbs[1] = db
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise Error()
                self.fail()
            self.fail()
        except Error:
            pass
        # The explicit outer transaction owns the connection: no rollback.
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and not dbs[0].rolledback)

    def test_explicit_in_implicit_success(self):
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        @with_transaction(env)
        def level0(db):
            dbs[0] = db
            @with_transaction(env, db)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        # The implicit outer transaction commits as usual.
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(dbs[0].committed and not dbs[0].rolledback)

    def test_explicit_in_implicit_failure(self):
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        try:
            @with_transaction(env)
            def level0(db):
                dbs[0] = db
                @with_transaction(env, db)
                def level1(db):
                    dbs[1] = db
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise Error()
                self.fail()
            self.fail()
        except Error:
            pass
        # The implicit outer transaction rolls back as usual.
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and dbs[0].rolledback)

    def test_invalid_nesting(self):
        # Passing a *different* explicit connection inside an implicit
        # transaction is rejected with an AssertionError.
        env = make_env(Connection)
        try:
            @with_transaction(env)
            def level0(db):
                @with_transaction(env, Connection())
                def level1(db):
                    raise Error()
                raise Error()
            raise Error()
        except AssertionError:
            pass
class ParseConnectionStringTestCase(unittest.TestCase):
    """Checks `_parse_db_str` for the supported database URL syntaxes."""

    def test_sqlite_relative(self):
        # Default syntax for specifying DB path relative to the environment
        # directory
        self.assertEqual(('sqlite', {'path': 'db/trac.db'}),
                         _parse_db_str('sqlite:db/trac.db'))

    def test_sqlite_absolute(self):
        # Standard syntax
        self.assertEqual(('sqlite', {'path': '/var/db/trac.db'}),
                         _parse_db_str('sqlite:///var/db/trac.db'))
        # Legacy syntax
        self.assertEqual(('sqlite', {'path': '/var/db/trac.db'}),
                         _parse_db_str('sqlite:/var/db/trac.db'))

    def test_sqlite_with_timeout_param(self):
        # Query-string parameters end up in the 'params' dict.
        self.assertEqual(('sqlite', {'path': 'db/trac.db',
                                     'params': {'timeout': '10000'}}),
                         _parse_db_str('sqlite:db/trac.db?timeout=10000'))

    def test_sqlite_windows_path(self):
        # On Windows, the legacy 'C|/...' drive syntax maps to 'C:/...'.
        os_name = os.name
        try:
            os.name = 'nt'
            self.assertEqual(('sqlite', {'path': 'C:/project/db/trac.db'}),
                             _parse_db_str('sqlite:C|/project/db/trac.db'))
        finally:
            os.name = os_name

    def test_postgres_simple(self):
        self.assertEqual(('postgres', {'host': 'localhost', 'path': '/trac'}),
                         _parse_db_str('postgres://localhost/trac'))

    def test_postgres_with_port(self):
        self.assertEqual(('postgres', {'host': 'localhost', 'port': 9431,
                                       'path': '/trac'}),
                         _parse_db_str('postgres://localhost:9431/trac'))

    def test_postgres_with_creds(self):
        self.assertEqual(('postgres', {'user': 'john', 'password': 'letmein',
                                       'host': 'localhost', 'port': 9431,
                                       'path': '/trac'}),
                 _parse_db_str('postgres://john:letmein@localhost:9431/trac'))

    def test_postgres_with_quoted_password(self):
        # Percent-encoded characters in the password must be unquoted.
        self.assertEqual(('postgres', {'user': 'john', 'password': ':@/',
                                       'host': 'localhost', 'path': '/trac'}),
                     _parse_db_str('postgres://john:%3a%40%2f@localhost/trac'))

    def test_mysql_simple(self):
        self.assertEqual(('mysql', {'host': 'localhost', 'path': '/trac'}),
                     _parse_db_str('mysql://localhost/trac'))

    def test_mysql_with_creds(self):
        self.assertEqual(('mysql', {'user': 'john', 'password': 'letmein',
                                    'host': 'localhost', 'port': 3306,
                                    'path': '/trac'}),
                 _parse_db_str('mysql://john:letmein@localhost:3306/trac'))
class StringsTestCase(unittest.TestCase):
    """String handling round-trips through the database: unicode, the
    `empty` marker, Markup instances, and identifier quoting."""

    def setUp(self):
        self.env = EnvironmentStub()

    def test_insert_unicode(self):
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-unicode', u'ünicöde'))
        self.assertEqual([(u'ünicöde',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-unicode'"))

    def test_insert_empty(self):
        from trac.util.text import empty
        # The `empty` marker must be stored as an empty string.
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-empty', empty))
        self.assertEqual([(u'',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-empty'"))

    def test_insert_markup(self):
        from genshi.core import Markup
        # Markup values are stored as their unicode content, unescaped.
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-markup', Markup(u'<em>märkup</em>')))
        self.assertEqual([(u'<em>märkup</em>',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-markup'"))

    def test_quote(self):
        # quote() must make even pathological identifiers usable.
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute('SELECT 1 AS %s' % \
                       db.quote(r'alpha\`\"\'\\beta``gamma""delta'))
        self.assertEqual(r'alpha\`\"\'\\beta``gamma""delta',
                         get_column_names(cursor)[0])
class ConnectionTestCase(unittest.TestCase):
    """Exercises `get_last_id` and `update_sequence` against the test
    database."""

    def setUp(self):
        self.env = EnvironmentStub()

    def tearDown(self):
        self.env.reset_db()

    def test_get_last_id(self):
        id1 = id2 = None
        q = "INSERT INTO report (author) VALUES ('anonymous')"
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute(q)
            # Row ID correct before...
            id1 = db.get_last_id(cursor, 'report')
            self.assertNotEqual(0, id1)
            db.commit()
            cursor.execute(q)
            # ... and after commit()
            db.commit()
            id2 = db.get_last_id(cursor, 'report')
        self.assertEqual(id1 + 1, id2)

    def test_update_sequence(self):
        # Insert a row with an explicit id, then resync the sequence so the
        # next auto-generated id continues from there.
        self.env.db_transaction(
            "INSERT INTO report (id, author) VALUES (42, 'anonymous')")
        with self.env.db_transaction as db:
            cursor = db.cursor()
            db.update_sequence(cursor, 'report', 'id')
        self.env.db_transaction(
            "INSERT INTO report (author) VALUES ('next-id')")
        self.assertEqual(43, self.env.db_query(
            "SELECT id FROM report WHERE author='next-id'")[0][0])
def suite():
    """Aggregate the trac.db API test cases into one suite."""
    tests = unittest.TestSuite()
    for case in (ParseConnectionStringTestCase, StringsTestCase,
                 ConnectionTestCase, WithTransactionTest):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/tests/util.py | trac/trac/db/tests/util.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.db.util import sql_escape_percent
# TODO: test IterableCursor, ConnectionWrapper
class SQLEscapeTestCase(unittest.TestCase):
    """Checks `sql_escape_percent`, which doubles '%' characters only when
    they appear inside single-quoted string literals."""

    def test_sql_escape_percent(self):
        # '%' outside of quoted literals is left alone...
        self.assertEqual("%", sql_escape_percent("%"))
        # ...but doubled inside them.
        self.assertEqual("'%%'", sql_escape_percent("'%'"))
        self.assertEqual("''%''", sql_escape_percent("''%''"))
        self.assertEqual("'''%%'''", sql_escape_percent("'''%'''"))
        self.assertEqual("'''%%'", sql_escape_percent("'''%'"))
        self.assertEqual("%s", sql_escape_percent("%s"))
        self.assertEqual("% %", sql_escape_percent("% %"))
        self.assertEqual("%s %i", sql_escape_percent("%s %i"))
        self.assertEqual("'%%s'", sql_escape_percent("'%s'"))
        self.assertEqual("'%% %%'", sql_escape_percent("'% %'"))
        self.assertEqual("'%%s %%i'", sql_escape_percent("'%s %i'"))
def suite():
    """Return the test suite for trac.db.util."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(SQLEscapeTestCase, 'test'))
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/tests/postgres_test.py | trac/trac/db/tests/postgres_test.py | # -*- coding: utf-8 -*-
import re
import unittest
from trac.db import Table, Column, Index
from trac.db.postgres_backend import PostgreSQLConnector, assemble_pg_dsn
from trac.test import EnvironmentStub
class PostgresTableCreationSQLTest(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
def _unroll_generator(self, generator):
items = []
for item in generator:
items.append(item)
return items
def _normalize_sql(self, sql_generator):
normalized_commands = []
whitespace_regex = re.compile(r'\s+')
commands = self._unroll_generator(sql_generator)
for command in commands:
command = command.replace('\n', '')
command = whitespace_regex.sub(' ', command)
normalized_commands.append(command)
return normalized_commands
def test_quote_table_name(self):
table = Table('foo bar')
table[Column('name'),]
sql_generator = PostgreSQLConnector(self.env).to_sql(table)
sql_commands = self._normalize_sql(sql_generator)
self.assertEqual(1, len(sql_commands))
self.assertEqual('CREATE TABLE "foo bar" ( "name" text)',
sql_commands[0])
def test_quote_column_names(self):
table = Table('foo')
table[Column('my name'),]
sql_generator = PostgreSQLConnector(self.env).to_sql(table)
sql_commands = self._normalize_sql(sql_generator)
self.assertEqual(1, len(sql_commands))
self.assertEqual('CREATE TABLE "foo" ( "my name" text)',
sql_commands[0])
def test_quote_compound_primary_key_declaration(self):
table = Table('foo bar', key=['my name', 'your name'])
table[Column('my name'), Column('your name'),]
sql_generator = PostgreSQLConnector(self.env).to_sql(table)
sql_commands = self._normalize_sql(sql_generator)
self.assertEqual(1, len(sql_commands))
expected_sql = 'CREATE TABLE "foo bar" ( "my name" text, ' + \
'"your name" text, CONSTRAINT "foo bar_pk" ' +\
'PRIMARY KEY ("my name","your name"))'
self.assertEqual(expected_sql, sql_commands[0])
def test_quote_index_declaration(self):
table = Table('foo')
table[Column('my name'), Index(['my name'])]
sql_generator = PostgreSQLConnector(self.env).to_sql(table)
sql_commands = self._normalize_sql(sql_generator)
self.assertEqual(2, len(sql_commands))
self.assertEqual('CREATE TABLE "foo" ( "my name" text)',
sql_commands[0])
index_sql = 'CREATE INDEX "foo_my name_idx" ON "foo" ("my name")'
self.assertEqual(index_sql, sql_commands[1])
def test_quote_index_declaration_for_multiple_indexes(self):
table = Table('foo')
table[Column('a'), Column('b'),
Index(['a', 'b'])]
sql_generator = PostgreSQLConnector(self.env).to_sql(table)
sql_commands = self._normalize_sql(sql_generator)
self.assertEqual(2, len(sql_commands))
self.assertEqual('CREATE TABLE "foo" ( "a" text, "b" text)',
sql_commands[0])
index_sql = 'CREATE INDEX "foo_a_b_idx" ON "foo" ("a","b")'
self.assertEqual(index_sql, sql_commands[1])
def test_assemble_dsn(self):
values = [
{'path': 't', 'user': 't'},
{'path': 't', 'password': 't'},
{'path': 't', 'host': 't'},
{'path': 't', 'port': 't'},
{'path': 't', 'password': 't', 'user': 't'},
{'path': 't', 'host': 't', 'user': 't'},
{'path': 't', 'user': 't', 'port': 't'},
{'path': 't', 'host': 't', 'password': 't'},
{'path': 't', 'password': 't', 'port': 't'},
{'path': 't', 'host': 't', 'port': 't'},
{'path': 't', 'host': 't', 'password': 't', 'user': 't'},
{'path': 't', 'password': 't', 'user': 't', 'port': 't'},
{'path': 't', 'host': 't', 'user': 't', 'port': 't'},
{'path': 't', 'host': 't', 'password': 't', 'port': 't'},
]
for orig in values:
dsn = assemble_pg_dsn(**orig)
for k, v in orig.iteritems():
orig[k] = "'%s'" % v
continue
orig['dbname'] = "'t'"
del orig['path']
new_values = {'dbname': "'t'"}
for key_value in dsn.split(' '):
k, v = key_value.split('=')
new_values[k] = v
continue
self.assertEqual(new_values, orig)
continue
class PostgresTableAlterationSQLTest(unittest.TestCase):
    """Checks the ALTER TABLE statements produced by
    `PostgreSQLConnector.alter_column_types`."""

    def setUp(self):
        self.env = EnvironmentStub()

    def test_alter_column_types(self):
        connector = PostgreSQLConnector(self.env)
        sql = connector.alter_column_types('milestone',
                                           {'due': ('int', 'int64'),
                                            'completed': ('int', 'int64')})
        sql = list(sql)
        # Both columns change type, emitted in one statement sorted by
        # column name.
        self.assertEqual([
            "ALTER TABLE milestone "
                "ALTER COLUMN completed TYPE bigint, "
                "ALTER COLUMN due TYPE bigint",
            ], sql)

    def test_alter_column_types_same(self):
        connector = PostgreSQLConnector(self.env)
        sql = connector.alter_column_types('milestone',
                                           {'due': ('int', 'int'),
                                            'completed': ('int', 'int64')})
        sql = list(sql)
        # Columns whose type does not actually change are skipped.
        self.assertEqual([
            "ALTER TABLE milestone "
                "ALTER COLUMN completed TYPE bigint",
            ], sql)

    def test_alter_column_types_none(self):
        connector = PostgreSQLConnector(self.env)
        sql = connector.alter_column_types('milestone',
                                           {'due': ('int', 'int')})
        # Nothing changes, so no statement at all is generated.
        self.assertEqual([], list(sql))
def suite():
    """Aggregate this module's test cases into a single suite."""
    tests = unittest.TestSuite()
    for case in (PostgresTableCreationSQLTest,
                 PostgresTableAlterationSQLTest):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests
# Allow running this test module standalone.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/tests/functional.py | trac/trac/db/tests/functional.py | #!/usr/bin/python
import os
from trac.tests.functional import *
class DatabaseBackupTestCase(FunctionalTestCaseSetup):
    # Functional test: exercises Environment.backup() against the live
    # test environment created by FunctionalTestCaseSetup.
    def runTest(self):
        """Testing backup"""
        env = self._testenv.get_trac_environment()
        # raises TracError if backup fails
        backup_file = env.backup()
        # The backup must both exist on disk and contain data.
        self.assertTrue(os.path.exists(backup_file),
                        'Backup file was not created.')
        self.assertNotEqual(os.path.getsize(backup_file), 0,
                            'Backup file is zero length.')
def functionalSuite(suite=None):
    """Append this module's functional tests to `suite`.

    A fresh base functional suite is created when none is given.
    """
    if not suite:
        from trac.tests.functional import testcases
        suite = testcases.functionalSuite()
    suite.addTest(DatabaseBackupTestCase())
    return suite
# NOTE(review): `unittest` is expected to come from the wildcard import of
# trac.tests.functional above -- confirm before running standalone.
if __name__ == '__main__':
    unittest.main(defaultTest='functionalSuite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/tests/__init__.py | trac/trac/db/tests/__init__.py | import unittest
from trac.db.tests import api, mysql_test, postgres_test, util
from trac.db.tests.functional import functionalSuite
def suite():
    """Aggregate all trac.db test suites into one."""
    all_tests = unittest.TestSuite()
    for module in (api, mysql_test, postgres_test, util):
        all_tests.addTest(module.suite())
    return all_tests
# Allow running the whole trac.db test package standalone.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/db/tests/mysql_test.py | trac/trac/db/tests/mysql_test.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.db.mysql_backend import MySQLConnector
from trac.test import EnvironmentStub
class MySQLTableAlterationSQLTest(unittest.TestCase):
    """Checks the ALTER TABLE statements emitted by
    MySQLConnector.alter_column_types()."""

    def setUp(self):
        self.env = EnvironmentStub()

    def test_alter_column_types(self):
        """Two changed columns are folded into a single statement."""
        stmts = list(MySQLConnector(self.env).alter_column_types(
            'milestone', {'due': ('int', 'int64'),
                          'completed': ('int', 'int64')}))
        self.assertEqual(["ALTER TABLE milestone "
                          "MODIFY completed bigint, "
                          "MODIFY due bigint"],
                         stmts)

    def test_alter_column_types_same(self):
        """A column whose type does not change is left out."""
        stmts = list(MySQLConnector(self.env).alter_column_types(
            'milestone', {'due': ('int', 'int'),
                          'completed': ('int', 'int64')}))
        self.assertEqual(["ALTER TABLE milestone "
                          "MODIFY completed bigint"],
                         stmts)

    def test_alter_column_types_none(self):
        """No SQL at all when no column type changes."""
        stmts = MySQLConnector(self.env).alter_column_types(
            'milestone', {'due': ('int', 'int')})
        self.assertEqual([], list(stmts))
def suite():
    """Aggregate this module's test cases into a single suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(MySQLTableAlterationSQLTest, 'test'))
    return tests
# Allow running this test module standalone.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/report.py | trac/trac/ticket/report.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Christian Boos <cboos@edgewall.org>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
from __future__ import with_statement
import csv
import re
from StringIO import StringIO
from genshi.builder import tag
from trac.config import IntOption
from trac.core import *
from trac.db import get_column_names
from trac.perm import IPermissionRequestor
from trac.resource import Resource, ResourceNotFound
from trac.ticket.api import TicketSystem
from trac.util import as_int, content_disposition
from trac.util.datefmt import format_datetime, format_time, from_utimestamp
from trac.util.presentation import Paginator
from trac.util.text import exception_to_unicode, to_unicode, quote_query_string
from trac.util.translation import _, tag_
from trac.web.api import IRequestHandler, RequestDone
from trac.web.chrome import (INavigationContributor, Chrome,
add_ctxtnav, add_link, add_notice, add_script,
add_stylesheet, add_warning, auth_link,
web_context)
from trac.wiki import IWikiSyntaxProvider, WikiParser
# Placeholder tokens a report author may embed in the report SQL to control
# where the dynamic ORDER BY column and the LIMIT/OFFSET clause are
# substituted (see execute_paginated_report()).
SORT_COLUMN = '@SORT_COLUMN@'
LIMIT_OFFSET = '@LIMIT_OFFSET@'
def cell_value(v):
    """Normalize a cell value for display.
    >>> (cell_value(None), cell_value(0), cell_value(1), cell_value('v'))
    ('', '0', u'1', u'v')
    """
    # The previous `v is 0` identity test only worked because CPython
    # caches small integers; it is undefined behavior on other
    # implementations.  Test by type and value instead: integer zero
    # (int or long) renders as '0', while other falsy values -- None,
    # '', False, 0.0 -- render as '' exactly as before (bool must be
    # excluded explicitly since False == 0).
    if isinstance(v, (int, long)) and not isinstance(v, bool) and v == 0:
        return '0'
    return unicode(v) if v else ''
_sql_re = re.compile(r'''
--.*$ # single line "--" comment
| /\*([^*/]|\*[^/]|/[^*])*\*/ # C style comment
| '(\\.|[^'\\])*' # literal string
| \([^()]+\) # parenthesis group
''', re.MULTILINE | re.VERBOSE)
def _expand_with_space(m):
return ' ' * len(m.group(0))
def sql_skeleton(sql):
    """Strip an SQL query to leave only its toplevel structure.

    Comments, string literals and parenthesized groups are blanked out
    (replaced with spaces of the same length) repeatedly until a fixed
    point is reached, so every offset into the skeleton matches the same
    offset in the original query.

    This is probably not 100% robust but should be enough for most
    needs.
    """
    while True:
        blanked = _sql_re.sub(_expand_with_space, sql)
        if blanked == sql:
            return sql
        sql = blanked
_order_by_re = re.compile(r'ORDER\s+BY', re.MULTILINE)
def split_sql(sql, clause_re, skel=None):
    """Split an SQL query around a toplevel clause.

    `clause_re` is a regexp matching the clause keyword(s); the query is
    assumed to contain at most one such clause at the outermost level.
    Returns a `(before, after)` pair of the text surrounding the clause,
    or `(sql, '')` when no single clause separator is found.  `skel` may
    be a precomputed `sql_skeleton(sql)` to avoid recomputing it.
    """
    if skel is None:
        skel = sql_skeleton(sql)
    parts = clause_re.split(skel.upper())
    if len(parts) != 2:
        return sql, ''  # no single clause separator
    # Map the skeleton's split offsets back onto the original query text.
    return sql[:len(parts[0])], sql[-len(parts[1]):]
class ReportModule(Component):
    """Web module for listing, viewing, creating and editing SQL reports."""

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               IWikiSyntaxProvider)

    # [report] items_per_page -- page size for HTML report views.
    items_per_page = IntOption('report', 'items_per_page', 100,
        """Number of tickets displayed per page in ticket reports,
        by default (''since 0.11'')""")

    # [report] items_per_page_rss -- 0 means "no limit" for RSS feeds.
    items_per_page_rss = IntOption('report', 'items_per_page_rss', 0,
        """Number of tickets displayed in the rss feeds for reports
        (''since 0.11'')""")
# INavigationContributor methods
    def get_active_navigation_item(self, req):
        # Reports are part of the "View Tickets" main navigation entry.
        return 'tickets'
    def get_navigation_items(self, req):
        # Contribute the "View Tickets" entry only for users allowed to
        # view reports.
        if 'REPORT_VIEW' in req.perm:
            yield ('mainnav', 'tickets', tag.a(_('View Tickets'),
                                               href=req.href.report()))
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['REPORT_CREATE', 'REPORT_DELETE', 'REPORT_MODIFY',
'REPORT_SQL_VIEW', 'REPORT_VIEW']
return actions + [('REPORT_ADMIN', actions)]
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/report(?:/(?:([0-9]+)|-1))?$', req.path_info)
if match:
if match.group(1):
req.args['id'] = match.group(1)
return True
    def process_request(self, req):
        """Dispatch report requests: list, view, create, edit and delete.

        Returns a `(template, data, content_type)` triple for Genshi
        rendering; write actions (POST) redirect instead of returning.
        """
        req.perm.require('REPORT_VIEW')

        # did the user ask for any special report?
        id = int(req.args.get('id', -1))
        action = req.args.get('action', 'view')

        data = {}
        if req.method == 'POST':
            # Each _do_* helper redirects on completion, so no template is
            # rendered on this branch.  NOTE(review): a POST with an
            # unrecognized action would leave `template` unbound below.
            if action == 'new':
                self._do_create(req)
            elif action == 'delete':
                self._do_delete(req, id)
            elif action == 'edit':
                self._do_save(req, id)
        elif action in ('copy', 'edit', 'new'):
            template = 'report_edit.html'
            data = self._render_editor(req, id, action=='copy')
            Chrome(self.env).add_wiki_toolbars(req)
        elif action == 'delete':
            template = 'report_delete.html'
            data = self._render_confirm_delete(req, id)
        elif id == -1:
            # No specific report requested: show the report list.
            template, data, content_type = self._render_list(req)
            if content_type: # i.e. alternate format
                return template, data, content_type
            if action == 'clear':
                # Drop the query-module session state saved by
                # _render_view() below.
                if 'query_href' in req.session:
                    del req.session['query_href']
                if 'query_tickets' in req.session:
                    del req.session['query_tickets']
        else:
            template, data, content_type = self._render_view(req, id)
            if content_type: # i.e. alternate format
                return template, data, content_type

        if id != -1 or action == 'new':
            add_ctxtnav(req, _('Available Reports'), href=req.href.report())
            add_link(req, 'up', req.href.report(), _('Available Reports'))
        else:
            add_ctxtnav(req, _('Available Reports'))

        # Kludge: only show link to custom query if the query module
        # is actually enabled
        from trac.ticket.query import QueryModule
        if 'TICKET_VIEW' in req.perm and \
                self.env.is_component_enabled(QueryModule):
            add_ctxtnav(req, _('Custom Query'), href=req.href.query())
            data['query_href'] = req.href.query()
            data['saved_query_href'] = req.session.get('query_href')
        else:
            data['query_href'] = None

        add_stylesheet(req, 'common/css/report.css')
        return template, data, None
# Internal methods
    def _do_create(self, req):
        """Create a new report from the submitted form fields and redirect
        to it (or back to the list on cancel)."""
        req.perm.require('REPORT_CREATE')

        if 'cancel' in req.args:
            req.redirect(req.href.report())

        title = req.args.get('title', '')
        query = req.args.get('query', '')
        description = req.args.get('description', '')
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute("""
                INSERT INTO report (title,query,description) VALUES (%s,%s,%s)
                """, (title, query, description))
            # Fetch the id assigned to the new row while still inside the
            # transaction.
            report_id = db.get_last_id(cursor, 'report')
        add_notice(req, _("The report has been created."))
        req.redirect(req.href.report(report_id))
    def _do_delete(self, req, id):
        """Delete report `id` (unless cancelled) and redirect to the
        report list."""
        req.perm.require('REPORT_DELETE')

        if 'cancel' in req.args:
            req.redirect(req.href.report(id))

        self.env.db_transaction("DELETE FROM report WHERE id=%s", (id,))
        add_notice(req, _("The report {%(id)d} has been deleted.", id=id))
        req.redirect(req.href.report())
    def _do_save(self, req, id):
        """Save report changes to the database"""
        req.perm.require('REPORT_MODIFY')
        if 'cancel' not in req.args:
            title = req.args.get('title', '')
            query = req.args.get('query', '')
            description = req.args.get('description', '')
            self.env.db_transaction("""
                UPDATE report SET title=%s, query=%s, description=%s
                WHERE id=%s
                """, (title, query, description, id))
            add_notice(req, _("Your changes have been saved."))
        # Redirect back to the report whether saved or cancelled.
        req.redirect(req.href.report(id))
    def _render_confirm_delete(self, req, id):
        """Build the template data for the delete-confirmation page.

        Raises TracError when report `id` does not exist.
        """
        req.perm.require('REPORT_DELETE')
        # for/else idiom: return from the first (only) matching row; the
        # else clause runs when the query yielded no row at all.
        for title, in self.env.db_query("""
                SELECT title FROM report WHERE id=%s
                """, (id,)):
            return {'title': _("Delete Report {%(num)s} %(title)s", num=id,
                               title=title),
                    'action': 'delete',
                    'report': {'id': id, 'title': title}}
        else:
            raise TracError(_("Report {%(num)s} does not exist.", num=id),
                            _("Invalid Report Number"))
    def _render_editor(self, req, id, copy):
        """Build the template data for the report editor.

        `id` is -1 when creating a new report; `copy` requests that an
        existing report be duplicated rather than edited in place.
        Raises TracError when an existing `id` cannot be found.
        """
        if id != -1:
            req.perm.require('REPORT_MODIFY')
            # for/else: break on the first row, raise when there is none.
            for title, description, query in self.env.db_query(
                    "SELECT title, description, query FROM report WHERE id=%s",
                    (id,)):
                break
            else:
                raise TracError(_("Report {%(num)s} does not exist.", num=id),
                                _("Invalid Report Number"))
        else:
            req.perm.require('REPORT_CREATE')
            title = description = query = ''

        # an explicitly given 'query' parameter will override the saved query
        query = req.args.get('query', query)

        if copy:
            title += ' (copy)'

        if copy or id == -1:
            data = {'title': _('Create New Report'),
                    'action': 'new',
                    'error': None}
        else:
            data = {'title': _('Edit Report {%(num)d} %(title)s', num=id,
                               title=title),
                    'action': 'edit',
                    'error': req.args.get('error')}

        data['report'] = {'id': id, 'title': title,
                          'sql': query, 'description': description}
        return data
    def _render_list(self, req):
        """Render the list of available reports."""
        sort = req.args.get('sort', 'report')
        asc = bool(int(req.args.get('asc', 1)))
        format = req.args.get('format')

        # The %-interpolated values are drawn from fixed whitelists
        # ('title'/'id' and ''/'DESC'), so this cannot inject SQL.
        rows = self.env.db_query("""
                SELECT id, title, description FROM report ORDER BY %s %s
                """ % ('title' if sort == 'title' else 'id',
                       '' if asc else 'DESC'))

        if format == 'rss':
            data = {'rows': rows}
            return 'report_list.rss', data, 'application/rss+xml'
        elif format == 'csv':
            self._send_csv(req, ['report', 'title', 'description'],
                           rows, mimetype='text/csv',
                           filename='reports.csv')
        elif format == 'tab':
            self._send_csv(req, ['report', 'title', 'description'],
                           rows, '\t', mimetype='text/tab-separated-values',
                           filename='reports.tsv')

        def report_href(**kwargs):
            # Preserve the current sort order in the generated links.
            return req.href.report(sort=req.args.get('sort'),
                                   asc='1' if asc else '0', **kwargs)

        add_link(req, 'alternate',
                 auth_link(req, report_href(format='rss')),
                 _('RSS Feed'), 'application/rss+xml', 'rss')
        add_link(req, 'alternate', report_href(format='csv'),
                 _('Comma-delimited Text'), 'text/plain')
        add_link(req, 'alternate', report_href(format='tab'),
                 _('Tab-delimited Text'), 'text/plain')

        # Attach per-report modify/delete capability flags for the template.
        reports = [(id, title, description,
                    'REPORT_MODIFY' in req.perm('report', id),
                    'REPORT_DELETE' in req.perm('report', id))
                   for id, title, description in rows]
        data = {'reports': reports, 'sort': sort, 'asc': asc}
        return 'report_list.html', data, None
_html_cols = set(['__class__', '__style__', '__color__', '__fgcolor__',
'__bgcolor__', '__grouplink__'])
def _render_view(self, req, id):
"""Retrieve the report results and pre-process them for rendering."""
for title, sql, description in self.env.db_query("""
SELECT title, query, description from report WHERE id=%s
""", (id,)):
break
else:
raise ResourceNotFound(_("Report {%(num)s} does not exist.",
num=id), _("Invalid Report Number"))
try:
args = self.get_var_args(req)
except ValueError, e:
raise TracError(_("Report failed: %(error)s", error=e))
# If this is a saved custom query, redirect to the query module
#
# A saved query is either an URL query (?... or query:?...),
# or a query language expression (query:...).
#
# It may eventually contain newlines, for increased clarity.
#
query = ''.join([line.strip() for line in sql.splitlines()])
if query and (query[0] == '?' or query.startswith('query:?')):
query = query if query[0] == '?' else query[6:]
report_id = 'report=%s' % id
if 'report=' in query:
if not report_id in query:
err = _('When specified, the report number should be '
'"%(num)s".', num=id)
req.redirect(req.href.report(id, action='edit', error=err))
else:
if query[-1] != '?':
query += '&'
query += report_id
req.redirect(req.href.query() + quote_query_string(query))
elif query.startswith('query:'):
try:
from trac.ticket.query import Query, QuerySyntaxError
query = Query.from_string(self.env, query[6:], report=id)
req.redirect(query.get_href(req))
except QuerySyntaxError, e:
req.redirect(req.href.report(id, action='edit',
error=to_unicode(e)))
format = req.args.get('format')
if format == 'sql':
self._send_sql(req, id, title, description, sql)
title = '{%i} %s' % (id, title)
report_resource = Resource('report', id)
req.perm.require('REPORT_VIEW', report_resource)
context = web_context(req, report_resource)
page = int(req.args.get('page', '1'))
default_max = {'rss': self.items_per_page_rss,
'csv': 0, 'tab': 0}.get(format, self.items_per_page)
max = req.args.get('max')
limit = as_int(max, default_max, min=0) # explict max takes precedence
offset = (page - 1) * limit
sort_col = req.args.get('sort', '')
asc = req.args.get('asc', 1)
asc = bool(int(asc)) # string '0' or '1' to int/boolean
def report_href(**kwargs):
"""Generate links to this report preserving user variables,
and sorting and paging variables.
"""
params = args.copy()
if sort_col:
params['sort'] = sort_col
params['page'] = page
if max:
params['max'] = max
params.update(kwargs)
params['asc'] = '1' if params.get('asc', asc) else '0'
return req.href.report(id, params)
data = {'action': 'view',
'report': {'id': id, 'resource': report_resource},
'context': context,
'title': title, 'description': description,
'max': limit, 'args': args, 'show_args_form': False,
'message': None, 'paginator': None,
'report_href': report_href,
}
res = None
with self.env.db_query as db:
res = self.execute_paginated_report(req, db, id, sql, args, limit,
offset)
if len(res) == 2:
e, sql = res
data['message'] = \
tag_("Report execution failed: %(error)s %(sql)s",
error=tag.pre(exception_to_unicode(e)),
sql=tag(tag.hr(),
tag.pre(sql, style="white-space: pre")))
return 'report_view.html', data, None
cols, results, num_items, missing_args, limit_offset = res
need_paginator = limit > 0 and limit_offset
need_reorder = limit_offset is None
results = [list(row) for row in results]
numrows = len(results)
paginator = None
if need_paginator:
paginator = Paginator(results, page - 1, limit, num_items)
data['paginator'] = paginator
if paginator.has_next_page:
add_link(req, 'next', report_href(page=page + 1),
_('Next Page'))
if paginator.has_previous_page:
add_link(req, 'prev', report_href(page=page - 1),
_('Previous Page'))
pagedata = []
shown_pages = paginator.get_shown_pages(21)
for p in shown_pages:
pagedata.append([report_href(page=p), None, str(p),
_('Page %(num)d', num=p)])
fields = ['href', 'class', 'string', 'title']
paginator.shown_pages = [dict(zip(fields, p)) for p in pagedata]
paginator.current_page = {'href': None, 'class': 'current',
'string': str(paginator.page + 1),
'title': None}
numrows = paginator.num_items
# Place retrieved columns in groups, according to naming conventions
# * _col_ means fullrow, i.e. a group with one header
# * col_ means finish the current group and start a new one
field_labels = TicketSystem(self.env).get_ticket_field_labels()
header_groups = [[]]
for idx, col in enumerate(cols):
if col in field_labels:
title = field_labels[col]
else:
title = col.strip('_').capitalize()
header = {
'col': col,
'title': title,
'hidden': False,
'asc': None,
}
if col == sort_col:
header['asc'] = asc
if not paginator and need_reorder:
# this dict will have enum values for sorting
# and will be used in sortkey(), if non-empty:
sort_values = {}
if sort_col in ('status', 'resolution', 'priority',
'severity'):
# must fetch sort values for that columns
# instead of comparing them as strings
with self.env.db_query as db:
for name, value in db(
"SELECT name, %s FROM enum WHERE type=%%s"
% db.cast('value', 'int'),
(sort_col,)):
sort_values[name] = value
def sortkey(row):
val = row[idx]
# check if we have sort_values, then use them as keys.
if sort_values:
return sort_values.get(val)
# otherwise, continue with string comparison:
if isinstance(val, basestring):
val = val.lower()
return val
results = sorted(results, key=sortkey, reverse=(not asc))
header_group = header_groups[-1]
if col.startswith('__') and col.endswith('__'): # __col__
header['hidden'] = True
elif col[0] == '_' and col[-1] == '_': # _col_
header_group = []
header_groups.append(header_group)
header_groups.append([])
elif col[0] == '_': # _col
header['hidden'] = True
elif col[-1] == '_': # col_
header_groups.append([])
header_group.append(header)
# Structure the rows and cells:
# - group rows according to __group__ value, if defined
# - group cells the same way headers are grouped
chrome = Chrome(self.env)
row_groups = []
authorized_results = []
prev_group_value = None
for row_idx, result in enumerate(results):
col_idx = 0
cell_groups = []
row = {'cell_groups': cell_groups}
realm = 'ticket'
parent_realm = ''
parent_id = ''
email_cells = []
for header_group in header_groups:
cell_group = []
for header in header_group:
value = cell_value(result[col_idx])
cell = {'value': value, 'header': header, 'index': col_idx}
col = header['col']
col_idx += 1
# Detect and create new group
if col == '__group__' and value != prev_group_value:
prev_group_value = value
# Brute force handling of email in group by header
row_groups.append(
(value and chrome.format_author(req, value), []))
# Other row properties
row['__idx__'] = row_idx
if col in self._html_cols:
row[col] = value
if col in ('report', 'ticket', 'id', '_id'):
row['id'] = value
# Special casing based on column name
col = col.strip('_')
if col in ('reporter', 'cc', 'owner'):
email_cells.append(cell)
elif col == 'realm':
realm = value
elif col == 'parent_realm':
parent_realm = value
elif col == 'parent_id':
parent_id = value
cell_group.append(cell)
cell_groups.append(cell_group)
if parent_realm:
resource = Resource(realm, row.get('id'),
parent=Resource(parent_realm, parent_id))
else:
resource = Resource(realm, row.get('id'))
# FIXME: for now, we still need to hardcode the realm in the action
if resource.realm.upper()+'_VIEW' not in req.perm(resource):
continue
authorized_results.append(result)
if email_cells:
for cell in email_cells:
emails = chrome.format_emails(context.child(resource),
cell['value'])
result[cell['index']] = cell['value'] = emails
row['resource'] = resource
if row_groups:
row_group = row_groups[-1][1]
else:
row_group = []
row_groups = [(None, row_group)]
row_group.append(row)
data.update({'header_groups': header_groups,
'row_groups': row_groups,
'numrows': numrows})
if format == 'rss':
data['email_map'] = chrome.get_email_map()
data['context'] = web_context(req, report_resource,
absurls=True)
return 'report.rss', data, 'application/rss+xml'
elif format == 'csv':
filename = 'report_%s.csv' % id if id else 'report.csv'
self._send_csv(req, cols, authorized_results, mimetype='text/csv',
filename=filename)
elif format == 'tab':
filename = 'report_%s.tsv' % id if id else 'report.tsv'
self._send_csv(req, cols, authorized_results, '\t',
mimetype='text/tab-separated-values',
filename=filename)
else:
p = page if max is not None else None
add_link(req, 'alternate',
auth_link(req, report_href(format='rss', page=None)),
_('RSS Feed'), 'application/rss+xml', 'rss')
add_link(req, 'alternate', report_href(format='csv', page=p),
_('Comma-delimited Text'), 'text/plain')
add_link(req, 'alternate', report_href(format='tab', page=p),
_('Tab-delimited Text'), 'text/plain')
if 'REPORT_SQL_VIEW' in req.perm:
add_link(req, 'alternate',
req.href.report(id=id, format='sql'),
_('SQL Query'), 'text/plain')
# reuse the session vars of the query module so that
# the query navigation links on the ticket can be used to
# navigate report results as well
try:
req.session['query_tickets'] = \
' '.join([str(int(row['id']))
for rg in row_groups for row in rg[1]])
req.session['query_href'] = \
req.session['query_href'] = report_href()
# Kludge: we have to clear the other query session
# variables, but only if the above succeeded
for var in ('query_constraints', 'query_time'):
if var in req.session:
del req.session[var]
except (ValueError, KeyError):
pass
if set(data['args']) - set(['USER']):
data['show_args_form'] = True
add_script(req, 'common/js/folding.js')
if missing_args:
add_warning(req, _(
'The following arguments are missing: %(args)s',
args=", ".join(missing_args)))
return 'report_view.html', data, None
def execute_report(self, req, db, id, sql, args):
"""Execute given sql report (0.10 backward compatibility method)
:see: ``execute_paginated_report``
"""
res = self.execute_paginated_report(req, db, id, sql, args)
if len(res) == 2:
raise res[0]
return res[:5]
def execute_paginated_report(self, req, db, id, sql, args,
limit=0, offset=0):
sql, args, missing_args = self.sql_sub_vars(sql, args, db)
if not sql:
raise TracError(_("Report {%(num)s} has no SQL query.", num=id))
self.log.debug('Report {%d} with SQL "%s"', id, sql)
self.log.debug('Request args: %r', req.args)
cursor = db.cursor()
num_items = 0
order_by = []
limit_offset = None
base_sql = sql.replace(SORT_COLUMN, '1').replace(LIMIT_OFFSET, '')
if id == -1 or limit == 0:
sql = base_sql
else:
# The number of tickets is obtained
count_sql = 'SELECT COUNT(*) FROM (\n%s\n) AS tab' % base_sql
self.log.debug("Report {%d} SQL (count): %s", id, count_sql)
try:
cursor.execute(count_sql, args)
except Exception, e:
return e, count_sql
num_items = cursor.fetchone()[0]
# The column names are obtained
colnames_sql = 'SELECT * FROM (\n%s\n) AS tab LIMIT 1' % base_sql
self.log.debug("Report {%d} SQL (col names): %s", id, colnames_sql)
try:
cursor.execute(colnames_sql, args)
except Exception, e:
return e, colnames_sql
cols = get_column_names(cursor)
# The ORDER BY columns are inserted
sort_col = req.args.get('sort', '')
asc = req.args.get('asc', '1')
self.log.debug("%r %s (%s)", cols, sort_col, asc and '^' or 'v')
order_cols = []
if sort_col and sort_col not in cols:
raise TracError(_('Query parameter "sort=%(sort_col)s" '
' is invalid', sort_col=sort_col))
skel = None
if '__group__' in cols:
order_cols.append('__group__')
if sort_col:
sort_col = '%s %s' % (db.quote(sort_col),
asc == '1' and 'ASC' or 'DESC')
if SORT_COLUMN in sql:
# Method 1: insert sort_col at specified position
sql = sql.replace(SORT_COLUMN, sort_col or '1')
elif sort_col:
# Method 2: automagically insert sort_col (and __group__
# before it, if __group__ was specified) as first criterions
if '__group__' in cols:
order_by.append('__group__ ASC')
order_by.append(sort_col)
# is there already an ORDER BY in the original sql?
skel = sql_skeleton(sql)
before, after = split_sql(sql, _order_by_re, skel)
if after: # there were some other criterions, keep them
order_by.append(after)
sql = ' '.join([before, 'ORDER BY', ', '.join(order_by)])
# Add LIMIT/OFFSET if pagination needed
limit_offset = ''
if num_items > limit:
limit_offset = ' '.join(['LIMIT', str(limit),
'OFFSET', str(offset)])
if LIMIT_OFFSET in sql:
# Method 1: insert LIMIT/OFFSET at specified position
sql = sql.replace(LIMIT_OFFSET, limit_offset)
else:
# Method 2: limit/offset is added unless already present
skel = skel or sql_skeleton(sql)
if 'LIMIT' not in skel.upper():
sql = ' '.join([sql, limit_offset])
self.log.debug("Report {%d} SQL (order + limit): %s", id, sql)
try:
cursor.execute(sql, args)
except Exception, e:
if order_by or limit_offset:
add_notice(req, _("Hint: if the report failed due to automatic"
" modification of the ORDER BY clause or the"
" addition of LIMIT/OFFSET, please look up"
" %(sort_column)s and %(limit_offset)s in"
" TracReports to see how to gain complete"
" control over report rewriting.",
sort_column=SORT_COLUMN,
limit_offset=LIMIT_OFFSET))
return e, sql
rows = cursor.fetchall() or []
cols = get_column_names(cursor)
return cols, rows, num_items, missing_args, limit_offset
def get_var_args(self, req):
# reuse somehow for #9574 (wiki vars)
report_args = {}
for arg in req.args.keys():
if not arg.isupper():
continue
report_args[arg] = to_unicode(req.args.get(arg))
# Set some default dynamic variables
if 'USER' not in report_args:
report_args['USER'] = req.authname
return report_args
def sql_sub_vars(self, sql, args, db=None):
"""Extract $XYZ-style variables from the `sql` query.
:since 1.0: the `db` parameter is no longer needed and will be removed
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/api.py | trac/trac/ticket/api.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
import copy
import re
from genshi.builder import tag
from genshi.core import Markup, unescape
from trac.cache import cached
from trac.config import *
from trac.core import *
from trac.perm import IPermissionRequestor, PermissionCache, PermissionSystem
from trac.resource import IResourceManager, ResourceSystem
from trac.util import Ranges, as_int
from trac.util.text import shorten_line
from trac.util.translation import _, N_, gettext
from trac.wiki import IWikiSyntaxProvider, WikiParser
class ITicketActionController(Interface):
    """Extension point interface for components willing to participate
    in the ticket workflow.

    This is mainly about controlling the changes to the ticket ''status'',
    though not restricted to it.
    """
    # NOTE: following the trac.core.Interface convention used throughout
    # this file, the method declarations below omit `self`.

    def get_ticket_actions(req, ticket):
        """Return an iterable of `(weight, action)` tuples corresponding to
        the actions that are contributed by this component.

        That list may vary given the current state of the ticket and the
        actual request parameter.

        `action` is a key used to identify that particular action.
        (note that 'history' and 'diff' are reserved and should not be used
        by plugins)

        The actions will be presented on the page in descending order of the
        integer weight. The first action in the list is used as the default
        action.

        When in doubt, use a weight of 0."""

    def get_all_status():
        """Returns an iterable of all the possible values for the ''status''
        field this action controller knows about.

        This will be used to populate the query options and the like.
        It is assumed that the initial status of a ticket is 'new' and
        the terminal status of a ticket is 'closed'.
        """

    def render_ticket_action_control(req, ticket, action):
        """Return a tuple in the form of `(label, control, hint)`

        `label` is a short text that will be used when listing the action,
        `control` is the markup for the action control and `hint` should
        explain what will happen if this action is taken.

        This method will only be called if the controller claimed to handle
        the given `action` in the call to `get_ticket_actions`.

        Note that the radio button for the action has an `id` of
        `"action_%s" % action`. Any `id`s used in `control` need to be made
        unique. The method used in the default ITicketActionController is to
        use `"action_%s_something" % action`.
        """

    def get_ticket_changes(req, ticket, action):
        """Return a dictionary of ticket field changes.

        This method must not have any side-effects because it will also
        be called in preview mode (`req.args['preview']` will be set, then).
        See `apply_action_side_effects` for that. If the latter indeed
        triggers some side-effects, it is advised to emit a warning
        (`trac.web.chrome.add_warning(req, reason)`) when this method is
        called in preview mode.

        This method will only be called if the controller claimed to handle
        the given `action` in the call to `get_ticket_actions`.
        """

    def apply_action_side_effects(req, ticket, action):
        """Perform side effects once all changes have been made to the
        ticket.

        Multiple controllers might be involved, so the apply side-effects
        offers a chance to trigger a side-effect based on the given `action`
        after the new state of the ticket has been saved.

        This method will only be called if the controller claimed to handle
        the given `action` in the call to `get_ticket_actions`.
        """
class ITicketChangeListener(Interface):
    """Extension point interface for components that require notification
    when tickets are created, modified, or deleted."""

    def ticket_created(ticket):
        """Called when a ticket is created."""

    def ticket_changed(ticket, comment, author, old_values):
        """Called when a ticket is modified.

        `old_values` is a dictionary containing the previous values of the
        fields that have changed.
        """

    def ticket_deleted(ticket):
        """Called when a ticket is deleted."""
class ITicketManipulator(Interface):
    """Miscellaneous manipulation of ticket workflow features."""

    def prepare_ticket(req, ticket, fields, actions):
        """Not currently called, but should be provided for future
        compatibility."""

    def validate_ticket(req, ticket):
        """Validate a ticket after it's been populated from user input.

        Must return a list of `(field, message)` tuples, one for each problem
        detected.  `field` can be `None` to indicate an overall problem with
        the ticket.  Therefore, a return value of `[]` means everything is
        OK."""
class IMilestoneChangeListener(Interface):
    """Extension point interface for components that require notification
    when milestones are created, modified, or deleted."""

    def milestone_created(milestone):
        """Called when a milestone is created."""

    def milestone_changed(milestone, old_values):
        """Called when a milestone is modified.

        `old_values` is a dictionary containing the previous values of the
        milestone properties that changed.  Currently those properties can be
        'name', 'due', 'completed', or 'description'.
        """

    def milestone_deleted(milestone):
        """Called when a milestone is deleted."""
class ITicketFieldProvider(Interface):
    """Extension point interface for components that provide fields for the
    ticket system."""

    def get_select_fields():
        """Returns a list of select fields, each as a tuple of

            (rank, field)

        where field is a dictionary that defines:
         * name: the field name
         * pk: the primary key of the field table
         * label: the label to display, preferably wrapped with N_()
         * cls: the model describing the field

        the following keys can also usefully be defined:
         * optional: a boolean specifying that the select can be empty

        The rank is expected to be an integer to specify the sorting of the
        select and radio fields.  This is not intended to allow for the
        extent of configurability of the custom fields but allows a plugin
        to mix in fields as if they are a first class member of the ticket
        system.
        """

    def get_radio_fields():
        """Returns a list of radio fields, each as a tuple of

            (rank, field)

        See the documentation for get_select_fields for descriptions of rank
        and field.

        Note that in contrast to get_select_fields, radio fields will all be
        specified as optional.
        """

    def get_raw_fields():
        """Returns a list of fields, each represents ticket field
        dictionary.  For example:
         * name: field name
         * type: field type
         * label: the label to display, preferably wrapped with N_()
         * format: field format
         * other appropriate field properties
        """
class TicketSystem(Component):
    """Central component of the ticket subsystem.

    Manages ticket fields (built-in, select/radio and custom), the
    workflow action controllers, ticket permissions, ticket wiki syntax
    and the 'ticket' resource realm.
    """

    implements(IPermissionRequestor, IWikiSyntaxProvider, IResourceManager,
               ITicketFieldProvider)

    # Plugin extension points
    ticket_field_providers = ExtensionPoint(ITicketFieldProvider)
    change_listeners = ExtensionPoint(ITicketChangeListener)
    milestone_change_listeners = ExtensionPoint(IMilestoneChangeListener)

    ticket_custom_section = ConfigSection('ticket-custom',
        """In this section, you can define additional fields for tickets. See
        TracTicketsCustomFields for more details.""")

    action_controllers = OrderedExtensionsOption('ticket', 'workflow',
        ITicketActionController, default='ConfigurableTicketWorkflow',
        include_missing=False,
        doc="""Ordered list of workflow controllers to use for ticket actions
            (''since 0.11'').""")

    restrict_owner = BoolOption('ticket', 'restrict_owner', 'false',
        """Make the owner field of tickets use a drop-down menu.
        Be sure to understand the performance implications before activating
        this option. See
        [TracTickets#Assign-toasDrop-DownList Assign-to as Drop-Down List].

        Please note that e-mail addresses are '''not''' obfuscated in the
        resulting drop-down menu, so this option should not be used if
        e-mail addresses must remain protected.
        (''since 0.9'')""")

    # Per-field defaults applied to newly created tickets
    default_version = Option('ticket', 'default_version', '',
        """Default version for newly created tickets.""")

    default_type = Option('ticket', 'default_type', 'defect',
        """Default type for newly created tickets (''since 0.9'').""")

    default_priority = Option('ticket', 'default_priority', 'major',
        """Default priority for newly created tickets.""")

    default_milestone = Option('ticket', 'default_milestone', '',
        """Default milestone for newly created tickets.""")

    default_component = Option('ticket', 'default_component', '',
        """Default component for newly created tickets.""")

    default_severity = Option('ticket', 'default_severity', '',
        """Default severity for newly created tickets.""")

    default_summary = Option('ticket', 'default_summary', '',
        """Default summary (title) for newly created tickets.""")

    default_description = Option('ticket', 'default_description', '',
        """Default description for newly created tickets.""")

    default_keywords = Option('ticket', 'default_keywords', '',
        """Default keywords for newly created tickets.""")

    default_owner = Option('ticket', 'default_owner', '< default >',
        """Default owner for newly created tickets.""")

    default_cc = Option('ticket', 'default_cc', '',
        """Default cc: list for newly created tickets.""")

    default_resolution = Option('ticket', 'default_resolution', 'fixed',
        """Default resolution for resolving (closing) tickets
        (''since 0.11'').""")
def __init__(self):
self.log.debug('action controllers for ticket workflow: %r' %
[c.__class__.__name__ for c in self.action_controllers])
# Public API
def get_available_actions(self, req, ticket):
"""Returns a sorted list of available actions"""
# The list should not have duplicates.
actions = {}
for controller in self.action_controllers:
weighted_actions = controller.get_ticket_actions(req, ticket) or []
for weight, action in weighted_actions:
if action in actions:
actions[action] = max(actions[action], weight)
else:
actions[action] = weight
all_weighted_actions = [(weight, action) for action, weight in
actions.items()]
return [x[1] for x in sorted(all_weighted_actions, reverse=True)]
def get_all_status(self):
"""Returns a sorted list of all the states all of the action
controllers know about."""
valid_states = set()
for controller in self.action_controllers:
valid_states.update(controller.get_all_status() or [])
return sorted(valid_states)
def get_ticket_field_labels(self):
"""Produce a (name,label) mapping from `get_ticket_fields`."""
labels = dict((f['name'], f['label'])
for f in self.get_ticket_fields())
labels['attachment'] = _("Attachment")
return labels
def get_ticket_fields(self):
"""Returns list of fields available for tickets.
Each field is a dict with at least the 'name', 'label' (localized)
and 'type' keys.
It may in addition contain the 'custom' key, the 'optional' and the
'options' keys. When present 'custom' and 'optional' are always `True`.
"""
fields = copy.deepcopy(self.fields)
label = 'label' # workaround gettext extraction bug
for f in fields:
f[label] = gettext(f[label])
return fields
    def reset_ticket_fields(self):
        """Invalidate ticket field cache."""
        # `fields` is a @cached attribute; deleting it forces recomputation
        # on next access.
        del self.fields
@cached
def fields(self, db):
"""Return the list of fields available for tickets."""
from trac.ticket import model
fields = []
# Basic text fields
fields.append({'name': 'summary', 'type': 'text',
'label': N_('Summary')})
fields.append({'name': 'reporter', 'type': 'text',
'label': N_('Reporter')})
# Owner field, by default text but can be changed dynamically
# into a drop-down depending on configuration (restrict_owner=true)
field = {'name': 'owner', 'label': N_('Owner')}
field['type'] = 'text'
fields.append(field)
# Description
fields.append({'name': 'description', 'type': 'textarea',
'label': N_('Description')})
# Default select and radio fields
selects = []
[selects.extend(field_provider.get_select_fields())
for field_provider in self.ticket_field_providers]
[select.update({'type': 'select'}) for n, select in selects]
radios = []
[radios.extend(field_provider.get_radio_fields())
for field_provider in self.ticket_field_providers]
[radio.update({'type': 'radio',
'optional': True}) for n, radio in radios]
selects.extend(radios)
selects.sort()
for rank, field in selects:
cls = field['cls']
name = field['name']
pk_field = field.get('pk', 'name')
options = [getattr(val, pk_field)
for val in cls.select(self.env, db=db)]
if not options:
# Fields without possible values are treated as if they didn't
# exist
continue
if 'value' not in field:
field['value'] = getattr(self, 'default_' + name, '')
field['options'] = options
fields.append(field)
# Advanced text fields
fields.append({'name': 'keywords', 'type': 'text', 'format': 'list',
'label': N_('Keywords')})
fields.append({'name': 'cc', 'type': 'text', 'format': 'list',
'label': N_('Cc')})
# Date/time fields
fields.append({'name': 'time', 'type': 'time',
'label': N_('Created')})
fields.append({'name': 'changetime', 'type': 'time',
'label': N_('Modified')})
for field in self.get_custom_fields():
if field['name'] in [f['name'] for f in fields]:
self.log.warning('Duplicate field name "%s" (ignoring)',
field['name'])
continue
if field['name'] in self.reserved_field_names:
self.log.warning('Field name "%s" is a reserved name '
'(ignoring)', field['name'])
continue
if not re.match('^[a-zA-Z][a-zA-Z0-9_]+$', field['name']):
self.log.warning('Invalid name for custom field: "%s" '
'(ignoring)', field['name'])
continue
field['custom'] = True
fields.append(field)
#TODO: this is Bloodhound specific patch to the Trac. Contact Trac
# community about possibility to apply the change to the Trac codebase
self._add_raw_fields_from_field_providers(fields)
return fields
reserved_field_names = ['report', 'order', 'desc', 'group', 'groupdesc',
'col', 'row', 'format', 'max', 'page', 'verbose',
'comment', 'or']
def _add_raw_fields_from_field_providers(self, fields):
for field_provider in self.ticket_field_providers:
if hasattr(field_provider, 'get_raw_fields'):
raw_fields = field_provider.get_raw_fields()
if raw_fields:
for raw_field in raw_fields:
self._add_raw_field(
raw_field, fields)
def _add_raw_field(self, raw_field, fields):
if raw_field["name"] in [f['name'] for f in fields]:
self.log.warning(
'Duplicate field name "%s" (ignoring)', raw_field["name"])
else:
fields.append(raw_field)
    def get_custom_fields(self):
        """Return a deep copy of the cached custom field definitions."""
        return copy.deepcopy(self.custom_fields)
@cached
def custom_fields(self, db):
"""Return the list of custom ticket fields available for tickets."""
fields = []
config = self.ticket_custom_section
for name in [option for option, value in config.options()
if '.' not in option]:
field = {
'name': name,
'type': config.get(name),
'order': config.getint(name + '.order', 0),
'label': config.get(name + '.label') or name.capitalize(),
'value': config.get(name + '.value', '')
}
if field['type'] == 'select' or field['type'] == 'radio':
field['options'] = config.getlist(name + '.options', sep='|')
if '' in field['options']:
field['optional'] = True
field['options'].remove('')
elif field['type'] == 'text':
field['format'] = config.get(name + '.format', 'plain')
elif field['type'] == 'textarea':
field['format'] = config.get(name + '.format', 'plain')
field['width'] = config.getint(name + '.cols')
field['height'] = config.getint(name + '.rows')
fields.append(field)
fields.sort(lambda x, y: cmp((x['order'], x['name']),
(y['order'], y['name'])))
return fields
    def get_field_synonyms(self):
        """Return a mapping from field name synonyms to field names.

        The synonyms are supposed to be more intuitive for custom queries.
        """
        # i18n TODO - translated keys
        return {'created': 'time', 'modified': 'changetime'}
def eventually_restrict_owner(self, field, ticket=None):
"""Restrict given owner field to be a list of users having
the TICKET_MODIFY permission (for the given ticket)
"""
if self.restrict_owner:
field['type'] = 'select'
possible_owners = []
for user in PermissionSystem(self.env) \
.get_users_with_permission('TICKET_MODIFY'):
if not ticket or \
'TICKET_MODIFY' in PermissionCache(self.env, user,
ticket.resource):
possible_owners.append(user)
possible_owners.sort()
possible_owners.insert(0, '< default >')
field['options'] = possible_owners
field['optional'] = True
# IPermissionRequestor methods
    def get_permission_actions(self):
        """Return the ticket permission actions; TICKET_MODIFY and
        TICKET_ADMIN are meta-permissions granting the listed actions."""
        return ['TICKET_APPEND', 'TICKET_CREATE', 'TICKET_CHGPROP',
                'TICKET_VIEW', 'TICKET_EDIT_CC', 'TICKET_EDIT_DESCRIPTION',
                'TICKET_EDIT_COMMENT', 'TICKET_BATCH_MODIFY',
                ('TICKET_MODIFY', ['TICKET_APPEND', 'TICKET_CHGPROP']),
                ('TICKET_ADMIN', ['TICKET_CREATE', 'TICKET_MODIFY',
                                  'TICKET_VIEW', 'TICKET_EDIT_CC',
                                  'TICKET_EDIT_DESCRIPTION',
                                  'TICKET_EDIT_COMMENT',
                                  'TICKET_BATCH_MODIFY'])]
# IWikiSyntaxProvider methods
    def get_link_resolvers(self):
        """`bug:`/`ticket:` links resolve to tickets, `comment:` to
        ticket comments."""
        return [('bug', self._format_link),
                ('ticket', self._format_link),
                ('comment', self._format_comment_link)]
    def get_wiki_syntax(self):
        """Shorthand `#123` ticket links, including intertrac `#T123`."""
        yield (
            # matches #... but not &#... (HTML entity)
            r"!?(?<!&)#"
            # optional intertrac shorthand #T... + digits
            r"(?P<it_ticket>%s)%s" % (WikiParser.INTERTRAC_SCHEME,
                                      Ranges.RE_STR),
            lambda x, y, z: self._format_link(x, 'ticket', y[1:], y, z))
    def _format_link(self, formatter, ns, target, label, fullmatch=None):
        """Render a `ticket:`/`bug:`/`#N` wiki link.

        A single ticket number becomes a link to that ticket (with a
        status-dependent CSS class); a range like `#1-3,5` becomes a
        query link.  Unknown/invalid targets render as a 'missing ticket'
        link.
        """
        intertrac = formatter.shorthand_intertrac_helper(ns, target, label,
                                                         fullmatch)
        if intertrac:
            return intertrac
        try:
            link, params, fragment = formatter.split_link(target)
            r = Ranges(link)
            if len(r) == 1:
                # Single ticket reference
                num = r.a
                ticket = formatter.resource('ticket', num)
                from trac.ticket.model import Ticket
                if Ticket.id_is_valid(num) and \
                        'TICKET_VIEW' in formatter.perm(ticket):
                    # TODO: attempt to retrieve ticket view directly,
                    #       something like: t = Ticket.view(num)
                    for type, summary, status, resolution in \
                            self.env.db_query("""
                            SELECT type, summary, status, resolution
                            FROM ticket WHERE id=%s
                            """, (str(num),)):
                        title = self.format_summary(summary, status,
                                                    resolution, type)
                        href = formatter.href.ticket(num) + params + fragment
                        return tag.a(label, title=title, href=href,
                                     class_='%s ticket' % status)
            else:
                # Multiple tickets: link to a query over the id range
                ranges = str(r)
                if params:
                    params = '&' + params[1:]
                if isinstance(label, Markup):
                    _label = unescape(label)
                else:
                    _label = label
                # zero-width space allows the browser to wrap long lists
                label_wrap = _label.replace(',', u',\u200b')
                ranges_wrap = ranges.replace(',', u', ')
                return tag.a(label_wrap,
                             title=_("Tickets %(ranges)s",
                                     ranges=ranges_wrap),
                             href=formatter.href.query(id=ranges) + params)
        except ValueError:
            # not a valid number or range of numbers
            pass
        return tag.a(label, class_='missing ticket')
    def _format_comment_link(self, formatter, ns, target, label):
        """Render a `comment:N` or `comment:N:ticket:M` wiki link."""
        resource = None
        if ':' in target:
            elts = target.split(':')
            if len(elts) == 3:
                cnum, realm, id = elts
                if cnum != 'description' and cnum and not cnum[0].isdigit():
                    realm, id, cnum = elts # support old comment: style
                resource = formatter.resource(realm, id)
        else:
            # bare `comment:N` refers to the current resource
            resource = formatter.resource
            cnum = target

        if resource and resource.realm == 'ticket':
            id = as_int(resource.id, None)
            if id is not None:
                href = "%s#comment:%s" % (formatter.href.ticket(resource.id),
                                          cnum)
                title = _("Comment %(cnum)s for Ticket #%(id)s", cnum=cnum,
                          id=resource.id)
                if 'TICKET_VIEW' in formatter.perm(resource):
                    for status, in self.env.db_query(
                            "SELECT status FROM ticket WHERE id=%s", (id,)):
                        return tag.a(label, href=href, title=title,
                                     class_=status)
                    return tag.a(label, href=href, title=title)
        return label
# IResourceManager methods
    def get_resource_realms(self):
        """This component manages the 'ticket' resource realm."""
        yield 'ticket'
    def get_resource_description(self, resource, format=None, context=None,
                                 **kwargs):
        """Describe a ticket resource.

        'compact' yields e.g. "#12", 'summary' a one-line summary, the
        default a localized "Ticket #12".  The neighborhood prefix is a
        Bloodhound multi-product extension.
        """
        nbhprefix = ResourceSystem(self.env).neighborhood_prefix(
            resource.neighborhood)

        if format == 'compact':
            return '%s#%s' % (nbhprefix, resource.id)
        elif format == 'summary':
            from trac.ticket.model import Ticket
            ticket = Ticket(self.env, resource.id)
            args = [ticket[f] for f in ('summary', 'status', 'resolution',
                                        'type')]
            return self.format_summary(*args)
        return nbhprefix + _("Ticket #%(shortname)s", shortname=resource.id)
def format_summary(self, summary, status=None, resolution=None, type=None):
summary = shorten_line(summary)
if type:
summary = type + ': ' + summary
if status:
if status == 'closed' and resolution:
status += ': ' + resolution
return "%s (%s)" % (summary, status)
else:
return summary
    def resource_exists(self, resource):
        """Check whether the ticket (and optionally a given version of it)
        exists in the database.

        >>> from trac.test import EnvironmentStub
        >>> from trac.resource import Resource, resource_exists
        >>> env = EnvironmentStub()

        >>> resource_exists(env, Resource('ticket', 123456))
        False

        >>> from trac.ticket.model import Ticket
        >>> t = Ticket(env)
        >>> int(t.insert())
        1
        >>> resource_exists(env, t.resource)
        True
        """
        if self.env.db_query("SELECT id FROM ticket WHERE id=%s",
                             (resource.id,)):
            if resource.version is None:
                return True
            # Each distinct change time counts as one version of the ticket
            revcount = self.env.db_query("""
                SELECT count(DISTINCT time) FROM ticket_change
                WHERE ticket=%s
                """, (resource.id,))
            return revcount[0][0] >= resource.version
        else:
            return False
# ITicketFieldProvider methods
    def get_select_fields(self):
        """Default select and radio fields"""
        from trac.ticket import model
        # (rank, field) tuples; rank controls ordering among providers
        selects = [(10, {'name': 'type', 'label': N_('Type'),
                         'cls': model.Type}),
                   (30, {'name': 'priority', 'label': N_('Priority'),
                         'cls': model.Priority}),
                   (40, {'name': 'milestone', 'label': N_('Milestone'),
                         'cls': model.Milestone, 'optional': True}),
                   (50, {'name': 'component', 'label': N_('Component'),
                         'cls': model.Component}),
                   (60, {'name': 'version', 'label': N_('Version'),
                         'cls': model.Version, 'optional': True}),
                   (70, {'name': 'severity', 'label': N_('Severity'),
                         'cls': model.Severity})]
        return selects
    def get_radio_fields(self):
        """Default radio fields"""
        from trac.ticket import model
        # (rank, field) tuples; radio fields are always optional
        radios = [(20, {'name': 'status', 'label': N_('Status'),
                        'cls': model.Status}),
                  (80, {'name': 'resolution', 'label': N_('Resolution'),
                        'cls': model.Resolution})]
        return radios
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/web_ui.py | trac/trac/ticket/web_ui.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
from __future__ import with_statement
import csv
from datetime import datetime
import pkg_resources
import re
from StringIO import StringIO
from genshi.core import Markup
from genshi.builder import tag
from trac.attachment import AttachmentModule
from trac.config import BoolOption, Option, IntOption
from trac.core import *
from trac.mimeview.api import Mimeview, IContentConverter
from trac.resource import (
Resource, ResourceNotFound, get_resource_url, render_resource_link,
get_resource_shortname
)
from trac.search import ISearchSource, search_to_sql, shorten_result
from trac.ticket.api import TicketSystem, ITicketManipulator
from trac.ticket.model import Milestone, Ticket, group_milestones
from trac.ticket.notification import TicketNotifyEmail
from trac.timeline.api import ITimelineEventProvider
from trac.util import as_bool, as_int, get_reporter_id
from trac.util.datefmt import (
format_datetime, from_utimestamp, to_utimestamp, utc
)
from trac.util.text import (
exception_to_unicode, empty, obfuscate_email_address, shorten_line,
to_unicode
)
from trac.util.presentation import separated
from trac.util.translation import _, tag_, tagn_, N_, gettext, ngettext
from trac.versioncontrol.diff import get_diff_options, diff_blocks
from trac.web import (
IRequestHandler, RequestDone, arg_list_to_args, parse_arg_list
)
from trac.web.chrome import (
Chrome, INavigationContributor, ITemplateProvider,
add_ctxtnav, add_link, add_notice, add_script, add_script_data,
add_stylesheet, add_warning, auth_link, prevnext_nav, web_context
)
from trac.wiki.formatter import format_to, format_to_html, format_to_oneliner
class InvalidTicket(TracError):
    """Exception raised when a ticket fails validation."""

    # Error page title; localized with gettext when displayed
    title = N_("Invalid Ticket")
class TicketModule(Component):
    """Web frontend for viewing, creating and editing tickets."""

    implements(IContentConverter, INavigationContributor, IRequestHandler,
               ISearchSource, ITemplateProvider, ITimelineEventProvider)

    ticket_manipulators = ExtensionPoint(ITicketManipulator)

    timeline_details = BoolOption('timeline', 'ticket_show_details', 'false',
        """Enable the display of all ticket changes in the timeline, not only
        open / close operations (''since 0.9'').""")

    max_description_size = IntOption('ticket', 'max_description_size', 262144,
        """Don't accept tickets with a too big description.
        (''since 0.11'').""")

    max_comment_size = IntOption('ticket', 'max_comment_size', 262144,
        """Don't accept tickets with a too big comment.
        (''since 0.11.2'')""")

    timeline_newticket_formatter = Option('timeline', 'newticket_formatter',
                                          'oneliner',
        """Which formatter flavor (e.g. 'html' or 'oneliner') should be
        used when presenting the description for new tickets.
        If 'oneliner', the [timeline] abbreviated_messages option applies.
        (''since 0.11'').""")

    preserve_newlines = Option('ticket', 'preserve_newlines', 'default',
        """Whether Wiki formatter should respect the new lines present
        in the Wiki text.
        If set to 'default', this is equivalent to 'yes' for new environments
        but keeps the old behavior for upgraded environments (i.e. 'no').
        (''since 0.11'').""")

    ticketlink_query = Option('query', 'ticketlink_query',
        default='?status=!closed',
        doc="""The base query to be used when linkifying values of ticket
            fields. The query is a URL query
            string starting with `?` as used in `query:`
            [TracQuery#UsingTracLinks Trac links].
            (''since 0.12'')""")
    def __init__(self):
        # Names of 'default_*' options already warned about, so each
        # deprecated access is only logged once.
        self._warn_for_default_attr = set()

    def __getattr__(self, name):
        """Delegate access to ticket default Options which were moved to
        TicketSystem.

        .. todo:: remove in 1.0
        """
        if name.startswith('default_'):
            if name not in self._warn_for_default_attr:
                self.log.warning("%s option should be accessed via "
                                 "TicketSystem component", name)
                self._warn_for_default_attr.add(name)
            return getattr(TicketSystem(self.env), name)
        raise AttributeError("TicketModule has no attribute '%s'" % name)
@property
def must_preserve_newlines(self):
preserve_newlines = self.preserve_newlines
if preserve_newlines == 'default':
preserve_newlines = self.env.get_version(initial=True) >= 21 # 0.11
return as_bool(preserve_newlines)
# IContentConverter methods
    def get_supported_conversions(self):
        """Ticket export formats: CSV, tab-separated and RSS."""
        yield ('csv', _("Comma-delimited Text"), 'csv',
               'trac.ticket.Ticket', 'text/csv', 8)
        yield ('tab', _("Tab-delimited Text"), 'tsv',
               'trac.ticket.Ticket', 'text/tab-separated-values', 8)
        yield ('rss', _("RSS Feed"), 'xml',
               'trac.ticket.Ticket', 'application/rss+xml', 8)
def convert_content(self, req, mimetype, ticket, key):
if key == 'csv':
return self.export_csv(req, ticket, mimetype='text/csv')
elif key == 'tab':
return self.export_csv(req, ticket, sep='\t',
mimetype='text/tab-separated-values')
elif key == 'rss':
return self.export_rss(req, ticket)
# INavigationContributor methods
def get_active_navigation_item(self, req):
if re.match(r'/newticket/?', req.path_info):
return 'newticket'
return 'tickets'
    def get_navigation_items(self, req):
        """Contribute the 'New Ticket' main navigation entry (access key 7)
        for users allowed to create tickets."""
        if 'TICKET_CREATE' in req.perm:
            yield ('mainnav', 'newticket',
                   tag.a(_("New Ticket"), href=req.href.newticket(),
                         accesskey=7))
# IRequestHandler methods
def match_request(self, req):
if req.path_info == "/newticket":
return True
match = re.match(r'/ticket/([0-9]+)$', req.path_info)
if match:
req.args['id'] = match.group(1)
return True
    def process_request(self, req):
        """Dispatch to the existing-ticket or new-ticket handler.

        `req.args['id']` is set by `match_request` for /ticket/<id> URLs
        and must not be present for /newticket.
        """
        if 'id' in req.args:
            if req.path_info == '/newticket':
                raise TracError(_("id can't be set for a new ticket "
                                  "request."))
            return self._process_ticket_request(req)
        return self._process_newticket_request(req)
# ITemplateProvider methods
    def get_htdocs_dirs(self):
        """No static resources are provided by this module."""
        return []

    def get_templates_dirs(self):
        """Templates shipped with the ticket subsystem."""
        return [pkg_resources.resource_filename('trac.ticket', 'templates')]
# ISearchSource methods
    def get_search_filters(self, req):
        """Offer the 'ticket' search filter to users allowed to view
        tickets."""
        if 'TICKET_VIEW' in req.perm:
            yield ('ticket', _("Tickets"))
    def get_search_results(self, req, terms, filters):
        """Search ticket fields, comments and custom field values for
        `terms`, yielding one result tuple per visible ticket, followed by
        matching ticket attachments."""
        if not 'ticket' in filters:
            return
        ticket_realm = Resource('ticket')
        with self.env.db_query as db:
            # Three sub-searches: ticket columns, comment changes and
            # custom field values, combined via UNION below.
            sql, args = search_to_sql(db, ['summary', 'keywords',
                                           'description', 'reporter', 'cc',
                                           db.cast('id', 'text')], terms)
            sql2, args2 = search_to_sql(db, ['newvalue'], terms)
            sql3, args3 = search_to_sql(db, ['value'], terms)
            ticketsystem = TicketSystem(self.env)
            for summary, desc, author, type, tid, ts, status, resolution in \
                    db("""SELECT summary, description, reporter, type, id,
                                 time, status, resolution
                          FROM ticket
                          WHERE id IN (
                              SELECT id FROM ticket WHERE %s
                          UNION
                              SELECT ticket FROM ticket_change
                              WHERE field='comment' AND %s
                          UNION
                              SELECT ticket FROM ticket_custom WHERE %s
                          )
                          """ % (sql, sql2, sql3),
                          args + args2 + args3):
                t = ticket_realm(id=tid)
                # per-ticket permission check
                if 'TICKET_VIEW' in req.perm(t):
                    yield (req.href.ticket(tid),
                           tag_("%(title)s: %(message)s",
                                title=tag.span(
                                    get_resource_shortname(self.env, t),
                                    class_=status),
                                message=ticketsystem.format_summary(
                                    summary, status, resolution, type)),
                           from_utimestamp(ts), author,
                           shorten_result(desc, terms))

        # Attachments
        for result in AttachmentModule(self.env).get_search_results(
                req, ticket_realm, terms):
            yield result
# ITimelineEventProvider methods
    def get_timeline_filters(self, req):
        """Offer timeline filters; 'ticket_details' only when the
        [timeline] ticket_show_details option is enabled."""
        if 'TICKET_VIEW' in req.perm:
            yield ('ticket', _("Tickets opened and closed"))
            if self.timeline_details:
                yield ('ticket_details', _("Ticket updates"), False)
    def get_timeline_events(self, req, start, stop, filters):
        """Yield ticket timeline events between `start` and `stop`.

        Produces creation/reopen/close events (filter 'ticket'), ticket
        update events (filter 'ticket_details'), batched events for
        simultaneous multi-ticket changes, and ticket attachment events.
        """
        ts_start = to_utimestamp(start)
        ts_stop = to_utimestamp(stop)

        # status -> (event kind, verb used when rendering)
        status_map = {'new': ('newticket', 'created'),
                      'reopened': ('reopenedticket', 'reopened'),
                      'closed': ('closedticket', 'closed'),
                      'edit': ('editedticket', 'updated')}

        ticket_realm = Resource('ticket')

        field_labels = TicketSystem(self.env).get_ticket_field_labels()

        # Build one event tuple from accumulated change data, or None if
        # the event is filtered out or not visible to the user.
        def produce_event((id, ts, author, type, summary, description),
                          status, fields, comment, cid):
            ticket = ticket_realm(id=id)
            if 'TICKET_VIEW' not in req.perm(ticket):
                return None
            resolution = fields.get('resolution')
            info = ''
            if status == 'edit':
                if 'ticket_details' in filters:
                    if len(fields) > 0:
                        labels = [tag.i(field_labels.get(k, k.capitalize()))
                                  for k in fields.keys()]
                        info = tagn_("%(labels)s changed",
                                     "%(labels)s changed", len(labels),
                                     labels=separated(labels, ', ')) \
                               + tag.br()
                else:
                    return None
            elif 'ticket' in filters:
                if status == 'closed' and resolution:
                    if resolution and comment:
                        info = _("%(title)s: %(message)s", title=resolution,
                                 message='') # typographical translation (fr)
                    else:
                        info = resolution
            else:
                return None
            kind, verb = status_map[status]
            return (kind, from_utimestamp(ts), author,
                    (ticket, verb, info, summary, status, resolution, type,
                     description, comment, cid))

        # Walk the change log in time order, grouping rows that belong to
        # the same (ticket, time) into a single event.
        def produce_ticket_change_events(db):
            data = None
            for id, t, author, type, summary, field, oldvalue, newvalue \
                    in db("""
                    SELECT t.id, tc.time, tc.author, t.type, t.summary,
                           tc.field, tc.oldvalue, tc.newvalue
                    FROM ticket_change tc
                        INNER JOIN ticket t ON t.id = tc.ticket
                            AND tc.time>=%s AND tc.time<=%s
                    ORDER BY tc.time
                    """ % (ts_start, ts_stop)):
                if not (oldvalue or newvalue):
                    # ignore empty change corresponding to custom field
                    # created (None -> '') or deleted ('' -> None)
                    continue
                if not data or (id, t) != data[:2]:
                    # new (ticket, time) group: flush the previous one
                    if data:
                        ev = produce_event(data, status, fields, comment,
                                           cid)
                        if ev:
                            yield (ev, data[1])
                    status, fields, comment, cid = 'edit', {}, '', None
                    data = (id, t, author, type, summary, None)
                if field == 'comment':
                    comment = newvalue
                    cid = oldvalue and oldvalue.split('.')[-1]
                    # Always use the author from the comment field
                    data = data[:2] + (author,) + data[3:]
                elif field == 'status' and \
                        newvalue in ('reopened', 'closed'):
                    status = newvalue
                elif field[0] != '_':
                    # properties like _comment{n} are hidden
                    fields[field] = newvalue
            if data:
                ev = produce_event(data, status, fields, comment, cid)
                if ev:
                    yield (ev, data[1])

        # Ticket changes
        with self.env.db_query as db:
            if 'ticket' in filters or 'ticket_details' in filters:
                prev_t = None
                prev_ev = None
                batch_ev = None
                # Collapse consecutive events with the same timestamp into
                # one 'batchmodify' event listing all affected ticket ids.
                for (ev, t) in produce_ticket_change_events(db):
                    if batch_ev:
                        if prev_t == t:
                            ticket = ev[3][0]
                            batch_ev[3][0].append(ticket.id)
                        else:
                            yield batch_ev
                            prev_ev = ev
                            prev_t = t
                            batch_ev = None
                    elif prev_t and prev_t == t:
                        prev_ticket = prev_ev[3][0]
                        ticket = ev[3][0]
                        tickets = [prev_ticket.id, ticket.id]
                        batch_data = (tickets,) + ev[3][1:]
                        batch_ev = ('batchmodify', ev[1], ev[2], batch_data)
                    else:
                        if prev_ev:
                            yield prev_ev
                        prev_ev = ev
                        prev_t = t
                if batch_ev:
                    yield batch_ev
                elif prev_ev:
                    yield prev_ev

            # New tickets
            if 'ticket' in filters:
                for row in db("""SELECT id, time, reporter, type, summary,
                                        description
                                 FROM ticket WHERE time>=%s AND time<=%s
                                 """, (ts_start, ts_stop)):
                    ev = produce_event(row, 'new', {}, None, None)
                    if ev:
                        yield ev

        # Attachments
        if 'ticket_details' in filters:
            for event in AttachmentModule(self.env).get_timeline_events(
                    req, ticket_realm, start, stop):
                yield event
    def render_timeline_event(self, context, field, event):
        """Render one aspect (`field`: 'url', 'title' or 'description') of
        a ticket timeline event produced by `get_timeline_events`."""
        kind = event[0]
        if kind == 'batchmodify':
            return self._render_batched_timeline_event(context, field,
                                                       event)
        ticket, verb, info, summary, status, resolution, type, \
                description, comment, cid = event[3]
        if field == 'url':
            href = context.href.ticket(ticket.id)
            if cid:
                # deep-link to the specific comment
                href += '#comment:' + cid
            return href
        elif field == 'title':
            title = TicketSystem(self.env).format_summary(summary, status,
                                                          resolution, type)
            message = {
                'created': N_("Ticket %(ticketref)s (%(summary)s) created"),
                'reopened': N_("Ticket %(ticketref)s (%(summary)s) "
                               "reopened"),
                'closed': N_("Ticket %(ticketref)s (%(summary)s) closed"),
                'updated': N_("Ticket %(ticketref)s (%(summary)s) updated"),
            }[verb]
            return tag_(message,
                        ticketref=tag.em('#', ticket.id, title=title),
                        summary=shorten_line(summary))
        elif field == 'description':
            descr = message = ''
            if status == 'new':
                # new ticket: show its description
                message = description
            else:
                # change: show what changed plus the comment
                descr = info
                message = comment
            t_context = context.child(resource=ticket)
            t_context.set_hints(
                preserve_newlines=self.must_preserve_newlines)
            if status == 'new' and \
                    context.get_hint('wiki_flavor') == 'oneliner':
                flavor = self.timeline_newticket_formatter
                t_context.set_hints(wiki_flavor=flavor,
                                    shorten_lines=flavor == 'oneliner')
            return descr + format_to(self.env, None, t_context, message)
    def _render_batched_timeline_event(self, context, field, event):
        """Render a 'batchmodify' timeline event; the first element of the
        event data is a list of ticket ids rather than a single ticket."""
        tickets, verb, info, summary, status, resolution, type, \
                description, comment, cid = event[3]
        tickets = sorted(tickets)
        if field == 'url':
            return context.href.query(id=','.join(str(t) for t in tickets))
        elif field == 'title':
            # zero-width space after commas lets the browser wrap the list
            ticketids = u',\u200b'.join(str(t) for t in tickets)
            title = _("Tickets %(ticketids)s", ticketids=ticketids)
            return tag_("Tickets %(ticketlist)s batch updated",
                        ticketlist=tag.em('#', ticketids, title=title))
        elif field == 'description':
            t_context = context()
            t_context.set_hints(
                preserve_newlines=self.must_preserve_newlines)
            return info + format_to(self.env, None, t_context, comment)
# Internal methods
def _get_action_controllers(self, req, ticket, action):
"""Generator yielding the controllers handling the given `action`"""
for controller in TicketSystem(self.env).action_controllers:
actions = [a for w, a in
controller.get_ticket_actions(req, ticket) or []]
if action in actions:
yield controller
    def _process_newticket_request(self, req):
        """Handle the "New Ticket" page: populate a fresh ticket from the
        request, create it on a valid POST submit (which redirects), or
        render a preview of the ticket being created.
        """
        req.perm.require('TICKET_CREATE')
        ticket = Ticket(self.env)

        plain_fields = True # support for /newticket?version=0.11 GETs
        field_reporter = 'reporter'

        if req.method == 'POST':
            plain_fields = False
            field_reporter = 'field_reporter'
            # only users with TICKET_MODIFY may pre-assign an owner
            if 'field_owner' in req.args and 'TICKET_MODIFY' not in req.perm:
                del req.args['field_owner']

        self._populate(req, ticket, plain_fields)
        ticket.values['status'] = 'new'     # Force initial status
        reporter_id = req.args.get(field_reporter) or \
                      get_reporter_id(req, 'author')
        ticket.values['reporter'] = reporter_id

        valid = None
        if req.method == 'POST' and not 'preview' in req.args:
            valid = self._validate_ticket(req, ticket)
            if valid:
                self._do_create(req, ticket) # (redirected if successful)
            # else fall through in a preview
            req.args['preview'] = True

        # don't validate for new tickets and don't validate twice
        if valid is None and 'preview' in req.args:
            valid = self._validate_ticket(req, ticket)

        # Preview a new ticket
        data = self._prepare_data(req, ticket)
        data.update({
            'author_id': reporter_id,
            'actions': [],
            'version': None,
            'description_change': None,
            'valid': valid
        })

        fields = self._prepare_fields(req, ticket)

        # position 'owner' immediately before 'cc',
        # if not already positioned after (?)
        field_names = [field['name'] for field in ticket.fields
                       if not field.get('custom')]
        if 'owner' in field_names:
            curr_idx = field_names.index('owner')
            if 'cc' in field_names:
                insert_idx = field_names.index('cc')
            else:
                insert_idx = len(field_names)
            if curr_idx < insert_idx:
                ticket.fields.insert(insert_idx, ticket.fields[curr_idx])
                del ticket.fields[curr_idx]

        data['fields'] = fields
        data['fields_map'] = dict((field['name'], i)
                                  for i, field in enumerate(fields))

        # AJAX previews get only the ticket box fragment
        if req.get_header('X-Requested-With') == 'XMLHttpRequest':
            data['preview_mode'] = True
            return 'ticket_box.html', data, None

        add_stylesheet(req, 'common/css/ticket.css')
        add_script(req, 'common/js/folding.js')
        Chrome(self.env).add_wiki_toolbars(req)
        Chrome(self.env).add_auto_preview(req)
        return 'ticket.html', data, None
    def _process_ticket_request(self, req):
        """Handle requests for an existing ticket: plain view, history and
        diff views, comment history/diff, comment editing, previews and
        workflow-driven saves (the latter redirect on success).
        """
        id = int(req.args.get('id'))
        version = as_int(req.args.get('version'), None)
        xhr = req.get_header('X-Requested-With') == 'XMLHttpRequest'

        # AJAX rendering of an edited comment; req.send() presumably
        # terminates request processing here
        if xhr and 'preview_comment' in req.args:
            context = web_context(req, 'ticket', id, version)
            escape_newlines = self.must_preserve_newlines
            rendered = format_to_html(self.env, context,
                                      req.args.get('edited_comment', ''),
                                      escape_newlines=escape_newlines)
            req.send(rendered.encode('utf-8'))

        req.perm('ticket', id, version).require('TICKET_VIEW')
        ticket = Ticket(self.env, id, version=version)
        # default action is 'history' when only ?history is given, else 'view'
        action = req.args.get('action', ('history' in req.args and 'history' or
                                         'view'))

        data = self._prepare_data(req, ticket)

        if action in ('history', 'diff'):
            field = req.args.get('field')
            if field:
                text_fields = [field]
            else:
                # all wiki-text fields can be diffed
                text_fields = [field['name'] for field in ticket.fields if
                               field['type'] == 'textarea']
            if action == 'history':
                return self._render_history(req, ticket, data, text_fields)
            elif action == 'diff':
                return self._render_diff(req, ticket, data, text_fields)
        elif action == 'comment-history':
            cnum = int(req.args['cnum'])
            return self._render_comment_history(req, ticket, data, cnum)
        elif action == 'comment-diff':
            cnum = int(req.args['cnum'])
            return self._render_comment_diff(req, ticket, data, cnum)
        elif 'preview_comment' in req.args:
            # non-AJAX comment preview: render the full page with no action
            field_changes = {}
            data.update({'action': None,
                         'reassign_owner': req.authname,
                         'resolve_resolution': None,
                         'start_time': ticket['changetime']})
        elif req.method == 'POST':
            if 'cancel_comment' in req.args:
                req.redirect(req.href.ticket(ticket.id))
            elif 'edit_comment' in req.args:
                comment = req.args.get('edited_comment', '')
                cnum = int(req.args['cnum_edit'])
                change = ticket.get_change(cnum)
                # authors may edit their own comments; anyone else needs
                # TICKET_EDIT_COMMENT
                if not (req.authname and req.authname != 'anonymous'
                        and change and change['author'] == req.authname):
                    req.perm(ticket.resource).require('TICKET_EDIT_COMMENT')
                ticket.modify_comment(change['date'], req.authname, comment)
                req.redirect(req.href.ticket(ticket.id) + '#comment:%d' % cnum)

            valid = True

            # Do any action on the ticket?
            actions = TicketSystem(self.env).get_available_actions(req, ticket)
            if action not in actions:
                valid = False
                add_warning(req, _('The action "%(name)s" is not available.',
                                   name=action))

            # We have a bit of a problem. There are two sources of changes to
            # the ticket: the user, and the workflow. We need to show all the
            # changes that are proposed, but we need to be able to drop the
            # workflow changes if the user changes the action they want to do
            # from one preview to the next.
            #
            # the _populate() call pulls all the changes from the webpage; but
            # the webpage includes both changes by the user and changes by the
            # workflow... so we aren't able to differentiate them clearly.

            self._populate(req, ticket) # Apply changes made by the user
            field_changes, problems = self.get_ticket_changes(req, ticket,
                                                              action)
            if problems:
                valid = False
                for problem in problems:
                    add_warning(req, problem)
                add_warning(req,
                            tag_("Please review your configuration, "
                                 "probably starting with %(section)s "
                                 "in your %(tracini)s.",
                                 section=tag.pre('[ticket]', tag.br(),
                                                 'workflow = ...'),
                                 tracini=tag.tt('trac.ini')))

            # Apply changes made by the workflow
            self._apply_ticket_changes(ticket, field_changes)
            # Unconditionally run the validation so that the user gets
            # information any and all problems. But it's only valid if it
            # validates and there were no problems with the workflow side of
            # things.
            valid = self._validate_ticket(req, ticket, not valid) and valid
            if 'submit' in req.args:
                if valid:
                    # redirected if successful
                    self._do_save(req, ticket, action)
                # else fall through in a preview
                req.args['preview'] = True

            # Preview an existing ticket (after a Preview or a failed Save)
            start_time = from_utimestamp(long(req.args.get('start_time', 0)))
            data.update({
                'action': action, 'start_time': start_time,
                'reassign_owner': (req.args.get('reassign_choice')
                                   or req.authname),
                'resolve_resolution': req.args.get('resolve_choice'),
                'valid': valid
                })
        else: # simply 'View'ing the ticket
            field_changes = {}
            data.update({'action': None,
                         'reassign_owner': req.authname,
                         'resolve_resolution': None,
                         # Store a timestamp for detecting "mid air collisions"
                         'start_time': ticket['changetime']})

        data.update({'comment': req.args.get('comment'),
                     'cnum_edit': req.args.get('cnum_edit'),
                     'edited_comment': req.args.get('edited_comment'),
                     'cnum_hist': req.args.get('cnum_hist'),
                     'cversion': req.args.get('cversion')})

        self._insert_ticket_data(req, ticket, data,
                                 get_reporter_id(req, 'author'), field_changes)

        if xhr:
            data['preview_mode'] = bool(data['change_preview']['fields'])
            return 'ticket_preview.html', data, None

        mime = Mimeview(self.env)
        format = req.args.get('format')
        if format:
            # FIXME: mime.send_converted(context, ticket, 'ticket_x') (#3332)
            filename = 't%d' % ticket.id if format != 'rss' else None
            mime.send_converted(req, 'trac.ticket.Ticket', ticket,
                                format, filename=filename)

        def add_ticket_link(css_class, id):
            # add a navigation <link> pointing to another ticket
            t = ticket.resource(id=id, version=None)
            if t:
                add_link(req, css_class, req.href.ticket(id),
                         _("Ticket #%(id)s", id=id))

        global_sequence = True
        # If the ticket is being shown in the context of a query, add
        # links to help navigate in the query result set
        if 'query_tickets' in req.session:
            tickets = req.session['query_tickets'].split()
            if str(ticket.id) in tickets:
                idx = tickets.index(str(ticket.id))
                if idx > 0:
                    add_ticket_link('first', tickets[0])
                    add_ticket_link('prev', tickets[idx - 1])
                if idx < len(tickets) - 1:
                    add_ticket_link('next', tickets[idx + 1])
                    add_ticket_link('last', tickets[-1])
                add_link(req, 'up', req.session['query_href'])
                global_sequence = False
        if global_sequence:
            # no query context: navigate over the whole ticket id range
            with self.env.db_query as db:
                for min_id, max_id in db(
                        "SELECT min(id), max(id) FROM ticket"):
                    min_id = int(min_id)
                    max_id = int(max_id)
                    if min_id < ticket.id:
                        add_ticket_link('first', min_id)
                        for prev_id, in db(
                                "SELECT max(id) FROM ticket WHERE id < %s",
                                (ticket.id,)):
                            add_ticket_link('prev', int(prev_id))
                    if ticket.id < max_id:
                        add_ticket_link('last', max_id)
                        for next_id, in db(
                                "SELECT min(id) FROM ticket WHERE %s < id",
                                (ticket.id,)):
                            add_ticket_link('next', int(next_id))
                    break

        add_script_data(req, {'comments_prefs': self._get_prefs(req)})
        add_stylesheet(req, 'common/css/ticket.css')
        add_script(req, 'common/js/folding.js')
        Chrome(self.env).add_wiki_toolbars(req)
        Chrome(self.env).add_auto_preview(req)

        # Add registered converters
        for conversion in mime.get_supported_conversions('trac.ticket.Ticket'):
            format = conversion[0]
            conversion_href = get_resource_url(self.env, ticket.resource,
                                               req.href, format=format)
            if format == 'rss':
                conversion_href = auth_link(req, conversion_href)
            add_link(req, 'alternate', conversion_href, conversion[1],
                     conversion[4], format)

        prevnext_nav(req, _("Previous Ticket"), _("Next Ticket"),
                     _("Back to Query"))

        return 'ticket.html', data, None
def _get_prefs(self, req):
return {'comments_order': req.session.get('ticket_comments_order',
'oldest'),
'comments_only': req.session.get('ticket_comments_only',
'false')}
    def _prepare_data(self, req, ticket, absurls=False):
        """Return the base template data dict shared by the ticket views.

        :param absurls: if `True`, the rendering context will generate
                        absolute URLs
        """
        # NOTE(review): 'emtpy' looks like a misspelling of 'empty' --
        # confirm whether the templates reference 'emtpy' before renaming,
        # as changing the key changes template data at runtime.
        return {'ticket': ticket, 'to_utimestamp': to_utimestamp,
                'context': web_context(req, ticket.resource, absurls=absurls),
                'preserve_newlines': self.must_preserve_newlines,
                'emtpy': empty}
def _cc_list(self, cc):
return Chrome(self.env).cc_list(cc)
def _toggle_cc(self, req, cc):
"""Return an (action, recipient) tuple corresponding to a change
of CC status for this user relative to the current `cc_list`."""
entries = []
email = req.session.get('email', '').strip()
if email:
entries.append(email)
if req.authname != 'anonymous':
entries.append(req.authname)
else:
author = get_reporter_id(req, 'author').strip()
if author and author != 'anonymous':
email = author.split()[-1]
if (email[0], email[-1]) == ('<', '>'):
email = email[1:-1]
entries.append(email)
add = []
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/admin.py | trac/trac/ticket/admin.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from __future__ import with_statement
from datetime import datetime
from trac.admin import *
from trac.core import *
from trac.perm import PermissionSystem
from trac.resource import ResourceNotFound
from trac.ticket import model
from trac.util import getuser
from trac.util.datefmt import utc, parse_date, format_date, format_datetime, \
get_datetime_format_hint, user_time
from trac.util.text import print_table, printout, exception_to_unicode
from trac.util.translation import _, N_, gettext
from trac.web.chrome import Chrome, add_notice, add_warning
class TicketAdminPanel(Component):
    """Abstract base for the ticket-related admin panels: provides the
    common panel registration and the admin panel rendering entry point.
    Subclasses set `_type`/`_label` and implement `_render_admin_panel`.
    """

    implements(IAdminPanelProvider, IAdminCommandProvider)
    abstract = True

    # (singular, plural) display labels, overridden by subclasses
    _label = (N_('(Undefined)'), N_('(Undefined)'))

    # i18n note: use gettext() whenever referring to the above as text labels,
    # and don't use it whenever using them as field names (after
    # a call to `.lower()`)

    # IAdminPanelProvider methods

    def get_admin_panels(self, req):
        if 'TICKET_ADMIN' in req.perm:
            # in global scope show only products
            # in local scope everything but products
            parent = getattr(self.env, 'parent', None)
            if (parent is None and self._type == 'products') or \
                    (parent and self._type != 'products'):
                yield ('ticket', _('Ticket System'), self._type,
                       gettext(self._label[1]))

    def render_admin_panel(self, req, cat, page, version):
        req.perm.require('TICKET_ADMIN')
        # Trap AssertionErrors and convert them to TracErrors
        try:
            return self._render_admin_panel(req, cat, page, version)
        except AssertionError, e:
            raise TracError(e)
def _save_config(config, req, log):
    """Try to save the config, and display either a success notice or a
    failure warning.

    :param config: the `Configuration` to persist (must have `save()`)
    :param req: the current request, used for the notice/warning
    :param log: logger used to record the failure details
    """
    try:
        config.save()
        add_notice(req, _('Your changes have been saved.'))
    except Exception, e:
        # best-effort: report the failure instead of propagating it
        log.error('Error writing to trac.ini: %s', exception_to_unicode(e))
        add_warning(req, _('Error writing to trac.ini, make sure it is '
                           'writable by the web server. Your changes have not '
                           'been saved.'))
class ComponentAdminPanel(TicketAdminPanel):
    """Web admin panel and trac-admin commands for ticket components."""

    _type = 'components'
    _label = (N_('Component'), N_('Components'))

    # TicketAdminPanel methods

    def _render_admin_panel(self, req, cat, page, component):
        # Detail view?
        if component:
            comp = model.Component(self.env, component)
            if req.method == 'POST':
                if req.args.get('save'):
                    comp.name = name = req.args.get('name')
                    comp.owner = req.args.get('owner')
                    comp.description = req.args.get('description')
                    try:
                        comp.update()
                    except self.env.db_exc.IntegrityError:
                        # renaming collided with an existing component
                        raise TracError(_('The component "%(name)s" already '
                                          'exists.', name=name))
                    add_notice(req, _('Your changes have been saved.'))
                    req.redirect(req.href.admin(cat, page))
                elif req.args.get('cancel'):
                    req.redirect(req.href.admin(cat, page))
            Chrome(self.env).add_wiki_toolbars(req)
            data = {'view': 'detail', 'component': comp}
        else:
            default = self.config.get('ticket', 'default_component')
            if req.method == 'POST':
                # Add Component
                if req.args.get('add') and req.args.get('name'):
                    name = req.args.get('name')
                    # fetch first: only insert when the name is unknown
                    try:
                        comp = model.Component(self.env, name=name)
                    except ResourceNotFound:
                        comp = model.Component(self.env)
                        comp.name = name
                        if req.args.get('owner'):
                            comp.owner = req.args.get('owner')
                        comp.insert()
                        add_notice(req, _('The component "%(name)s" has been '
                                          'added.', name=name))
                        req.redirect(req.href.admin(cat, page))
                    else:
                        if comp.name is None:
                            raise TracError(_("Invalid component name."))
                        raise TracError(_("Component %(name)s already exists.",
                                          name=name))
                # Remove components
                elif req.args.get('remove'):
                    sel = req.args.get('sel')
                    if not sel:
                        raise TracError(_('No component selected'))
                    if not isinstance(sel, list):
                        sel = [sel]
                    with self.env.db_transaction:
                        for name in sel:
                            model.Component(self.env, name).delete()
                    add_notice(req, _("The selected components have been "
                                      "removed."))
                    req.redirect(req.href.admin(cat, page))
                # Set default component
                elif req.args.get('apply'):
                    name = req.args.get('default')
                    if name and name != default:
                        self.log.info("Setting default component to %s", name)
                        self.config.set('ticket', 'default_component', name)
                        _save_config(self.config, req, self.log)
                        req.redirect(req.href.admin(cat, page))
            data = {'view': 'list',
                    'components': model.Component.select(self.env),
                    'default': default}

        if self.config.getbool('ticket', 'restrict_owner'):
            # restrict the owner drop-down to users with TICKET_MODIFY
            perm = PermissionSystem(self.env)
            def valid_owner(username):
                return perm.get_user_permissions(username).get('TICKET_MODIFY')
            data['owners'] = [username for username, name, email
                              in self.env.get_known_users()
                              if valid_owner(username)]
            data['owners'].insert(0, '')
            data['owners'].sort()
        else:
            data['owners'] = None
        return 'admin_components.html', data

    # IAdminCommandProvider methods

    def get_admin_commands(self):
        yield ('component list', '',
               'Show available components',
               None, self._do_list)
        yield ('component add', '<name> <owner>',
               'Add a new component',
               self._complete_add, self._do_add)
        yield ('component rename', '<name> <newname>',
               'Rename a component',
               self._complete_remove_rename, self._do_rename)
        yield ('component remove', '<name>',
               'Remove/uninstall a component',
               self._complete_remove_rename, self._do_remove)
        yield ('component chown', '<name> <owner>',
               'Change component ownership',
               self._complete_chown, self._do_chown)

    def get_component_list(self):
        return [c.name for c in model.Component.select(self.env)]

    def get_user_list(self):
        # users known via the permission table
        return [username for username, in
                self.env.db_query("SELECT DISTINCT username FROM permission")]

    def _complete_add(self, args):
        if len(args) == 2:
            return self.get_user_list()

    def _complete_remove_rename(self, args):
        if len(args) == 1:
            return self.get_component_list()

    def _complete_chown(self, args):
        if len(args) == 1:
            return self.get_component_list()
        elif len(args) == 2:
            return self.get_user_list()

    def _do_list(self):
        print_table([(c.name, c.owner)
                     for c in model.Component.select(self.env)],
                    [_('Name'), _('Owner')])

    def _do_add(self, name, owner):
        component = model.Component(self.env)
        component.name = name
        component.owner = owner
        component.insert()

    def _do_rename(self, name, newname):
        component = model.Component(self.env, name)
        component.name = newname
        component.update()

    def _do_remove(self, name):
        model.Component(self.env, name).delete()

    def _do_chown(self, name, owner):
        component = model.Component(self.env, name)
        component.owner = owner
        component.update()
class MilestoneAdminPanel(TicketAdminPanel):
    """Web admin panel and trac-admin commands for milestones."""

    _type = 'milestones'
    _label = (N_('Milestone'), N_('Milestones'))

    # IAdminPanelProvider methods

    def get_admin_panels(self, req):
        # milestone panel additionally requires MILESTONE_VIEW
        if 'MILESTONE_VIEW' in req.perm:
            return TicketAdminPanel.get_admin_panels(self, req)
        return iter([])

    # TicketAdminPanel methods

    def _render_admin_panel(self, req, cat, page, milestone):
        req.perm.require('MILESTONE_VIEW')

        # Detail view?
        if milestone:
            mil = model.Milestone(self.env, milestone)
            if req.method == 'POST':
                if req.args.get('save'):
                    req.perm.require('MILESTONE_MODIFY')
                    mil.name = name = req.args.get('name')
                    mil.due = mil.completed = None
                    due = req.args.get('duedate', '')
                    if due:
                        mil.due = user_time(req, parse_date, due,
                                            hint='datetime')
                    if req.args.get('completed', False):
                        completed = req.args.get('completeddate', '')
                        mil.completed = user_time(req, parse_date, completed,
                                                  hint='datetime')
                        if mil.completed > datetime.now(utc):
                            raise TracError(_('Completion date may not be in '
                                              'the future'),
                                            _('Invalid Completion Date'))
                    mil.description = req.args.get('description', '')
                    try:
                        mil.update()
                    except self.env.db_exc.IntegrityError:
                        raise TracError(_('The milestone "%(name)s" already '
                                          'exists.', name=name))
                    add_notice(req, _('Your changes have been saved.'))
                    req.redirect(req.href.admin(cat, page))
                elif req.args.get('cancel'):
                    req.redirect(req.href.admin(cat, page))
            Chrome(self.env).add_wiki_toolbars(req)
            data = {'view': 'detail', 'milestone': mil}
        else:
            default = self.config.get('ticket', 'default_milestone')
            if req.method == 'POST':
                # Add Milestone
                if req.args.get('add') and req.args.get('name'):
                    req.perm.require('MILESTONE_CREATE')
                    name = req.args.get('name')
                    # fetch first: only insert when the name is unknown
                    try:
                        mil = model.Milestone(self.env, name=name)
                    except ResourceNotFound:
                        mil = model.Milestone(self.env)
                        mil.name = name
                        if req.args.get('duedate'):
                            mil.due = user_time(req, parse_date,
                                                req.args.get('duedate'),
                                                hint='datetime')
                        mil.insert()
                        add_notice(req, _('The milestone "%(name)s" has been '
                                          'added.', name=name))
                        req.redirect(req.href.admin(cat, page))
                    else:
                        if mil.name is None:
                            raise TracError(_('Invalid milestone name.'))
                        raise TracError(_("Milestone %(name)s already exists.",
                                          name=name))
                # Remove milestone
                elif req.args.get('remove'):
                    req.perm.require('MILESTONE_DELETE')
                    sel = req.args.get('sel')
                    if not sel:
                        raise TracError(_('No milestone selected'))
                    if not isinstance(sel, list):
                        sel = [sel]
                    with self.env.db_transaction:
                        for name in sel:
                            mil = model.Milestone(self.env, name)
                            mil.delete(author=req.authname)
                    add_notice(req, _("The selected milestones have been "
                                      "removed."))
                    req.redirect(req.href.admin(cat, page))
                # Set default milestone
                elif req.args.get('apply'):
                    name = req.args.get('default')
                    if name and name != default:
                        self.log.info("Setting default milestone to %s", name)
                        self.config.set('ticket', 'default_milestone', name)
                        _save_config(self.config, req, self.log)
                        req.redirect(req.href.admin(cat, page))
            # Get ticket count
            milestones = [
                (milestone, self.env.db_query("""
                    SELECT COUNT(*) FROM ticket WHERE milestone=%s
                    """, (milestone.name,))[0][0])
                for milestone in model.Milestone.select(self.env)]
            data = {'view': 'list',
                    'milestones': milestones,
                    'default': default}

        # jQuery UI provides the date pickers used by the panel
        Chrome(self.env).add_jquery_ui(req)
        data.update({
            'datetime_hint': get_datetime_format_hint(req.lc_time),
        })
        return 'admin_milestones.html', data

    # IAdminCommandProvider methods

    def get_admin_commands(self):
        yield ('milestone list', '',
               "Show milestones",
               None, self._do_list)
        yield ('milestone add', '<name> [due]',
               "Add milestone",
               None, self._do_add)
        yield ('milestone rename', '<name> <newname>',
               "Rename milestone",
               self._complete_name, self._do_rename)
        yield ('milestone due', '<name> <due>',
               """Set milestone due date

               The <due> date must be specified in the "%s" format.
               Alternatively, "now" can be used to set the due date to the
               current time. To remove the due date from a milestone, specify
               an empty string ("").
               """ % console_date_format_hint,
               self._complete_name, self._do_due)
        yield ('milestone completed', '<name> <completed>',
               """Set milestone complete date

               The <completed> date must be specified in the "%s" format.
               Alternatively, "now" can be used to set the completion date to
               the current time. To remove the completion date from a
               milestone, specify an empty string ("").
               """ % console_date_format_hint,
               self._complete_name, self._do_completed)
        yield ('milestone remove', '<name>',
               "Remove milestone",
               self._complete_name, self._do_remove)

    def get_milestone_list(self):
        return [m.name for m in model.Milestone.select(self.env)]

    def _complete_name(self, args):
        if len(args) == 1:
            return self.get_milestone_list()

    def _do_list(self):
        print_table([(m.name, m.due and
                      format_date(m.due, console_date_format),
                      m.completed and
                      format_datetime(m.completed, console_datetime_format))
                     for m in model.Milestone.select(self.env)],
                    [_("Name"), _("Due"), _("Completed")])

    def _do_add(self, name, due=None):
        milestone = model.Milestone(self.env)
        milestone.name = name
        if due is not None:
            milestone.due = parse_date(due, hint='datetime')
        milestone.insert()

    def _do_rename(self, name, newname):
        milestone = model.Milestone(self.env, name)
        milestone.name = newname
        milestone.update()

    def _do_due(self, name, due):
        milestone = model.Milestone(self.env, name)
        # empty string clears the due date
        milestone.due = due and parse_date(due, hint='datetime')
        milestone.update()

    def _do_completed(self, name, completed):
        milestone = model.Milestone(self.env, name)
        # empty string clears the completion date
        milestone.completed = completed and parse_date(completed,
                                                       hint='datetime')
        milestone.update()

    def _do_remove(self, name):
        model.Milestone(self.env, name).delete(author=getuser())
class VersionAdminPanel(TicketAdminPanel):
    """Web admin panel and trac-admin commands for ticket versions."""

    _type = 'versions'
    _label = (N_('Version'), N_('Versions'))

    # TicketAdminPanel methods

    def _render_admin_panel(self, req, cat, page, version):
        # Detail view?
        if version:
            ver = model.Version(self.env, version)
            if req.method == 'POST':
                if req.args.get('save'):
                    ver.name = name = req.args.get('name')
                    if req.args.get('time'):
                        ver.time = user_time(req, parse_date,
                                             req.args.get('time'),
                                             hint='datetime')
                    else:
                        ver.time = None # unset
                    ver.description = req.args.get('description')
                    try:
                        ver.update()
                    except self.env.db_exc.IntegrityError:
                        raise TracError(_('The version "%(name)s" already '
                                          'exists.', name=name))
                    add_notice(req, _('Your changes have been saved.'))
                    req.redirect(req.href.admin(cat, page))
                elif req.args.get('cancel'):
                    req.redirect(req.href.admin(cat, page))
            Chrome(self.env).add_wiki_toolbars(req)
            data = {'view': 'detail', 'version': ver}
        else:
            default = self.config.get('ticket', 'default_version')
            if req.method == 'POST':
                # Add Version
                if req.args.get('add') and req.args.get('name'):
                    name = req.args.get('name')
                    # fetch first: only insert when the name is unknown
                    try:
                        ver = model.Version(self.env, name=name)
                    except ResourceNotFound:
                        ver = model.Version(self.env)
                        ver.name = name
                        if req.args.get('time'):
                            ver.time = user_time(req, parse_date,
                                                 req.args.get('time'),
                                                 hint='datetime')
                        ver.insert()
                        add_notice(req, _('The version "%(name)s" has been '
                                          'added.', name=name))
                        req.redirect(req.href.admin(cat, page))
                    else:
                        if ver.name is None:
                            raise TracError(_("Invalid version name."))
                        raise TracError(_("Version %(name)s already exists.",
                                          name=name))
                # Remove versions
                elif req.args.get('remove'):
                    sel = req.args.get('sel')
                    if not sel:
                        raise TracError(_("No version selected"))
                    if not isinstance(sel, list):
                        sel = [sel]
                    with self.env.db_transaction:
                        for name in sel:
                            ver = model.Version(self.env, name)
                            ver.delete()
                    add_notice(req, _("The selected versions have been "
                                      "removed."))
                    req.redirect(req.href.admin(cat, page))
                # Set default version
                elif req.args.get('apply'):
                    name = req.args.get('default')
                    if name and name != default:
                        self.log.info("Setting default version to %s", name)
                        self.config.set('ticket', 'default_version', name)
                        _save_config(self.config, req, self.log)
                        req.redirect(req.href.admin(cat, page))
            data = {'view': 'list',
                    'versions': model.Version.select(self.env),
                    'default': default}

        # jQuery UI provides the date pickers used by the panel
        Chrome(self.env).add_jquery_ui(req)
        data.update({
            'datetime_hint': get_datetime_format_hint(req.lc_time),
        })
        return 'admin_versions.html', data

    # IAdminCommandProvider methods

    def get_admin_commands(self):
        yield ('version list', '',
               "Show versions",
               None, self._do_list)
        yield ('version add', '<name> [time]',
               "Add version",
               None, self._do_add)
        yield ('version rename', '<name> <newname>',
               "Rename version",
               self._complete_name, self._do_rename)
        yield ('version time', '<name> <time>',
               """Set version date

               The <time> must be specified in the "%s" format. Alternatively,
               "now" can be used to set the version date to the current time.
               To remove the date from a version, specify an empty string
               ("").
               """ % console_date_format_hint,
               self._complete_name, self._do_time)
        yield ('version remove', '<name>',
               "Remove version",
               self._complete_name, self._do_remove)

    def get_version_list(self):
        return [v.name for v in model.Version.select(self.env)]

    def _complete_name(self, args):
        if len(args) == 1:
            return self.get_version_list()

    def _do_list(self):
        print_table([(v.name,
                      v.time and format_date(v.time, console_date_format))
                     for v in model.Version.select(self.env)],
                    [_("Name"), _("Time")])

    def _do_add(self, name, time=None):
        version = model.Version(self.env)
        version.name = name
        if time is not None:
            # empty string leaves the date unset
            version.time = time and parse_date(time, hint='datetime')
        version.insert()

    def _do_rename(self, name, newname):
        version = model.Version(self.env, name)
        version.name = newname
        version.update()

    def _do_time(self, name, time):
        version = model.Version(self.env, name)
        # empty string clears the date
        version.time = time and parse_date(time, hint='datetime')
        version.update()

    def _do_remove(self, name):
        model.Version(self.env, name).delete()
class AbstractEnumAdminPanel(TicketAdminPanel):
    """Shared admin panel / command implementation for the enum-like
    ticket fields (priority, resolution, severity, type).  Subclasses
    set `_type`, `_enum_cls` and `_label`.
    """

    abstract = True

    _type = 'unknown'
    # the `trac.ticket.model` enum class managed by the panel
    _enum_cls = None

    # TicketAdminPanel methods

    def _render_admin_panel(self, req, cat, page, path_info):
        label = [gettext(each) for each in self._label]
        data = {'label_singular': label[0], 'label_plural': label[1],
                'type': self._type}

        # Detail view?
        if path_info:
            enum = self._enum_cls(self.env, path_info)
            if req.method == 'POST':
                if req.args.get('save'):
                    enum.name = name = req.args.get('name')
                    try:
                        enum.update()
                    except self.env.db_exc.IntegrityError:
                        raise TracError(_('%(type)s value "%(name)s" already '
                                          'exists', type=label[0], name=name))
                    add_notice(req, _("Your changes have been saved."))
                    req.redirect(req.href.admin(cat, page))
                elif req.args.get('cancel'):
                    req.redirect(req.href.admin(cat, page))
            data.update({'view': 'detail', 'enum': enum})
        else:
            default = self.config.get('ticket', 'default_%s' % self._type)
            if req.method == 'POST':
                # Add enum
                if req.args.get('add') and req.args.get('name'):
                    name = req.args.get('name')
                    # fetch first: only insert when the name is unknown
                    try:
                        enum = self._enum_cls(self.env, name=name)
                    except ResourceNotFound:
                        enum = self._enum_cls(self.env)
                        enum.name = name
                        enum.insert()
                        add_notice(req, _('The %(field)s value "%(name)s" has '
                                          'been added.',
                                          field=label[0], name=name))
                        req.redirect(req.href.admin(cat, page))
                    else:
                        if enum.name is None:
                            raise TracError(_("Invalid %(type)s value.",
                                              type=label[0]))
                        raise TracError(_('%(type)s value "%(name)s" already '
                                          'exists', type=label[0], name=name))
                # Remove enums
                elif req.args.get('remove'):
                    sel = req.args.get('sel')
                    if not sel:
                        raise TracError(_("No %s selected") % self._type)
                    if not isinstance(sel, list):
                        sel = [sel]
                    with self.env.db_transaction:
                        for name in sel:
                            self._enum_cls(self.env, name).delete()
                    add_notice(req, _("The selected %(field)s values have "
                                      "been removed.", field=label[0]))
                    req.redirect(req.href.admin(cat, page))
                # Apply changes
                elif req.args.get('apply'):
                    changed = False
                    # Set default value
                    name = req.args.get('default')
                    if name and name != default:
                        self.log.info("Setting default %s to %s",
                                      self._type, name)
                        self.config.set('ticket', 'default_%s' % self._type,
                                        name)
                        try:
                            self.config.save()
                            changed = True
                        except Exception, e:
                            self.log.error("Error writing to trac.ini: %s",
                                           exception_to_unicode(e))
                            add_warning(req,
                                        _("Error writing to trac.ini, make "
                                          "sure it is writable by the web "
                                          "server. The default value has not "
                                          "been saved."))
                    # Change enum values
                    # map old order value -> new order value from the
                    # submitted 'value_<n>' form fields
                    order = dict([(str(int(key[6:])),
                                   str(int(req.args.get(key)))) for key
                                  in req.args.keys()
                                  if key.startswith('value_')])
                    values = dict([(val, True) for val in order.values()])
                    if len(order) != len(values):
                        raise TracError(_("Order numbers must be unique"))
                    with self.env.db_transaction:
                        for enum in self._enum_cls.select(self.env):
                            new_value = order[enum.value]
                            if new_value != enum.value:
                                enum.value = new_value
                                enum.update()
                                changed = True
                    if changed:
                        add_notice(req, _("Your changes have been saved."))
                    req.redirect(req.href.admin(cat, page))
            data.update(dict(enums=list(self._enum_cls.select(self.env)),
                             default=default, view='list'))
        return 'admin_enums.html', data

    # IAdminCommandProvider methods

    # %s placeholders are filled with the lowercase singular/plural label
    _command_help = {
        'list': 'Show possible ticket %s',
        'add': 'Add a %s value option',
        'change': 'Change a %s value',
        'remove': 'Remove a %s value',
        'order': 'Move a %s value up or down in the list',
    }

    def get_admin_commands(self):
        enum_type = getattr(self, '_command_type', self._type)
        label = tuple(each.lower() for each in self._label)
        yield ('%s list' % enum_type, '',
               self._command_help['list'] % label[1],
               None, self._do_list)
        yield ('%s add' % enum_type, '<value>',
               self._command_help['add'] % label[0],
               None, self._do_add)
        yield ('%s change' % enum_type, '<value> <newvalue>',
               self._command_help['change'] % label[0],
               self._complete_change_remove, self._do_change)
        yield ('%s remove' % enum_type, '<value>',
               self._command_help['remove'] % label[0],
               self._complete_change_remove, self._do_remove)
        yield ('%s order' % enum_type, '<value> up|down',
               self._command_help['order'] % label[0],
               self._complete_order, self._do_order)

    def get_enum_list(self):
        return [e.name for e in self._enum_cls.select(self.env)]

    def _complete_change_remove(self, args):
        if len(args) == 1:
            return self.get_enum_list()

    def _complete_order(self, args):
        if len(args) == 1:
            return self.get_enum_list()
        elif len(args) == 2:
            return ['up', 'down']

    def _do_list(self):
        print_table([(e.name,) for e in self._enum_cls.select(self.env)],
                    [_('Possible Values')])

    def _do_add(self, name):
        enum = self._enum_cls(self.env)
        enum.name = name
        enum.insert()

    def _do_change(self, name, newname):
        enum = self._enum_cls(self.env, name)
        enum.name = newname
        enum.update()

    def _do_remove(self, value):
        self._enum_cls(self.env, value).delete()

    def _do_order(self, name, up_down):
        """Swap the order value of `name` with its up/down neighbour."""
        if up_down not in ('up', 'down'):
            raise AdminCommandError(_("Invalid up/down value: %(value)s",
                                      value=up_down))
        direction = -1 if up_down == 'up' else 1
        enum1 = self._enum_cls(self.env, name)
        enum1.value = int(float(enum1.value) + direction)
        # find the enum currently holding the target position
        for enum2 in self._enum_cls.select(self.env):
            if int(float(enum2.value)) == enum1.value:
                enum2.value = int(float(enum2.value) - direction)
                break
        else:
            # already at the boundary -- nothing to swap with
            return
        with self.env.db_transaction:
            enum1.update()
            enum2.update()
class PriorityAdminPanel(AbstractEnumAdminPanel):
    """Admin panel / commands for the ticket 'priority' enum."""
    _type = 'priority'
    _enum_cls = model.Priority
    _label = (N_('Priority'), N_('Priorities'))
class ResolutionAdminPanel(AbstractEnumAdminPanel):
    """Admin panel / commands for the ticket 'resolution' enum."""
    _type = 'resolution'
    _enum_cls = model.Resolution
    _label = (N_('Resolution'), N_('Resolutions'))
class SeverityAdminPanel(AbstractEnumAdminPanel):
    """Admin panel / commands for the ticket 'severity' enum."""
    _type = 'severity'
    _enum_cls = model.Severity
    _label = (N_('Severity'), N_('Severities'))
class TicketTypeAdminPanel(AbstractEnumAdminPanel):
_type = 'type'
_enum_cls = model.Type
_label = (N_('Ticket Type'), N_('Ticket Types'))
_command_type = 'ticket_type'
_command_help = {
'list': 'Show possible %s',
'add': 'Add a %s',
'change': 'Change a %s',
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/query.py | trac/trac/ticket/query.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import csv
from itertools import groupby
from math import ceil
from datetime import datetime, timedelta
import re
from StringIO import StringIO
from genshi.builder import tag
from trac.config import Option, IntOption
from trac.core import *
from trac.db import get_column_names
from trac.mimeview.api import IContentConverter, Mimeview
from trac.resource import Resource
from trac.ticket.api import TicketSystem
from trac.ticket.model import Milestone, group_milestones, Ticket
from trac.util import Ranges, as_bool
from trac.util.datefmt import format_date, format_datetime, from_utimestamp, \
parse_date, to_timestamp, to_utimestamp, utc, \
user_time
from trac.util.presentation import Paginator
from trac.util.text import empty, shorten_line, quote_query_string
from trac.util.translation import _, tag_, cleandoc_
from trac.web import arg_list_to_args, parse_arg_list, IRequestHandler
from trac.web.href import Href
from trac.web.chrome import (INavigationContributor, Chrome,
add_ctxtnav, add_link, add_script,
add_script_data, add_stylesheet, add_warning,
web_context)
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.macros import WikiMacroBase # TODO: should be moved in .api
# Raised by Query.from_string() when a filter is malformed.
class QuerySyntaxError(TracError):
    """Exception raised when a ticket query cannot be parsed from a string."""
class QueryValueError(TracError):
    """Exception raised when a ticket query has bad constraint values."""

    def __init__(self, errors):
        # `errors` is a list of human-readable messages, one per invalid
        # constraint value (collected in Query.get_sql()).
        TracError.__init__(self, _('Invalid query constraint value'))
        self.errors = errors
class Query(object):
    """A ticket query: constraints, ordering, grouping and paging."""

    # Placeholders replaced with dynamic values when the query runs
    # (e.g. '$USER' becomes the authenticated user name).
    substitutions = ['$USER']
    # Matches request-argument keys of the form "<clause number>_<field>".
    clause_re = re.compile(r'(?P<clause>\d+)_(?P<field>.+)$')
def __init__(self, env, report=None, constraints=None, cols=None,
             order=None, desc=0, group=None, groupdesc=0, verbose=0,
             rows=None, page=None, max=None, format=None):
    """Create a ticket query.

    :param env: the `Environment`
    :param report: id of the saved query this corresponds to, or `None`
    :param constraints: list of constraint dicts, one per "or" clause
                        (a single dict is also accepted)
    :param cols: columns to display; unknown field names are dropped
    :param order: field to sort by; invalid names fall back to 'priority'
    :param desc: sort descending if true
    :param group: field to group by (dropped if not a valid field)
    :param groupdesc: group in descending order if true
    :param verbose: 0.10 compatibility; implies the 'description' row
    :param rows: extra full-width rows to display per ticket
    :param page: 1-based result page number
    :param max: items per page; 0 shows everything on one page
    :param format: output format (e.g. 'rss', 'csv')

    :raises TracError: if `page` or `max` is not a valid number
    """
    self.env = env
    self.id = report # if not None, it's the corresponding saved query
    constraints = constraints or []
    if isinstance(constraints, dict):
        constraints = [constraints]
    self.constraints = constraints
    synonyms = TicketSystem(self.env).get_field_synonyms()
    self.order = synonyms.get(order, order) # 0.11 compatibility
    self.desc = desc
    self.group = group
    self.groupdesc = groupdesc
    self.format = format
    self.default_page = 1
    self.items_per_page = QueryModule(self.env).items_per_page

    # getting page number (default_page if unspecified)
    if not page:
        page = self.default_page
    try:
        self.page = int(page)
        if self.page < 1:
            raise ValueError()
    except ValueError:
        raise TracError(_('Query page %(page)s is invalid.', page=page))

    # max=0 signifies showing all items on one page
    # max=n will show precisely n items on all pages except the last
    # max<0 is invalid
    if max in ('none', ''):
        max = 0
    if max is None: # meaning unspecified
        max = self.items_per_page
    try:
        self.max = int(max)
        if self.max < 0:
            raise ValueError()
    except ValueError:
        raise TracError(_('Query max %(max)s is invalid.', max=max))

    if self.max == 0:
        self.has_more_pages = False
        self.offset = 0
    else:
        self.has_more_pages = True
        self.offset = self.max * (self.page - 1)

    if rows == None:
        rows = []
    if verbose and 'description' not in rows: # 0.10 compatibility
        rows.append('description')
    self.fields = TicketSystem(self.env).get_ticket_fields()
    self.time_fields = set(f['name'] for f in self.fields
                           if f['type'] == 'time')
    field_names = set(f['name'] for f in self.fields)
    # Keep only known columns and rows; 'id' is always allowed.
    self.cols = [c for c in cols or [] if c in field_names or
                 c == 'id']
    self.rows = [c for c in rows if c in field_names]
    if self.order != 'id' and self.order not in field_names:
        self.order = 'priority'
    if self.group not in field_names:
        self.group = None

    # Index constraints by field name, dropping unknown fields in place.
    constraint_cols = {}
    for clause in self.constraints:
        for k, v in clause.items():
            if k == 'id' or k in field_names:
                constraint_cols.setdefault(k, []).append(v)
            else:
                clause.pop(k)
    self.constraint_cols = constraint_cols
# Split a query string on "&" / "|" respectively, ignoring
# backslash-escaped separators.
_clause_splitter = re.compile(r'(?<!\\)&')
_item_splitter = re.compile(r'(?<!\\)\|')
@classmethod
def from_string(cls, env, string, **kw):
    """Parse a query from its string form ("field=value&field2=value2").

    Clauses are separated by "&or&" and alternative values by "|".
    Recognized non-field keys are turned into constructor keyword
    arguments (order, group, page, max, format, rows/row, desc,
    groupdesc, verbose, col, report).

    :raises QuerySyntaxError: if a filter lacks a "=" or a field name
    """
    kw_strs = ['order', 'group', 'page', 'max', 'format']
    kw_arys = ['rows']
    kw_bools = ['desc', 'groupdesc', 'verbose']
    kw_synonyms = {'row': 'rows'}
    # i18n TODO - keys will be unicode
    synonyms = TicketSystem(env).get_field_synonyms()
    constraints = [{}]
    cols = []
    report = None

    def as_str(s):
        # **kw keys must be byte strings under Python 2.
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    for filter_ in cls._clause_splitter.split(string):
        if filter_ == 'or':
            # An "or" token starts a new constraint clause.
            constraints.append({})
            continue
        filter_ = filter_.replace(r'\&', '&').split('=', 1)
        if len(filter_) != 2:
            raise QuerySyntaxError(_('Query filter requires field and '
                                     'constraints separated by a "="'))
        field, values = filter_
        # from last chars of `field`, get the mode of comparison
        mode = ''
        if field and field[-1] in ('~', '^', '$') \
                and not field in cls.substitutions:
            mode = field[-1]
            field = field[:-1]
        if field and field[-1] == '!':
            mode = '!' + mode
            field = field[:-1]
        if not field:
            raise QuerySyntaxError(_('Query filter requires field name'))
        field = kw_synonyms.get(field, field)
        # add mode of comparison and remove escapes
        processed_values = [mode + val.replace(r'\|', '|')
                            for val in cls._item_splitter.split(values)]
        if field in kw_strs:
            kw[as_str(field)] = processed_values[0]
        elif field in kw_arys:
            kw.setdefault(as_str(field), []).extend(processed_values)
        elif field in kw_bools:
            kw[as_str(field)] = as_bool(processed_values[0])
        elif field == 'col':
            cols.extend(synonyms.get(value, value)
                        for value in processed_values)
        elif field == 'report':
            report = processed_values[0]
        else:
            constraints[-1].setdefault(synonyms.get(field, field),
                                       []).extend(processed_values)
    # Drop clauses that ended up empty.
    constraints = filter(None, constraints)
    report = kw.pop('report', report)
    return cls(env, report, constraints=constraints, cols=cols, **kw)
def get_columns(self):
    """Return the list of columns to display, guaranteeing 'id' present."""
    cols = self.cols
    if not cols:
        cols = self.get_default_columns()
        self.cols = cols
    if 'id' not in cols:
        # 'id' is required downstream for permission checks.
        cols.insert(0, 'id')
    return cols
def get_all_textareas(self):
    """Return the names of all textarea-type ticket fields."""
    names = []
    for field in self.fields:
        if field['type'] == 'textarea':
            names.append(field['name'])
    return names
def get_all_columns(self):
    """Return every displayable field name, in presentation order."""
    # Prepare the default list of columns
    cols = ['id']
    cols += [f['name'] for f in self.fields if f['type'] != 'textarea']
    # Push these low-interest columns to the end of the list.
    for col in ('reporter', 'keywords', 'cc'):
        if col in cols:
            cols.remove(col)
            cols.append(col)

    def sort_columns(col1, col2):
        # Python 2 cmp-style comparator: id first, summary second,
        # then constrained columns before everything else.
        constrained_fields = self.constraint_cols.keys()
        if 'id' in (col1, col2):
            # Ticket ID is always the first column
            return -1 if col1 == 'id' else 1
        elif 'summary' in (col1, col2):
            # Ticket summary is always the second column
            return -1 if col1 == 'summary' else 1
        elif col1 in constrained_fields or col2 in constrained_fields:
            # Constrained columns appear before other columns
            return -1 if col1 in constrained_fields else 1
        return 0
    cols.sort(sort_columns)
    return cols
def get_default_columns(self):
    """Return the columns shown when the user did not pick any."""
    cols = self.get_all_columns()

    # Semi-intelligently remove columns that are restricted to a single
    # value by a query constraint.
    for col in [k for k in self.constraint_cols.keys()
                if k != 'id' and k in cols]:
        constraints = self.constraint_cols[col]
        for constraint in constraints:
            if not (len(constraint) == 1 and constraint[0]
                    and not constraint[0][0] in '!~^$' and col in cols
                    and col not in self.time_fields):
                break
        else:
            # Every clause pins this column to one exact value: hide it.
            cols.remove(col)
        if col == 'status' and 'resolution' in cols:
            # Resolution is only interesting if 'closed' can match.
            for constraint in constraints:
                if 'closed' in constraint:
                    break
            else:
                cols.remove('resolution')
    if self.group in cols:
        cols.remove(self.group)

    # Only display the first seven columns by default
    cols = cols[:7]
    # Make sure the column we order by is visible, if it isn't also
    # the column we group by
    if not self.order in cols and not self.order == self.group:
        cols[-1] = self.order
    return cols
def count(self, req=None, db=None, cached_ids=None, authname=None,
          tzinfo=None, locale=None):
    """Get the number of matching tickets for the present query.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    # Build the same SQL as execute() would and count its result set.
    sql, args = self.get_sql(req, cached_ids, authname, tzinfo, locale)
    return self._count(sql, args)
def _count(self, sql, args):
    # Wrap the full query in a subselect so we can COUNT(*) its rows.
    cnt = self.env.db_query("SELECT COUNT(*) FROM (%s) AS x"
                            % sql, args)[0][0]
    # "AS x" is needed for MySQL ("Subqueries in the FROM Clause")
    self.env.log.debug("Count results in Query: %d", cnt)
    return cnt
def execute(self, req=None, db=None, cached_ids=None, authname=None,
            tzinfo=None, href=None, locale=None):
    """Retrieve the list of matching tickets.

    Returns a list of dicts, one per ticket, with values converted to
    their Python representation; also sets `self.num_items`.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    if req is not None:
        href = req.href
    with self.env.db_query as db:
        cursor = db.cursor()

        self.num_items = 0
        sql, args = self.get_sql(req, cached_ids, authname, tzinfo, locale)
        self.num_items = self._count(sql, args)

        if self.num_items <= self.max:
            self.has_more_pages = False

        if self.has_more_pages:
            max = self.max
            if self.group:
                # Fetch one extra ticket so template_data() can detect
                # whether the last group continues on the next page.
                max += 1
            sql = sql + " LIMIT %d OFFSET %d" % (max, self.offset)
            if (self.page > int(ceil(float(self.num_items) / self.max)) and
                self.num_items != 0):
                raise TracError(_("Page %(page)s is beyond the number of "
                                  "pages in the query", page=self.page))

        # self.env.log.debug("SQL: " + sql % tuple([repr(a) for a in args]))
        cursor.execute(sql, args)
        columns = get_column_names(cursor)
        # Align each result column with its field definition (or None).
        fields = []
        for column in columns:
            fields += [f for f in self.fields if f['name'] == column] or \
                      [None]
        results = []

        column_indices = range(len(columns))
        for row in cursor:
            result = {}
            for i in column_indices:
                name, field, val = columns[i], fields[i], row[i]
                if name == 'reporter':
                    val = val or 'anonymous'
                elif name == 'id':
                    val = int(val)
                    if href is not None:
                        result['href'] = href.ticket(val)
                elif name in self.time_fields:
                    val = from_utimestamp(val)
                elif field and field['type'] == 'checkbox':
                    try:
                        val = bool(int(val))
                    except (TypeError, ValueError):
                        val = False
                elif val is None:
                    val = ''
                result[name] = val
            results.append(result)
        cursor.close()
        return results
def get_href(self, href, id=None, order=None, desc=None, format=None,
             max=None, page=None):
    """Create a link corresponding to this query.

    :param href: the `Href` object used to build the URL
    :param id: optionally set or override the report `id`
    :param order: optionally override the order parameter of the query
    :param desc: optionally override the desc parameter
    :param format: optionally override the format of the query
    :param max: optionally override the max items per page
    :param page: optionally specify which page of results (defaults to
                 the first)

    Note: `get_resource_url` of a 'query' resource?
    """
    if not isinstance(href, Href):
        href = href.href # compatibility with the `req` of the 0.10 API

    if format is None:
        format = self.format
    if format == 'rss':
        # RSS feeds always show the first page with the default size.
        max = self.items_per_page
        page = self.default_page

    # Fall back to this query's current settings for anything unset.
    if id is None:
        id = self.id
    if desc is None:
        desc = self.desc
    if order is None:
        order = self.order
    if max is None:
        max = self.max
    if page is None:
        page = self.page

    cols = self.get_columns()
    # don't specify the columns in the href if they correspond to
    # the default columns, page and max in the same order. That keeps the
    # query url shorter in the common case where we just want the default
    # columns.
    if cols == self.get_default_columns():
        cols = None
    if page == self.default_page:
        page = None
    if max == self.items_per_page:
        max = None

    # Flatten the clauses into (field, value) pairs separated by "or".
    constraints = []
    for clause in self.constraints:
        constraints.extend(clause.iteritems())
        constraints.append(("or", empty))
    del constraints[-1:]  # drop the trailing "or" separator

    return href.query(constraints,
                      report=id,
                      order=order, desc=1 if desc else None,
                      group=self.group or None,
                      groupdesc=1 if self.groupdesc else None,
                      col=cols,
                      row=self.rows,
                      max=max,
                      page=page,
                      format=format)
def to_string(self):
    """Return a user readable and editable representation of the query.

    Note: for now, this is an "exploded" query href, but ideally should be
    expressed in TracQuery language.
    """
    query_string = self.get_href(Href(''))
    # Keep only the part after '?' and put one parameter per line.
    query_string = query_string.split('?', 1)[-1]
    return 'query:?' + query_string.replace('&', '\n&\n')
def get_sql(self, req=None, cached_ids=None, authname=None, tzinfo=None,
            locale=None):
    """Return a (sql, params) tuple for the query.

    The nested helpers below share the enclosing `args` and `errors`
    lists; invalid constraint values are collected and raised together
    as a `QueryValueError` at the end.
    """
    if req is not None:
        authname = req.authname
        tzinfo = req.tz
        locale = req.locale
    self.get_columns()
    db = self.env.get_read_db()

    enum_columns = ('resolution', 'priority', 'severity')

    # Build the list of actual columns to query
    cols = self.cols[:]

    def add_cols(*args):
        # Append each column once, preserving order.
        for col in args:
            if not col in cols:
                cols.append(col)
    if self.group and not self.group in cols:
        add_cols(self.group)
    if self.rows:
        add_cols('reporter', *self.rows)
    add_cols('status', 'priority', 'time', 'changetime', self.order)
    cols.extend([c for c in self.constraint_cols if not c in cols])

    custom_fields = [f['name'] for f in self.fields if f.get('custom')]
    list_fields = [f['name'] for f in self.fields
                   if f['type'] == 'text' and
                      f.get('format') == 'list']

    sql = []
    sql.append("SELECT " + ",".join(['t.%s AS %s' % (c, c) for c in cols
                                     if c not in custom_fields]))
    sql.append(",priority.value AS priority_value")
    for k in [db.quote(k) for k in cols if k in custom_fields]:
        sql.append(",%s.value AS %s" % (k, k))
    sql.append("\nFROM ticket AS t")

    # Join with ticket_custom table as necessary
    for k in [k for k in cols if k in custom_fields]:
        qk = db.quote(k)
        sql.append("\n LEFT OUTER JOIN ticket_custom AS %s ON " \
                   "(id=%s.ticket AND %s.name='%s')" % (qk, qk, qk, k))

    # Join with the enum table for proper sorting
    for col in [c for c in enum_columns
                if c == self.order or c == self.group or c == 'priority']:
        sql.append("\n LEFT OUTER JOIN enum AS %s ON "
                   "(%s.type='%s' AND %s.name=%s)"
                   % (col, col, col, col, col))

    # Join with the version/milestone tables for proper sorting
    for col in [c for c in ['milestone', 'version']
                if c == self.order or c == self.group]:
        sql.append("\n LEFT OUTER JOIN %s ON (%s.name=%s)"
                   % (col, col, col))

    def get_timestamp(date):
        # Parse a user-entered date into a utimestamp; on failure the
        # error is recorded in `errors` and None is returned.
        if date:
            try:
                return to_utimestamp(user_time(req, parse_date, date))
            except TracError, e:
                errors.append(unicode(e))
        return None

    def get_constraint_sql(name, value, mode, neg):
        # Build one (sql-fragment, params) pair for a single constraint
        # value, or None if the value contributes nothing.
        if name not in custom_fields:
            col = 't.' + name
        else:
            col = '%s.value' % db.quote(name)
        # Strip the mode prefix (and '!' when negated) from the value.
        value = value[len(mode) + neg:]

        if name in self.time_fields:
            # Time fields accept "start..end" ranges (half-open).
            if '..' in value:
                (start, end) = [each.strip() for each in
                                value.split('..', 1)]
            else:
                (start, end) = (value.strip(), '')
            col_cast = db.cast(col, 'int64')
            start = get_timestamp(start)
            end = get_timestamp(end)
            if start is not None and end is not None:
                return ("%s(%s>=%%s AND %s<%%s)" % ('NOT ' if neg else '',
                                                    col_cast, col_cast),
                        (start, end))
            elif start is not None:
                return ("%s%s>=%%s" % ('NOT ' if neg else '', col_cast),
                        (start, ))
            elif end is not None:
                return ("%s%s<%%s" % ('NOT ' if neg else '', col_cast),
                        (end, ))
            else:
                return None

        if mode == '~' and name in list_fields:
            # "contains" on a list field matches each word, with a
            # leading '-' negating that word.
            words = value.split()
            clauses, args = [], []
            for word in words:
                cneg = ''
                if word.startswith('-'):
                    cneg = 'NOT '
                    word = word[1:]
                    if not word:
                        continue
                clauses.append("COALESCE(%s,'') %s%s" % (col, cneg,
                                                         db.like()))
                args.append('%' + db.like_escape(word) + '%')
            if not clauses:
                return None
            return (('NOT ' if neg else '')
                    + '(' + ' AND '.join(clauses) + ')', args)

        if mode == '':
            # Exact match (possibly negated).
            return ("COALESCE(%s,'')%s=%%s" % (col, '!' if neg else ''),
                    (value, ))

        if not value:
            return None
        value = db.like_escape(value)
        if mode == '~':
            value = '%' + value + '%'
        elif mode == '^':
            value = value + '%'
        elif mode == '$':
            value = '%' + value
        return ("COALESCE(%s,'') %s%s" % (col, 'NOT ' if neg else '',
                                          db.like()),
                (value, ))

    def get_clause_sql(constraints):
        # Build the WHERE fragment for one "or" clause (AND of fields).
        db = self.env.get_read_db()
        clauses = []
        for k, v in constraints.iteritems():
            if authname is not None:
                v = [val.replace('$USER', authname) for val in v]
            # Determine the match mode of the constraint (contains,
            # starts-with, negation, etc.)
            neg = v[0].startswith('!')
            mode = ''
            if len(v[0]) > neg and v[0][neg] in ('~', '^', '$'):
                mode = v[0][neg]

            # Special case id ranges
            if k == 'id':
                ranges = Ranges()
                for r in v:
                    r = r.replace('!', '')
                    try:
                        ranges.appendrange(r)
                    except Exception:
                        errors.append(_('Invalid ticket id list: '
                                        '%(value)s', value=r))
                ids = []
                id_clauses = []
                for a, b in ranges.pairs:
                    if a == b:
                        ids.append(str(a))
                    else:
                        id_clauses.append('id BETWEEN %s AND %s')
                        args.append(a)
                        args.append(b)
                if ids:
                    id_clauses.append('id IN (%s)' % (','.join(ids)))
                if id_clauses:
                    clauses.append('%s(%s)' % ('NOT 'if neg else '',
                                               ' OR '.join(id_clauses)))
            # Special case for exact matches on multiple values
            elif not mode and len(v) > 1 and k not in self.time_fields:
                if k not in custom_fields:
                    col = 't.' + k
                else:
                    col = '%s.value' % db.quote(k)
                clauses.append("COALESCE(%s,'') %sIN (%s)"
                               % (col, 'NOT ' if neg else '',
                                  ','.join(['%s' for val in v])))
                args.extend([val[neg:] for val in v])
            elif v:
                constraint_sql = [get_constraint_sql(k, val, mode, neg)
                                  for val in v]
                constraint_sql = filter(None, constraint_sql)
                if not constraint_sql:
                    continue
                if neg:
                    # "none of the values" == AND of the negated tests
                    clauses.append("(" + " AND ".join(
                        [item[0] for item in constraint_sql]) + ")")
                else:
                    clauses.append("(" + " OR ".join(
                        [item[0] for item in constraint_sql]) + ")")
                for item in constraint_sql:
                    args.extend(item[1])
        return " AND ".join(clauses)

    args = []
    errors = []
    clauses = filter(None, (get_clause_sql(c) for c in self.constraints))
    if clauses:
        sql.append("\nWHERE ")
        sql.append(" OR ".join('(%s)' % c for c in clauses))
        if cached_ids:
            sql.append(" OR ")
            sql.append("id in (%s)" %
                       (','.join([str(id) for id in cached_ids])))

    sql.append("\nORDER BY ")
    order_cols = [(self.order, self.desc)]
    if self.group and self.group != self.order:
        order_cols.insert(0, (self.group, self.groupdesc))

    for name, desc in order_cols:
        if name in enum_columns:
            col = name + '.value'
        elif name in custom_fields:
            col = '%s.value' % db.quote(name)
        else:
            col = 't.' + name
        desc = ' DESC' if desc else ''
        # FIXME: This is a somewhat ugly hack.  Can we also have the
        # column type for this?  If it's an integer, we do first
        # one, if text, we do 'else'
        if name == 'id' or name in self.time_fields:
            sql.append("COALESCE(%s,0)=0%s," % (col, desc))
        else:
            sql.append("COALESCE(%s,'')=''%s," % (col, desc))
        if name in enum_columns:
            # These values must be compared as ints, not as strings
            sql.append(db.cast(col, 'int') + desc)
        elif name == 'milestone':
            sql.append("COALESCE(milestone.completed,0)=0%s,"
                       "milestone.completed%s,"
                       "COALESCE(milestone.due,0)=0%s,milestone.due%s,"
                       "%s%s" % (desc, desc, desc, desc, col, desc))
        elif name == 'version':
            sql.append("COALESCE(version.time,0)=0%s,version.time%s,%s%s"
                       % (desc, desc, col, desc))
        else:
            sql.append("%s%s" % (col, desc))
        if name == self.group and not name == self.order:
            sql.append(",")
    if self.order != 'id':
        # Stable tie-breaker.
        sql.append(",t.id")

    if errors:
        raise QueryValueError(errors)
    return "".join(sql), args
@staticmethod
def get_modes():
    """Return, per field kind, the list of available comparison modes.

    Each mode is a dict with a localized 'name' and the query 'value'
    prefix that selects it.
    """
    def mode(label, value):
        return {'name': label, 'value': value}
    return {
        'text': [
            mode(_("contains"), "~"),
            mode(_("doesn't contain"), "!~"),
            mode(_("begins with"), "^"),
            mode(_("ends with"), "$"),
            mode(_("is"), ""),
            mode(_("is not"), "!"),
        ],
        'textarea': [
            mode(_("contains"), "~"),
            mode(_("doesn't contain"), "!~"),
        ],
        'select': [
            mode(_("is"), ""),
            mode(_("is not"), "!"),
        ],
        'id': [
            mode(_("is"), ""),
            mode(_("is not"), "!"),
        ],
    }
def template_data(self, context, tickets, orig_list=None, orig_time=None,
req=None):
clauses = []
for clause in self.constraints:
constraints = {}
for k, v in clause.items():
constraint = {'values': [], 'mode': ''}
for val in v:
neg = val.startswith('!')
if neg:
val = val[1:]
mode = ''
if val[:1] in ('~', '^', '$') \
and not val in self.substitutions:
mode, val = val[:1], val[1:]
if req:
val = val.replace('$USER', req.authname)
constraint['mode'] = ('!' if neg else '') + mode
constraint['values'].append(val)
constraints[k] = constraint
clauses.append(constraints)
cols = self.get_columns()
labels = TicketSystem(self.env).get_ticket_field_labels()
wikify = set(f['name'] for f in self.fields
if f['type'] == 'text' and f.get('format') == 'wiki')
headers = [{
'name': col, 'label': labels.get(col, _('Ticket')),
'wikify': col in wikify,
'href': self.get_href(context.href, order=col,
desc=(col == self.order and not self.desc))
} for col in cols]
fields = {'id': {'type': 'id', 'label': _("Ticket")}}
for field in self.fields:
name = field['name']
if name == 'owner' and field['type'] == 'select':
# Make $USER work when restrict_owner = true
field = field.copy()
field['options'].insert(0, '$USER')
if name == 'milestone':
milestones = [Milestone(self.env, opt)
for opt in field['options']]
milestones = [m for m in milestones
if 'MILESTONE_VIEW' in context.perm(m.resource)]
groups = group_milestones(milestones, True)
field['options'] = []
field['optgroups'] = [
{'label': label, 'options': [m.name for m in milestones]}
for (label, milestones) in groups]
fields[name] = field
groups = {}
groupsequence = []
for ticket in tickets:
if orig_list:
# Mark tickets added or changed since the query was first
# executed
if ticket['time'] > orig_time:
ticket['added'] = True
elif ticket['changetime'] > orig_time:
ticket['changed'] = True
if self.group:
group_key = ticket[self.group]
# If grouping by datetime field use days (Bloodhound #68)
if self.group in ('changetime', 'time'):
group_key = format_date(group_key)
groups.setdefault(group_key, []).append(ticket)
if not groupsequence or group_key not in groupsequence:
groupsequence.append(group_key)
groupsequence = [(value, groups[value]) for value in groupsequence]
# detect whether the last group continues on the next page,
# by checking if the extra (max+1)th ticket is in the last group
last_group_is_partial = False
if groupsequence and self.max and len(tickets) == self.max + 1:
del tickets[-1]
if len(groupsequence[-1][1]) == 1:
# additional ticket started a new group
del groupsequence[-1] # remove that additional group
else:
# additional ticket stayed in the group
last_group_is_partial = True
del groupsequence[-1][1][-1] # remove the additional ticket
results = Paginator(tickets,
self.page - 1,
self.max,
self.num_items)
if req:
if results.has_next_page:
next_href = self.get_href(req.href, max=self.max,
page=self.page + 1)
add_link(req, 'next', next_href, _('Next Page'))
if results.has_previous_page:
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/model.py | trac/trac/ticket/model.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import re
from datetime import datetime
from trac.attachment import Attachment
from trac import core
from trac.cache import cached
from trac.core import TracError
from trac.resource import Resource, ResourceNotFound, ResourceSystem
from trac.ticket.api import TicketSystem
from trac.util import embedded_numbers, partition
from trac.util.text import empty
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc, utcmax
from trac.util.translation import _
__all__ = ['Ticket', 'Type', 'Status', 'Resolution', 'Priority', 'Severity',
'Component', 'Milestone', 'Version', 'group_milestones']
def _fixup_cc_list(cc_value):
"""Fix up cc list separators and remove duplicates."""
cclist = []
for cc in re.split(r'[;,\s]+', cc_value):
if cc and cc not in cclist:
cclist.append(cc)
return ', '.join(cclist)
class Ticket(object):
    """A ticket model object, backed by the `ticket` and `ticket_custom`
    tables."""

    # Fields that must not be modified directly by the user
    protected_fields = ('resolution', 'status', 'time', 'changetime')
@staticmethod
def id_is_valid(num):
    """Return whether `num` is a usable ticket id (1 .. 2**31)."""
    return 0 < int(num) <= 2 ** 31
# 0.11 compatibility
# Read-only aliases for the 'time' / 'changetime' field values.
time_created = property(lambda self: self.values.get('time'))
time_changed = property(lambda self: self.values.get('changetime'))
def __init__(self, env, tkt_id=None, db=None, version=None):
    """Create or fetch a ticket.

    If `tkt_id` is given the ticket is loaded from the database,
    otherwise a new (not yet inserted) ticket is initialized with the
    configured field defaults.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    self.env = env
    if tkt_id is not None:
        tkt_id = int(tkt_id)
    self.resource = Resource('ticket', tkt_id, version)
    self.fields = TicketSystem(self.env).get_ticket_fields()
    # Partition field names for quick lookups later on.
    self.std_fields, self.custom_fields, self.time_fields = [], [], []
    for f in self.fields:
        if f.get('custom'):
            self.custom_fields.append(f['name'])
        else:
            self.std_fields.append(f['name'])
        if f['type'] == 'time':
            self.time_fields.append(f['name'])
    self.values = {}
    if tkt_id is not None:
        self._fetch_ticket(tkt_id)
    else:
        self._init_defaults()
        self.id = None
    self._old = {}  # pending changes: field name -> previous value
# True once the ticket has been fetched from or inserted into the db.
exists = property(lambda self: self.id is not None)
def _init_defaults(self):
    """Seed `self.values` with the configured default of each field."""
    for field in self.fields:
        default = None
        if field['name'] in self.protected_fields:
            # Ignore for new - only change through workflow
            pass
        elif not field.get('custom'):
            default = self.env.config.get('ticket',
                                          'default_' + field['name'])
        else:
            default = field.get('value')
            options = field.get('options')
            if default and options and default not in options:
                try:
                    # A numeric default selects an option by index.
                    default = options[int(default)]
                except (ValueError, IndexError):
                    self.env.log.warning('Invalid default value "%s" '
                                         'for custom field "%s"'
                                         % (default, field['name']))
        if default:
            self.values.setdefault(field['name'], default)
def _fetch_ticket(self, tkt_id):
    """Load the ticket's standard and custom field values from the db.

    :raises ResourceNotFound: if no ticket with that id exists
    """
    row = None
    if self.id_is_valid(tkt_id):
        # Fetch the standard ticket fields
        for row in self.env.db_query("SELECT %s FROM ticket WHERE id=%%s" %
                                     ','.join(self.std_fields), (tkt_id,)):
            break
    if not row:
        raise ResourceNotFound(_("Ticket %(id)s does not exist.",
                                 id=tkt_id), _("Invalid ticket number"))

    self.id = tkt_id
    for i, field in enumerate(self.std_fields):
        value = row[i]
        if field in self.time_fields:
            self.values[field] = from_utimestamp(value)
        elif value is None:
            # NULL is represented by the `empty` marker, not by None.
            self.values[field] = empty
        else:
            self.values[field] = value

    # Fetch custom fields if available
    for name, value in self.env.db_query("""
            SELECT name, value FROM ticket_custom WHERE ticket=%s
            """, (tkt_id,)):
        if name in self.custom_fields:
            if value is None:
                self.values[name] = empty
            else:
                self.values[name] = value
def __getitem__(self, name):
    # Dict-style access to field values; returns None for unknown fields.
    return self.values.get(name)
def __setitem__(self, name, value):
    """Log ticket modifications so the table ticket_change can be updated
    """
    if name in self.values and self.values[name] == value:
        return  # unchanged: nothing to record
    if name not in self._old: # Changed field
        self._old[name] = self.values.get(name)
    elif self._old[name] == value: # Change of field reverted
        del self._old[name]
    if value:
        if isinstance(value, list):
            raise TracError(_("Multi-values fields not supported yet"))
        field = [field for field in self.fields if field['name'] == name]
        if field and field[0].get('type') != 'textarea':
            # Normalize single-line values; textareas keep their spacing.
            value = value.strip()
    self.values[name] = value
def get_value_or_default(self, name):
    """Return the value of a field, or its default value when the stored
    value is the `empty` marker; None for unknown fields.
    """
    if name not in self.values:
        return None
    value = self.values[name]
    if value is empty:
        return self.get_default(name)
    return value
def get_default(self, name):
    """Return the default value of a field."""
    for field in self.fields:
        if field['name'] == name:
            return field.get('value', '')
    return None
def populate(self, values):
    """Populate the ticket with 'suitable' values from a dictionary"""
    known = set(f['name'] for f in self.fields)
    for key in values.keys():
        if key in known:
            self[key] = values.get(key, '')
    # We have to do an extra trick to catch unchecked checkboxes
    for key in values.keys():
        if key.startswith('checkbox_') and key[9:] in known:
            if key[9:] not in values:
                self[key[9:]] = '0'
def insert(self, when=None, db=None):
    """Add ticket to database.

    Returns the new ticket id and notifies all change listeners.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    assert not self.exists, 'Cannot insert an existing ticket'

    if 'cc' in self.values:
        self['cc'] = _fixup_cc_list(self.values['cc'])

    # Add a timestamp
    if when is None:
        when = datetime.now(utc)
    self.values['time'] = self.values['changetime'] = when

    # The owner field defaults to the component owner
    if self.values.get('owner') == '< default >':
        default_to_owner = ''
        if self.values.get('component'):
            try:
                component = Component(self.env, self['component'])
                default_to_owner = component.owner # even if it's empty
            except ResourceNotFound:
                # No such component exists
                pass
        # If the current owner is "< default >", we need to set it to
        # _something_ else, even if that something else is blank.
        self['owner'] = default_to_owner

    # Perform type conversions
    values = dict(self.values)
    for field in self.time_fields:
        if field in values:
            values[field] = to_utimestamp(values[field])

    # Insert ticket record
    std_fields = []
    custom_fields = []
    for f in self.fields:
        fname = f['name']
        if fname in self.values:
            if f.get('custom'):
                custom_fields.append(fname)
            else:
                std_fields.append(fname)

    with self.env.db_transaction as db:
        cursor = db.cursor()
        cursor.execute("INSERT INTO ticket (%s) VALUES (%s)"
                       % (','.join(std_fields),
                          ','.join(['%s'] * len(std_fields))),
                       [values[name] for name in std_fields])
        if getattr(self.env, '_multiproduct_schema_enabled', False):
            # Bloodhound multi-product schema: the auto-increment key is
            # `uid`; map it back to the per-product ticket id.
            tkt_id = db.get_last_id(cursor, 'ticket', 'uid')
            rows = db("""SELECT id FROM ticket WHERE uid=%s""", (tkt_id,))
            if len(rows) != 1:
                # One row SHOULD always be retrieved, but if it does not
                # then insertion MUST fail since the cause may be a bug in
                # BH SQL translator executing previous INSERT without
                # product prefix properly setup.
                # By raising the error the transaction should be rolled back
                raise AssertionError("No ticket id for uid " + str(tkt_id))
            tkt_id = rows[0][0]
        else:
            tkt_id = db.get_last_id(cursor, 'ticket')

        # Insert custom fields
        if custom_fields:
            db.executemany(
                """INSERT INTO ticket_custom (ticket, name, value)
                VALUES (%s, %s, %s)
                """, [(tkt_id, c, self[c]) for c in custom_fields])

    self.id = tkt_id
    self.resource = self.resource(id=tkt_id)
    self._old = {}

    for listener in TicketSystem(self.env).change_listeners:
        listener.ticket_created(self)
    ResourceSystem(self.env).resource_created(self)
    return self.id
def save_changes(self, author=None, comment=None, when=None, db=None,
                 cnum='', replyto=None):
    """
    Store ticket changes in the database. The ticket must already exist in
    the database. Returns False if there were no changes to save, True
    otherwise.

    On success the return value is actually the (int) number of the comment
    that was recorded, which is truthy.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    :since 1.0: the `cnum` parameter is deprecated, and threading should
    be controlled with the `replyto` argument
    """
    assert self.exists, "Cannot update a new ticket"
    if 'cc' in self.values:
        self['cc'] = _fixup_cc_list(self.values['cc'])
    # No property differs from its stored (_old) value?
    props_unchanged = all(self.values.get(k) == v
                          for k, v in self._old.iteritems())
    if (not comment or not comment.strip()) and props_unchanged:
        return False  # Not modified
    if when is None:
        when = datetime.now(utc)
    when_ts = to_utimestamp(when)
    if 'component' in self.values:
        # If the component is changed on a 'new' ticket
        # then owner field is updated accordingly. (#623).
        if self.values.get('status') == 'new' \
                and 'component' in self._old \
                and 'owner' not in self._old:
            try:
                old_comp = Component(self.env, self._old['component'])
                old_owner = old_comp.owner or ''
                current_owner = self.values.get('owner') or ''
                # Only auto-reassign if the user did not override the owner
                if old_owner == current_owner:
                    new_comp = Component(self.env, self['component'])
                    if new_comp.owner:
                        self['owner'] = new_comp.owner
            except TracError:
                # If the old component has been removed from the database
                # we just leave the owner as is.
                pass
    with self.env.db_transaction as db:
        db("UPDATE ticket SET changetime=%s WHERE id=%s",
           (when_ts, self.id))
        # find cnum if it isn't provided
        if not cnum:
            num = 0
            # Comment numbers are stored in ticket_change.oldvalue, either
            # as "num" or "replyto.num"; walk the changelog backwards until
            # one is found, counting unnumbered edits along the way.
            for ts, old in db("""
                    SELECT DISTINCT tc1.time, COALESCE(tc2.oldvalue,'')
                    FROM ticket_change AS tc1
                    LEFT OUTER JOIN ticket_change AS tc2
                    ON tc2.ticket=%s AND tc2.time=tc1.time
                    AND tc2.field='comment'
                    WHERE tc1.ticket=%s ORDER BY tc1.time DESC
                    """, (self.id, self.id)):
                # Use oldvalue if available, else count edits
                try:
                    num += int(old.rsplit('.', 1)[-1])
                    break
                except ValueError:
                    num += 1
            cnum = str(num + 1)
            if replyto:
                # Threaded comments are numbered "replyto.num"
                cnum = '%s.%s' % (replyto, cnum)
        # store fields
        for name in self._old.keys():
            if name in self.custom_fields:
                # Upsert: UPDATE when a row exists, INSERT otherwise
                # (for-else pairs with the SELECT loop below)
                for row in db("""SELECT * FROM ticket_custom
                                 WHERE ticket=%s and name=%s
                                 """, (self.id, name)):
                    db("""UPDATE ticket_custom SET value=%s
                          WHERE ticket=%s AND name=%s
                          """, (self[name], self.id, name))
                    break
                else:
                    db("""INSERT INTO ticket_custom (ticket,name,value)
                          VALUES(%s,%s,%s)
                          """, (self.id, name, self[name]))
            else:
                # `name` comes from the ticket field definitions, not from
                # user input, so interpolating it is safe here
                db("UPDATE ticket SET %s=%%s WHERE id=%%s"
                   % name, (self[name], self.id))
            db("""INSERT INTO ticket_change
                  (ticket,time,author,field,oldvalue,newvalue)
                  VALUES (%s, %s, %s, %s, %s, %s)
                  """, (self.id, when_ts, author, name, self._old[name],
                        self[name]))
        # always save comment, even if empty
        # (numbering support for timeline)
        db("""INSERT INTO ticket_change
              (ticket,time,author,field,oldvalue,newvalue)
              VALUES (%s,%s,%s,'comment',%s,%s)
              """, (self.id, when_ts, author, cnum, comment))

    old_values = self._old
    self._old = {}
    self.values['changetime'] = when

    # Notify listeners only after the transaction has been committed
    for listener in TicketSystem(self.env).change_listeners:
        listener.ticket_changed(self, comment, author, old_values)

    context = dict(comment=comment, author=author)
    ResourceSystem(self.env).resource_changed(self, old_values, context)
    # Return the plain comment number (strip any "replyto." prefix)
    return int(cnum.rsplit('.', 1)[-1])
def get_changelog(self, when=None, db=None):
    """Return the changelog as a list of tuples of the form
    (time, author, field, oldvalue, newvalue, permanent).

    While the other tuple elements are quite self-explanatory,
    the `permanent` flag is used to distinguish collateral changes
    that are not yet immutable (like attachments, currently).

    :param when: if given, only changes recorded at exactly that datetime
                 are returned
    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    # attachment.id stores the ticket id as text, hence the str() key
    sid = str(self.id)
    when_ts = to_utimestamp(when)
    if when_ts:
        # Restrict all three UNION branches to the single point in time
        sql = """
            SELECT time, author, field, oldvalue, newvalue, 1 AS permanent
            FROM ticket_change WHERE ticket=%s AND time=%s
              UNION
            SELECT time, author, 'attachment', null, filename,
              0 AS permanent
            FROM attachment WHERE type='ticket' AND id=%s AND time=%s
              UNION
            SELECT time, author, 'comment', null, description,
              0 AS permanent
            FROM attachment WHERE type='ticket' AND id=%s AND time=%s
            ORDER BY time,permanent,author
            """
        args = (self.id, when_ts, sid, when_ts, sid, when_ts)
    else:
        sql = """
            SELECT time, author, field, oldvalue, newvalue, 1 AS permanent
            FROM ticket_change WHERE ticket=%s
              UNION
            SELECT time, author, 'attachment', null, filename,
              0 AS permanent
            FROM attachment WHERE type='ticket' AND id=%s
              UNION
            SELECT time, author, 'comment', null, description,
              0 AS permanent
            FROM attachment WHERE type='ticket' AND id=%s
            ORDER BY time,permanent,author
            """
        args = (self.id, sid, sid)
    # NULL old/new values are normalized to empty strings for callers
    return [(from_utimestamp(t), author, field, oldvalue or '',
             newvalue or '', permanent)
            for t, author, field, oldvalue, newvalue, permanent in
            self.env.db_query(sql, args)]
def delete(self, db=None):
    """Delete the ticket together with its attachments, change history
    and custom field values, then notify the registered listeners.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    with self.env.db_transaction as db:
        # Attachments are removed first, inside the same transaction
        Attachment.delete_all(self.env, 'ticket', self.id, db)
        db("DELETE FROM ticket WHERE id=%s", (self.id,))
        for table_stmt in ("DELETE FROM ticket_change WHERE ticket=%s",
                           "DELETE FROM ticket_custom WHERE ticket=%s"):
            db(table_stmt, (self.id,))

    # Listeners are only told once the transaction has committed
    for listener in TicketSystem(self.env).change_listeners:
        listener.ticket_deleted(self)

    ResourceSystem(self.env).resource_deleted(self)
def get_change(self, cnum=None, cdate=None, db=None):
    """Return a ticket change by its number or date.

    Returns a dict of the form {'date': datetime, 'fields': {...},
    'author': ...} or None when no change was found.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    if cdate is None:
        # Resolve the comment number to a change timestamp
        row = self._find_change(cnum)
        if not row:
            return
        cdate = from_utimestamp(row[0])
    ts = to_utimestamp(cdate)
    fields = {}
    change = {'date': cdate, 'fields': fields}
    for field, author, old, new in self.env.db_query("""
            SELECT field, author, oldvalue, newvalue
            FROM ticket_change WHERE ticket=%s AND time=%s
            """, (self.id, ts)):
        fields[field] = {'author': author, 'old': old, 'new': new}
        if field == 'comment':
            # The comment row carries the authoritative author
            change['author'] = author
        elif not field.startswith('_'):
            # "_"-prefixed fields hold comment edit history, skip them;
            # any other field can supply a fallback author
            change.setdefault('author', author)
    # An empty result means no change exists at that time: return None
    if fields:
        return change
def delete_change(self, cnum=None, cdate=None, when=None):
    """Delete a ticket change identified by its number or date.

    The values the deleted change introduced are propagated: either the
    following change's `oldvalue` is rewritten, or — when no later change
    touched the field — the ticket itself is reverted.
    """
    if cdate is None:
        row = self._find_change(cnum)
        if not row:
            return
        cdate = from_utimestamp(row[0])
    ts = to_utimestamp(cdate)
    if when is None:
        when = datetime.now(utc)
    when_ts = to_utimestamp(when)

    with self.env.db_transaction as db:
        # Find modified fields and their previous value
        # (comment rows and "_"-prefixed history rows are not field edits)
        fields = [(field, old, new)
                  for field, old, new in db("""
                      SELECT field, oldvalue, newvalue FROM ticket_change
                      WHERE ticket=%s AND time=%s
                      """, (self.id, ts))
                  if field != 'comment' and not field.startswith('_')]
        for field, oldvalue, newvalue in fields:
            # Find the next change
            for next_ts, in db("""SELECT time FROM ticket_change
                                  WHERE ticket=%s AND time>%s AND field=%s
                                  LIMIT 1
                                  """, (self.id, ts, field)):
                # Modify the old value of the next change if it is equal
                # to the new value of the deleted change
                db("""UPDATE ticket_change SET oldvalue=%s
                      WHERE ticket=%s AND time=%s AND field=%s
                      AND oldvalue=%s
                      """, (oldvalue, self.id, next_ts, field, newvalue))
                break
            else:
                # No next change, edit ticket field
                if field in self.std_fields:
                    db("UPDATE ticket SET %s=%%s WHERE id=%%s"
                       % field, (oldvalue, self.id))
                else:
                    db("""UPDATE ticket_custom SET value=%s
                          WHERE ticket=%s AND name=%s
                          """, (oldvalue, self.id, field))

        # Delete the change
        db("DELETE FROM ticket_change WHERE ticket=%s AND time=%s",
           (self.id, ts))

        # Update last changed time
        db("UPDATE ticket SET changetime=%s WHERE id=%s",
           (when_ts, self.id))

    # Reload this object's values from the now-modified database row
    self._fetch_ticket(self.id)
def modify_comment(self, cdate, author, comment, when=None):
    """Modify a ticket comment specified by its date, while keeping a
    history of edits.

    Each previous text is archived in a ticket_change row whose field is
    "_comment<rev>"; its oldvalue holds the superseded text and its
    newvalue the utimestamp of the edit.
    """
    ts = to_utimestamp(cdate)
    if when is None:
        when = datetime.now(utc)
    when_ts = to_utimestamp(when)

    with self.env.db_transaction as db:
        # Find the current value of the comment
        # (False sentinel distinguishes "no comment row" from empty text)
        old_comment = False
        for old_comment, in db("""
                SELECT newvalue FROM ticket_change
                WHERE ticket=%s AND time=%s AND field='comment'
                """, (self.id, ts)):
            break
        if comment == (old_comment or ''):
            return  # nothing changed

        # Comment history is stored in fields named "_comment%d"
        # Find the next edit number
        fields = db("""SELECT field FROM ticket_change
                       WHERE ticket=%%s AND time=%%s AND field %s
                       """ % db.like(),
                    (self.id, ts, db.like_escape('_comment') + '%'))
        # len('_comment') == 8; next rev is max existing rev + 1, or 0
        rev = max(int(field[8:]) for field, in fields) + 1 if fields else 0
        db("""INSERT INTO ticket_change
                (ticket,time,author,field,oldvalue,newvalue)
              VALUES (%s,%s,%s,%s,%s,%s)
              """, (self.id, ts, author, '_comment%d' % rev,
                    old_comment or '', str(when_ts)))
        if old_comment is False:
            # There was no comment field, add one, find the
            # original author in one of the other changed fields
            old_author = None
            for old_author, in db("""
                    SELECT author FROM ticket_change
                    WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1
                    """ % db.like(),
                    (self.id, ts, db.like_escape('_') + '%')):
                db("""INSERT INTO ticket_change
                        (ticket,time,author,field,oldvalue,newvalue)
                      VALUES (%s,%s,%s,'comment','',%s)
                      """, (self.id, ts, old_author, comment))
        else:
            db("""UPDATE ticket_change SET newvalue=%s
                  WHERE ticket=%s AND time=%s AND field='comment'
                  """, (comment, self.id, ts))

        # Update last changed time
        db("UPDATE ticket SET changetime=%s WHERE id=%s",
           (when_ts, self.id))

    self.values['changetime'] = when
def get_comment_history(self, cnum=None, cdate=None, db=None):
    """Retrieve the edit history of a comment identified by its number or
    date.

    Returns a sorted list of `(rev, date, author, comment)` tuples — the
    last entry is the current text — or None when the comment cannot be
    found.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    if cdate is None:
        row = self._find_change(cnum)
        if not row:
            return
        ts0, author0, last_comment = row
    else:
        ts0, author0, last_comment = to_utimestamp(cdate), None, None
    with self.env.db_query as db:
        # Get last comment and author if not available
        if last_comment is None:
            last_comment = ''
            for author0, last_comment in db("""
                    SELECT author, newvalue FROM ticket_change
                    WHERE ticket=%s AND time=%s AND field='comment'
                    """, (self.id, ts0)):
                break
        if author0 is None:
            # Fall back to the author of any other (non-history) change
            # recorded at the same time; give up if none exists
            for author0, last_comment in db("""
                    SELECT author, newvalue FROM ticket_change
                    WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1
                    """ % db.like(),
                    (self.id, ts0, db.like_escape('_') + '%')):
                break
            else:
                return

        # Get all fields of the form "_comment%d"
        rows = db("""SELECT field, author, oldvalue, newvalue
                     FROM ticket_change
                     WHERE ticket=%%s AND time=%%s AND field %s
                     """ % db.like(),
                  (self.id, ts0, db.like_escape('_comment') + '%'))
        # field[8:] is the revision number; oldvalue the archived text,
        # newvalue the utimestamp at which the edit was made
        rows = sorted((int(field[8:]), author, old, new)
                      for field, author, old, new in rows)
        history = []
        for rev, author, comment, ts in rows:
            history.append((rev, from_utimestamp(long(ts0)), author0,
                            comment))
            # This row's edit time/author date the *next* history entry
            ts0, author0 = ts, author
        history.sort()
        rev = history[-1][0] + 1 if history else 0
        # Append the current (latest) text as the final revision
        history.append((rev, from_utimestamp(long(ts0)), author0,
                        last_comment))
        return history
def _find_change(self, cnum):
    """Find a comment by its number.

    Returns a `(time, author, comment)` tuple, or None when no comment
    with that number exists.
    """
    scnum = str(cnum)
    with self.env.db_query as db:
        # Fast path: the number is recorded in oldvalue, either exactly
        # or as the ".num" tail of a threaded "replyto.num" value
        for row in db("""
                SELECT time, author, newvalue FROM ticket_change
                WHERE ticket=%%s AND field='comment'
                AND (oldvalue=%%s OR oldvalue %s)
                """ % db.like(),
                (self.id, scnum, '%' + db.like_escape('.' + scnum))):
            return row

        # Fallback when comment number is not available in oldvalue
        num = 0
        for ts, old, author, comment in db("""
                SELECT DISTINCT tc1.time, COALESCE(tc2.oldvalue,''),
                                tc2.author, COALESCE(tc2.newvalue,'')
                FROM ticket_change AS tc1
                LEFT OUTER JOIN ticket_change AS tc2
                ON tc2.ticket=%s AND tc2.time=tc1.time
                AND tc2.field='comment'
                WHERE tc1.ticket=%s ORDER BY tc1.time
                """, (self.id, self.id)):
            # Use oldvalue if available, else count edits
            try:
                num = int(old.rsplit('.', 1)[-1])
            except ValueError:
                num += 1
            # NOTE(review): `num` is an int while `cnum` is str()-able
            # above — callers appear to pass ints here; a str `cnum`
            # would never match this equality. Verify against callers.
            if num == cnum:
                break
        else:
            return

        # Find author if NULL
        if author is None:
            for author, in db("""
                    SELECT author FROM ticket_change
                    WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1
                    """ % db.like(),
                    (self.id, ts, db.like_escape('_') + '%')):
                break
        return (ts, author, comment)
def simplify_whitespace(name):
    """Strip spaces and remove duplicate spaces within names"""
    # Falsy inputs (None, '') are passed through unchanged.
    return ' '.join(name.split()) if name else name
class AbstractEnum(object):
type = None
ticket_col = None
def __init__(self, env, name=None, db=None):
    """Load an enum value by name, or create an empty (non-existent) one.

    :raises ResourceNotFound: when `name` is given but no enum row of
                              this type exists with that name
    """
    if not self.ticket_col:
        # By default the enum type name doubles as the ticket column name
        self.ticket_col = self.type
    self.env = env
    if name:
        # for-else: the else branch runs when the query returned no row
        for value, in self.env.db_query("""
                SELECT value FROM enum WHERE type=%s AND name=%s
                """, (self.type, name)):
            self.value = self._old_value = value
            self.name = self._old_name = name
            break
        else:
            raise ResourceNotFound(_("%(type)s %(name)s does not exist.",
                                     type=self.type, name=name))
    else:
        self.value = self._old_value = None
        self.name = self._old_name = None
# True when this enum value is backed by a database row (i.e. it was
# loaded in __init__ or stored by insert()).
exists = property(lambda self: self._old_value is not None)
def delete(self, db=None):
    """Delete the enum value.

    Remaining values with a higher sort value are shifted down by one to
    close the gap.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    assert self.exists, "Cannot delete non-existent %s" % self.type
    with self.env.db_transaction as db:
        self.env.log.info("Deleting %s %s", self.type, self.name)
        db("DELETE FROM enum WHERE type=%s AND value=%s",
           (self.type, self._old_value))
        # Re-order any enums that have higher value than deleted
        # (close gap)
        for enum in self.select(self.env):
            try:
                if int(enum.value) > int(self._old_value):
                    enum.value = unicode(int(enum.value) - 1)
                    enum.update()
            except ValueError:
                pass  # Ignore cast error for this non-essential operation
        TicketSystem(self.env).reset_ticket_fields()

    ResourceSystem(self.env).resource_deleted(self)
    # Mark the instance as no longer existing
    self.value = self._old_value = None
    self.name = self._old_name = None
def insert(self, db=None):
    """Add a new enum value.

    When no explicit `value` is set, the new entry is appended after the
    current maximum sort value.

    :raises TracError: when the (whitespace-simplified) name is empty
    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    assert not self.exists, "Cannot insert existing %s" % self.type
    self.name = simplify_whitespace(self.name)
    if not self.name:
        raise TracError(_('Invalid %(type)s name.', type=self.type))

    with self.env.db_transaction as db:
        self.env.log.debug("Creating new %s '%s'", self.type, self.name)
        if not self.value:
            # Next sort position: MAX(value)+1, or 0 on an empty table
            row = db("SELECT COALESCE(MAX(%s), 0) FROM enum WHERE type=%%s"
                     % db.cast('value', 'int'),
                     (self.type,))
            self.value = int(float(row[0][0])) + 1 if row else 0
        db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
           (self.type, self.name, self.value))
        TicketSystem(self.env).reset_ticket_fields()

    self._old_name = self.name
    self._old_value = self.value
    ResourceSystem(self.env).resource_created(self)
def update(self, db=None):
"""Update the enum value.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot update non-existent %s" % self.type
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid %(type)s name.", type=self.type))
with self.env.db_transaction as db:
self.env.log.info("Updating %s '%s'", self.type, self.name)
db("UPDATE enum SET name=%s,value=%s WHERE type=%s AND name=%s",
(self.name, self.value, self.type, self._old_name))
if self.name != self._old_name:
# Update tickets
db("UPDATE ticket SET %s=%%s WHERE %s=%%s"
% (self.ticket_col, self.ticket_col),
(self.name, self._old_name))
TicketSystem(self.env).reset_ticket_fields()
old_values = dict()
if self.name != self._old_name:
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/__init__.py | trac/trac/ticket/__init__.py | from trac.ticket.api import *
from trac.ticket.default_workflow import *
from trac.ticket.model import *
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/notification.py | trac/trac/ticket/notification.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2005-2006 Emmanuel Blot <emmanuel.blot@free.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
#
from __future__ import with_statement
from hashlib import md5
from genshi.template.text import NewTextTemplate
from trac.core import *
from trac.config import *
from trac.notification import NotifyEmail
from trac.ticket.api import TicketSystem
from trac.util.datefmt import to_utimestamp
from trac.util.text import obfuscate_email_address, text_width, wrap
from trac.util.translation import deactivate, reactivate
class TicketNotificationSystem(Component):
    """Declares the `[notification]` options that control ticket
    notification recipients and subject formatting."""

    # Whether the ticket owner is always on the recipient list
    always_notify_owner = BoolOption('notification', 'always_notify_owner',
                                     'false',
        """Always send notifications to the ticket owner (''since 0.9'').""")

    # Whether the ticket reporter is always on the recipient list
    always_notify_reporter = BoolOption('notification',
                                        'always_notify_reporter',
                                        'false',
        """Always send notifications to any address in the ''reporter''
        field.""")

    # Whether past updaters (and the current one) are notified
    always_notify_updater = BoolOption('notification', 'always_notify_updater',
                                       'true',
        """Always send notifications to the person who causes the ticket
        property change and to any previous updater of that ticket.""")

    # Genshi text template for single-ticket notification subjects
    ticket_subject_template = Option('notification', 'ticket_subject_template',
                                     '$prefix #$ticket.id: $summary',
        """A Genshi text template snippet used to get the notification subject.

        By default, the subject template is `$prefix #$ticket.id: $summary`.
        `$prefix` being the value of the `smtp_subject_prefix` option.
        ''(since 0.11)''""")

    # Genshi text template for batch-modification notification subjects
    batch_subject_template = Option('notification', 'batch_subject_template',
                                    '$prefix Batch modify: $tickets_descr',
        """Like ticket_subject_template but for batch modifications.

        By default, the template is `$prefix Batch modify: $tickets_descr`.
        ''(since 1.0)''""")

    # Display width assumed for East-Asian "ambiguous width" characters
    ambiguous_char_width = Option('notification', 'ambiguous_char_width',
                                  'single',
        """Which width of ambiguous characters (e.g. 'single' or
        'double') should be used in the table of notification mail.

        If 'single', the same width as characters in US-ASCII. This is
        expected by most users. If 'double', twice the width of
        US-ASCII characters.  This is expected by CJK users. ''(since
        0.12.2)''""")
def get_ticket_notification_recipients(env, config, tktid, prev_cc):
    """Build the notification recipient lists for a ticket.

    :param env: the Trac environment
    :param config: the configuration from which the `[notification]`
                   options are read
    :param tktid: id of the ticket
    :param prev_cc: list of already-known cc addresses; the cc addresses
                    found on the ticket are appended to it
    :return: a `(torecipients, ccrecipients, reporter, owner)` tuple;
             `reporter` and `owner` are `None` when the ticket row cannot
             be found
    """
    notify_reporter = config.getbool('notification', 'always_notify_reporter')
    notify_owner = config.getbool('notification', 'always_notify_owner')
    notify_updater = config.getbool('notification', 'always_notify_updater')

    ccrecipients = prev_cc
    torecipients = []
    # Fix: previously `reporter` and `owner` were left unbound (NameError
    # at the return below) when the SELECT found no ticket row, e.g. for a
    # ticket deleted concurrently.
    reporter = owner = None
    with env.db_query as db:
        # Harvest email addresses from the cc, reporter, and owner fields
        for row in db("SELECT cc, reporter, owner FROM ticket WHERE id=%s",
                      (tktid,)):
            if row[0]:
                ccrecipients += row[0].replace(',', ' ').split()
            reporter = row[1]
            owner = row[2]
            if notify_reporter:
                torecipients.append(row[1])
            if notify_owner:
                torecipients.append(row[2])
            break

        # Harvest email addresses from the author field of ticket_change(s)
        if notify_updater:
            for author, ticket in db("""
                    SELECT DISTINCT author, ticket FROM ticket_change
                    WHERE ticket=%s
                    """, (tktid,)):
                torecipients.append(author)

        # Suppress the updater from the recipients: the most recent change
        # author, falling back to the reporter for a pristine ticket
        updater = None
        for updater, in db("""
                SELECT author FROM ticket_change WHERE ticket=%s
                ORDER BY time DESC LIMIT 1
                """, (tktid,)):
            break
        else:
            for updater, in db("SELECT reporter FROM ticket WHERE id=%s",
                               (tktid,)):
                break

        if not notify_updater:
            # Keep the updater anyway when another rule covers them
            filter_out = True
            if notify_reporter and (updater == reporter):
                filter_out = False
            if notify_owner and (updater == owner):
                filter_out = False
            if filter_out:
                torecipients = [r for r in torecipients
                                if r and r != updater]
        elif updater:
            torecipients.append(updater)

    return (torecipients, ccrecipients, reporter, owner)
class TicketNotifyEmail(NotifyEmail):
    """Notification of ticket changes.

    Renders a plain-text e-mail from `ticket_notify_email.txt`, including
    a two-column properties table and a textual diff of the changed
    fields.
    """
    template_name = "ticket_notify_email.txt"
    ticket = None
    newticket = None
    modtime = 0
    from_email = 'trac+ticket@localhost'
    # Maximum width, in display columns, of the generated text body
    COLS = 75

    def __init__(self, env):
        NotifyEmail.__init__(self, env)
        self.prev_cc = []
        ambiguous_char_width = env.config.get('notification',
                                              'ambiguous_char_width',
                                              'single')
        # 2 display columns per ambiguous-width char for CJK ('double')
        self.ambiwidth = 2 if ambiguous_char_width == 'double' else 1

    def notify(self, ticket, newticket=True, modtime=None):
        """Send ticket change notification e-mail (untranslated)"""
        # Temporarily deactivate translation so the mail body is rendered
        # with the untranslated field definitions, then restore both.
        t = deactivate()
        translated_fields = ticket.fields
        try:
            ticket.fields = TicketSystem(self.env).get_ticket_fields()
            self._notify(ticket, newticket, modtime)
        finally:
            ticket.fields = translated_fields
            reactivate(t)

    def _notify(self, ticket, newticket=True, modtime=None):
        # Assemble the template data for one notification and delegate the
        # actual sending to NotifyEmail.notify().
        self.ticket = ticket
        self.modtime = modtime
        self.newticket = newticket
        changes_body = ''
        self.reporter = ''
        self.owner = ''
        changes_descr = ''
        change_data = {}
        link = self.env.abs_href.ticket(ticket.id)
        summary = self.ticket['summary']
        author = None

        if not self.newticket and modtime:  # Ticket change
            from trac.ticket.web_ui import TicketModule
            for change in TicketModule(self.env).grouped_changelog_entries(
                    ticket, when=modtime):
                if not change['permanent']:  # attachment with same time...
                    continue
                author = change['author']
                change_data.update({
                    'author': self.obfuscate_email(author),
                    'comment': wrap(change['comment'], self.COLS, ' ', ' ',
                                    '\n', self.ambiwidth)
                })
                link += '#comment:%s' % str(change.get('cnum', ''))
                for field, values in change['fields'].iteritems():
                    old = values['old']
                    new = values['new']
                    newv = ''
                    if field == 'description':
                        # Quote the old description with "> " and show both
                        new_descr = wrap(new, self.COLS, ' ', ' ', '\n',
                                         self.ambiwidth)
                        old_descr = wrap(old, self.COLS, '> ', '> ', '\n',
                                         self.ambiwidth)
                        old_descr = old_descr.replace(2 * '\n',
                                                      '\n' + '>' + '\n')
                        cdescr = '\n'
                        cdescr += 'Old description:' + 2 * '\n' + old_descr + \
                                  2 * '\n'
                        cdescr += 'New description:' + 2 * '\n' + new_descr + \
                                  '\n'
                        changes_descr = cdescr
                    elif field == 'summary':
                        summary = "%s (was: %s)" % (new, old)
                    elif field == 'cc':
                        (addcc, delcc) = self.diff_cc(old, new)
                        chgcc = ''
                        if delcc:
                            chgcc += wrap(" * cc: %s (removed)" %
                                          ', '.join(delcc),
                                          self.COLS, ' ', ' ', '\n',
                                          self.ambiwidth) + '\n'
                        if addcc:
                            chgcc += wrap(" * cc: %s (added)" %
                                          ', '.join(addcc),
                                          self.COLS, ' ', ' ', '\n',
                                          self.ambiwidth) + '\n'
                        if chgcc:
                            changes_body += chgcc
                        self.prev_cc += self.parse_cc(old) if old else []
                    else:
                        if field in ['owner', 'reporter']:
                            old = self.obfuscate_email(old)
                            new = self.obfuscate_email(new)
                            newv = new
                        # Render "* field: old => new", wrapping long values
                        length = 7 + len(field)
                        spacer_old, spacer_new = ' ', ' '
                        if len(old + new) + length > self.COLS:
                            length = 5
                            if len(old) + length > self.COLS:
                                spacer_old = '\n'
                            if len(new) + length > self.COLS:
                                spacer_new = '\n'
                        chg = '* %s: %s%s%s=>%s%s' % (field, spacer_old, old,
                                                      spacer_old, spacer_new,
                                                      new)
                        chg = chg.replace('\n', '\n' + length * ' ')
                        chg = wrap(chg, self.COLS, '', length * ' ', '\n',
                                   self.ambiwidth)
                        changes_body += ' %s%s' % (chg, '\n')
                    if newv:
                        change_data[field] = {'oldvalue': old,
                                              'newvalue': new}

        if newticket:
            author = ticket['reporter']

        ticket_values = ticket.values.copy()
        ticket_values['id'] = ticket.id
        ticket_values['description'] = wrap(
            ticket_values.get('description', ''), self.COLS,
            initial_indent=' ', subsequent_indent=' ', linesep='\n',
            ambiwidth=self.ambiwidth)
        ticket_values['new'] = self.newticket
        ticket_values['link'] = link

        subject = self.format_subj(summary)
        if not self.newticket:
            subject = 'Re: ' + subject
        self.data.update({
            'ticket_props': self.format_props(),
            'ticket_body_hdr': self.format_hdr(),
            'subject': subject,
            'ticket': ticket_values,
            'changes_body': changes_body,
            'changes_descr': changes_descr,
            'change': change_data
        })
        NotifyEmail.notify(self, ticket.id, subject, author)

    def format_props(self):
        # Render the ticket's short properties as a two-column ASCII table;
        # multi-line / textarea values are collected in `big` and appended
        # as full-width sections below the table.
        tkt = self.ticket
        fields = [f for f in tkt.fields
                  if f['name'] not in ('summary', 'cc', 'time', 'changetime')]
        # width = [left label, left value, right label, right value]
        width = [0, 0, 0, 0]
        i = 0
        for f in fields:
            if f['type'] == 'textarea':
                continue
            fname = f['name']
            if not fname in tkt.values:
                continue
            fval = tkt[fname] or ''
            if fval.find('\n') != -1:
                continue
            if fname in ['owner', 'reporter']:
                fval = self.obfuscate_email(fval)
            idx = 2 * (i % 2)
            width[idx] = max(self.get_text_width(f['label']), width[idx])
            width[idx + 1] = max(self.get_text_width(fval), width[idx + 1])
            i += 1
        width_l = width[0] + width[1] + 5
        width_r = width[2] + width[3] + 5
        half_cols = (self.COLS - 1) / 2
        # Shrink the columns so the whole table fits within COLS
        if width_l + width_r + 1 > self.COLS:
            if ((width_l > half_cols and width_r > half_cols) or
                    (width[0] > half_cols / 2 or width[2] > half_cols / 2)):
                width_l = half_cols
                width_r = half_cols
            elif width_l > width_r:
                width_l = min((self.COLS - 1) * 2 / 3, width_l)
                width_r = self.COLS - width_l - 1
            else:
                width_r = min((self.COLS - 1) * 2 / 3, width_r)
                width_l = self.COLS - width_r - 1
        sep = width_l * '-' + '+' + width_r * '-'
        txt = sep + '\n'
        cell_tmp = [u'', u'']
        big = []
        i = 0
        width_lr = [width_l, width_r]
        for f in [f for f in fields if f['name'] != 'description']:
            fname = f['name']
            if not tkt.values.has_key(fname):
                continue
            fval = tkt[fname] or ''
            if fname in ['owner', 'reporter']:
                fval = self.obfuscate_email(fval)
            if f['type'] == 'textarea' or '\n' in unicode(fval):
                big.append((f['label'], '\n'.join(fval.splitlines())))
            else:
                # Note: f['label'] is a Babel's LazyObject, make sure its
                # __str__ method won't be called.
                str_tmp = u'%s: %s' % (f['label'], unicode(fval))
                idx = i % 2
                cell_tmp[idx] += wrap(str_tmp, width_lr[idx] - 2 + 2 * idx,
                                      (width[2 * idx]
                                       - self.get_text_width(f['label'])
                                       + 2 * idx) * ' ',
                                      2 * ' ', '\n', self.ambiwidth)
                cell_tmp[idx] += '\n'
                i += 1
        cell_l = cell_tmp[0].splitlines()
        cell_r = cell_tmp[1].splitlines()
        # Pad the shorter column and join the two columns line by line
        for i in range(max(len(cell_l), len(cell_r))):
            if i >= len(cell_l):
                cell_l.append(width_l * ' ')
            elif i >= len(cell_r):
                cell_r.append('')
            fmt_width = width_l - self.get_text_width(cell_l[i]) \
                        + len(cell_l[i])
            txt += u'%-*s|%s%s' % (fmt_width, cell_l[i], cell_r[i], '\n')
        if big:
            txt += sep
            for name, value in big:
                txt += '\n'.join(['', name + ':', value, '', ''])
            txt += sep
        return txt

    def parse_cc(self, txt):
        # Keep only tokens that look like e-mail addresses
        return filter(lambda x: '@' in x, txt.replace(',', ' ').split())

    def diff_cc(self, old, new):
        # Return the (added, removed) cc addresses, obfuscated for display
        oldcc = NotifyEmail.addrsep_re.split(old)
        newcc = NotifyEmail.addrsep_re.split(new)
        added = [self.obfuscate_email(x) \
                 for x in newcc if x and x not in oldcc]
        removed = [self.obfuscate_email(x) \
                   for x in oldcc if x and x not in newcc]
        return (added, removed)

    def format_hdr(self):
        # "#id: summary" header line, wrapped to COLS
        return '#%s: %s' % (self.ticket.id, wrap(self.ticket['summary'],
                                                 self.COLS, linesep='\n',
                                                 ambiwidth=self.ambiwidth))

    def format_subj(self, summary):
        # Render the configured Genshi text template for the subject line
        template = self.config.get('notification','ticket_subject_template')
        template = NewTextTemplate(template.encode('utf8'))

        prefix = self.config.get('notification', 'smtp_subject_prefix')
        if prefix == '__default__':
            prefix = '[%s]' % self.env.project_name

        data = {
            'prefix': prefix,
            'summary': summary,
            'ticket': self.ticket,
            'env': self.env,
        }
        return template.generate(**data).render('text', encoding=None).strip()

    def get_recipients(self, tktid):
        # Also remembers reporter/owner for use by send()
        (torecipients, ccrecipients, reporter, owner) = \
            get_ticket_notification_recipients(self.env, self.config,
                                               tktid, self.prev_cc)
        self.reporter = reporter
        self.owner = owner
        return (torecipients, ccrecipients)

    def get_message_id(self, rcpt, modtime=None):
        """Generate a predictable, but sufficiently unique message ID."""
        s = '%s.%08d.%d.%s' % (self.env.project_url.encode('utf-8'),
                               int(self.ticket.id), to_utimestamp(modtime),
                               rcpt.encode('ascii', 'ignore'))
        dig = md5(s).hexdigest()
        host = self.from_email[self.from_email.find('@') + 1:]
        msgid = '<%03d.%s@%s>' % (len(s), dig, host)
        return msgid

    def send(self, torcpts, ccrcpts):
        dest = self.reporter or 'anonymous'
        hdrs = {}
        hdrs['Message-ID'] = self.get_message_id(dest, self.modtime)
        hdrs['X-Trac-Ticket-ID'] = str(self.ticket.id)
        hdrs['X-Trac-Ticket-URL'] = self.data['ticket']['link']
        if not self.newticket:
            # Thread follow-ups under the initial notification
            # (message id generated without modtime)
            msgid = self.get_message_id(dest)
            hdrs['In-Reply-To'] = msgid
            hdrs['References'] = msgid
        NotifyEmail.send(self, torcpts, ccrcpts, hdrs)

    def get_text_width(self, text):
        # Display width honoring the configured ambiguous-char width
        return text_width(text, ambiwidth=self.ambiwidth)

    def obfuscate_email(self, text):
        """ Obfuscate text when `show_email_addresses` is disabled in config.
        Obfuscation happens once per email, regardless of recipients, so
        cannot use permission-based obfuscation.
        """
        if self.env.config.getbool('trac', 'show_email_addresses'):
            return text
        else:
            return obfuscate_email_address(text)
class BatchTicketNotifyEmail(NotifyEmail):
    """Notification of ticket batch modifications.

    Renders `batch_ticket_notify_email.txt` once for the whole set of
    modified tickets."""

    template_name = "batch_ticket_notify_email.txt"

    def __init__(self, env):
        NotifyEmail.__init__(self, env)

    def notify(self, tickets, new_values, comment, action, author):
        """Send batch ticket change notification e-mail (untranslated)"""
        # Deactivate translation while rendering, as in TicketNotifyEmail
        t = deactivate()
        try:
            self._notify(tickets, new_values, comment, action, author)
        finally:
            reactivate(t)

    def _notify(self, tickets, new_values, comment, action, author):
        # Build the template data and delegate sending to NotifyEmail
        self.tickets = tickets
        changes_body = ''
        self.reporter = ''
        self.owner = ''
        # e.g. "priority to major\nmilestone to 1.0"
        changes_descr = '\n'.join(['%s to %s' % (prop, val)
                                   for (prop, val) in new_values.iteritems()])
        # e.g. "#1, #2, #5"
        tickets_descr = ', '.join(['#%s' % t for t in tickets])
        subject = self.format_subj(tickets_descr)
        # Link back to a query showing all modified tickets
        link = self.env.abs_href.query(id=','.join([str(t) for t in tickets]))
        self.data.update({
            'tickets_descr': tickets_descr,
            'changes_descr': changes_descr,
            'comment': comment,
            'action': action,
            'author': author,
            'subject': subject,
            'ticket_query_link': link,
        })
        NotifyEmail.notify(self, tickets, subject, author)

    def format_subj(self, tickets_descr):
        # Render the configured Genshi text template for the subject line
        template = self.config.get('notification','batch_subject_template')
        template = NewTextTemplate(template.encode('utf8'))

        prefix = self.config.get('notification', 'smtp_subject_prefix')
        if prefix == '__default__':
            prefix = '[%s]' % self.env.project_name

        data = {
            'prefix': prefix,
            'tickets_descr': tickets_descr,
            'env': self.env,
        }
        return template.generate(**data).render('text', encoding=None).strip()

    def get_recipients(self, tktids):
        # Union of the per-ticket recipient lists, de-duplicated
        alltorecipients = []
        allccrecipients = []
        for t in tktids:
            (torecipients, ccrecipients, reporter, owner) = \
                get_ticket_notification_recipients(self.env, self.config,
                                                   t, [])
            alltorecipients.extend(torecipients)
            allccrecipients.extend(ccrecipients)
        return (list(set(alltorecipients)), list(set(allccrecipients)))
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/batch.py | trac/trac/ticket/batch.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Brian Meeker
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Brian Meeker <meeker.brian@gmail.com>
from __future__ import with_statement
import re
from datetime import datetime
from genshi.builder import tag
from trac.core import *
from trac.ticket import TicketSystem, Ticket
from trac.ticket.notification import BatchTicketNotifyEmail
from trac.util.datefmt import utc
from trac.util.text import exception_to_unicode, to_unicode
from trac.util.translation import _, tag_
from trac.web import IRequestHandler
from trac.web.chrome import add_warning, add_script_data
class BatchModifyModule(Component):
"""Ticket batch modification module.
This component allows multiple tickets to be modified in one request from
the custom query page. For users with the TICKET_BATCH_MODIFY permission
it will add a [TracBatchModify batch modify] section underneath custom
query results. Users can choose which tickets and fields they wish to
modify.
"""
implements(IRequestHandler)
list_separator_re = re.compile(r'[;\s,]+')
list_connector_string = ', '
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/batchmodify'
def process_request(self, req):
req.perm.assert_permission('TICKET_BATCH_MODIFY')
comment = req.args.get('batchmod_value_comment', '')
action = req.args.get('action')
new_values = self._get_new_ticket_values(req)
selected_tickets = self._get_selected_tickets(req)
self._save_ticket_changes(req, selected_tickets,
new_values, comment, action)
#Always redirect back to the query page we came from.
req.redirect(req.session['query_href'])
def _get_new_ticket_values(self, req):
"""Pull all of the new values out of the post data."""
values = {}
for field in TicketSystem(self.env).get_ticket_fields():
name = field['name']
if name not in ('id', 'resolution', 'status', 'owner', 'time',
'changetime', 'summary', 'reporter',
'description') and field['type'] != 'textarea':
value = req.args.get('batchmod_value_' + name)
if value is not None:
values[name] = value
return values
def _get_selected_tickets(self, req):
"""The selected tickets will be a comma separated list
in the request arguments."""
selected_tickets = req.args.get('selected_tickets')
if selected_tickets == '':
return []
else:
return selected_tickets.split(',')
def add_template_data(self, req, data, tickets):
data['batch_modify'] = True
data['query_href'] = req.session['query_href'] or req.href.query()
data['action_controls'] = self._get_action_controls(req, tickets)
batch_list_modes = [
{'name': _("add"), 'value': "+"},
{'name': _("remove"), 'value': "-"},
{'name': _("add / remove"), 'value': "+-"},
{'name': _("set to"), 'value': "="},
]
add_script_data(req, batch_list_modes=batch_list_modes,
batch_list_properties=self._get_list_fields())
def _get_list_fields(self):
return [f['name']
for f in TicketSystem(self.env).get_ticket_fields()
if f['type'] == 'text' and f.get('format') == 'list']
def _get_action_controls(self, req, tickets):
action_controls = []
ts = TicketSystem(self.env)
tickets_by_action = {}
for t in tickets:
ticket = Ticket(self.env, t['id'])
actions = ts.get_available_actions(req, ticket)
for action in actions:
tickets_by_action.setdefault(action, []).append(ticket)
sorted_actions = sorted(set(tickets_by_action.keys()))
for action in sorted_actions:
first_label = None
hints = []
widgets = []
ticket = tickets_by_action[action][0]
for controller in self._get_action_controllers(req, ticket,
action):
label, widget, hint = controller.render_ticket_action_control(
req, ticket, action)
if not first_label:
first_label = label
widgets.append(widget)
hints.append(hint)
action_controls.append((action, first_label, tag(widgets), hints))
return action_controls
def _get_action_controllers(self, req, ticket, action):
"""Generator yielding the controllers handling the given `action`"""
for controller in TicketSystem(self.env).action_controllers:
actions = [a for w, a in
controller.get_ticket_actions(req, ticket) or []]
if action in actions:
yield controller
def _save_ticket_changes(self, req, selected_tickets,
new_values, comment, action):
"""Save all of the changes to tickets."""
when = datetime.now(utc)
list_fields = self._get_list_fields()
with self.env.db_transaction as db:
for id in selected_tickets:
t = Ticket(self.env, int(id))
_values = new_values.copy()
for field in list_fields:
if field in new_values:
old = t.values[field] if field in t.values else ''
new = new_values[field]
mode = req.args.get('batchmod_value_' + field +
'_mode')
new2 = req.args.get('batchmod_value_' + field +
'_secondary', '')
_values[field] = self._change_list(old, new, new2,
mode)
controllers = list(self._get_action_controllers(req, t,
action))
for controller in controllers:
_values.update(controller.get_ticket_changes(req, t,
action))
t.populate(_values)
t.save_changes(req.authname, comment, when=when)
for controller in controllers:
controller.apply_action_side_effects(req, t, action)
try:
tn = BatchTicketNotifyEmail(self.env)
tn.notify(selected_tickets, new_values, comment, action,
req.authname)
except Exception, e:
self.log.error("Failure sending notification on ticket batch"
"change: %s", exception_to_unicode(e))
add_warning(req, tag_("The changes have been saved, but an "
"error occurred while sending "
"notifications: %(message)s",
message=to_unicode(e)))
def _change_list(self, old_list, new_list, new_list2, mode):
changed_list = [k.strip()
for k in self.list_separator_re.split(old_list)
if k]
new_list = [k.strip()
for k in self.list_separator_re.split(new_list)
if k]
new_list2 = [k.strip()
for k in self.list_separator_re.split(new_list2)
if k]
if mode == '=':
changed_list = new_list
elif mode == '+':
for entry in new_list:
if entry not in changed_list:
changed_list.append(entry)
elif mode == '-':
for entry in new_list:
while entry in changed_list:
changed_list.remove(entry)
elif mode == '+-':
for entry in new_list:
if entry not in changed_list:
changed_list.append(entry)
for entry in new_list2:
while entry in changed_list:
changed_list.remove(entry)
return self.list_connector_string.join(changed_list)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/default_workflow.py | trac/trac/ticket/default_workflow.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Alec Thomas
# Copyright (C) 2007 Eli Carter
# Copyright (C) 2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Eli Carter
import pkg_resources
from ConfigParser import RawConfigParser
from StringIO import StringIO
from genshi.builder import tag
from trac.config import Configuration, ConfigSection
from trac.core import *
from trac.env import IEnvironmentSetupParticipant
from trac.perm import PermissionSystem
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.ticket.model import Resolution
from trac.util.text import obfuscate_email_address
from trac.util.translation import _, tag_, cleandoc_
from trac.web.chrome import Chrome, add_script, add_script_data
from trac.wiki.macros import WikiMacroBase
# -- Utilities for the ConfigurableTicketWorkflow
def parse_workflow_config(rawactions):
    """Parse raw `[ticket-workflow]` options into a dict of actions.

    `rawactions` is a list of `(option, value)` pairs.  The result maps
    each action name to a dict that always carries the normalized keys
    `name`, `default` (int), `newstate`, `oldstates` (list),
    `operations` (list) and `permissions` (list), plus any other
    `action.<attribute>` option verbatim.
    """
    actions = {}
    for option, value in rawactions:
        # Split only on the first '.' so that attribute names may
        # themselves contain dots (an unbounded split made the unpacking
        # below raise ValueError for such options).
        parts = option.split('.', 1)
        action = parts[0]
        if action not in actions:
            actions[action] = {'oldstates': '', 'newstate': ''}
        if len(parts) == 1:
            # Base name, of the syntax: old,states,here -> newstate
            try:
                oldstates, newstate = [x.strip() for x in value.split('->')]
            except ValueError:
                continue # Syntax error, a warning will be logged later
            actions[action]['newstate'] = newstate
            actions[action]['oldstates'] = oldstates
        else:
            attribute = parts[1]
            actions[action][attribute] = value
    # Fill in the defaults for every action, and normalize them to the desired
    # types
    def as_list(key):
        # `attributes` is rebound by the loop below before each call
        value = attributes.get(key, '')
        return [item for item in (x.strip() for x in value.split(',')) if item]

    for action, attributes in actions.items():
        # Default the 'name' attribute to the name used in the ini file
        if 'name' not in attributes:
            attributes['name'] = action
        # If not specified, an action is not the default.
        attributes['default'] = int(attributes.get('default', 0))
        # If operations are not specified, that means no operations
        attributes['operations'] = as_list('operations')
        # If no permissions are specified, then no permissions are needed
        attributes['permissions'] = as_list('permissions')
        # Normalize the oldstates
        attributes['oldstates'] = as_list('oldstates')
    return actions
def get_workflow_config(config):
    """Return the parsed `[ticket-workflow]` section of the given
    configuration (usually called with ``self.config``).
    """
    return parse_workflow_config(list(config.options('ticket-workflow')))
def load_workflow_config_snippet(config, filename):
    """Copy the `[ticket-workflow]` section of the named snippet file
    (one of the files shipped in the `trac.ticket` 'workflows' tree)
    into the given `config`.
    """
    path = pkg_resources.resource_filename('trac.ticket',
                                           'workflows/%s' % filename)
    snippet = Configuration(path)
    for option, value in snippet.options('ticket-workflow'):
        config.set('ticket-workflow', option, value)
class ConfigurableTicketWorkflow(Component):
    """Ticket action controller which provides actions according to a
    workflow defined in trac.ini.

    The workflow is defined in the `[ticket-workflow]` section of the
    [wiki:TracIni#ticket-workflow-section trac.ini] configuration file.
    """

    ticket_workflow_section = ConfigSection('ticket-workflow',
        """The workflow for tickets is controlled by plugins. By default,
        there's only a `ConfigurableTicketWorkflow` component in charge.
        That component allows the workflow to be configured via this section
        in the `trac.ini` file. See TracWorkflow for more details.

        (''since 0.11'')""")

    def __init__(self, *args, **kwargs):
        # Parsed copy of [ticket-workflow]; refreshed again whenever the
        # environment is created or upgraded (see below).
        self.actions = get_workflow_config(self.config)
        if not '_reset' in self.actions:
            # Special action that gets enabled if the current status no longer
            # exists, as no other action can then change its state. (#5307)
            self.actions['_reset'] = {
                'default': 0,
                'name': 'reset',
                'newstate': 'new',
                'oldstates': [], # Will not be invoked unless needed
                'operations': ['reset_workflow'],
                'permissions': []}
        self.log.debug('Workflow actions at initialization: %s\n' %
                       str(self.actions))
        for name, info in self.actions.iteritems():
            if not info['newstate']:
                self.log.warning("Ticket workflow action '%s' doesn't define "
                                 "any transitions", name)

    implements(ITicketActionController, IEnvironmentSetupParticipant)

    # IEnvironmentSetupParticipant methods

    def environment_created(self):
        """When an environment is created, we provide the basic-workflow,
        unless a ticket-workflow section already exists.
        """
        if not 'ticket-workflow' in self.config.sections():
            load_workflow_config_snippet(self.config, 'basic-workflow.ini')
            self.config.save()
            self.actions = get_workflow_config(self.config)

    def environment_needs_upgrade(self, db):
        """The environment needs an upgrade if there is no [ticket-workflow]
        section in the config.
        """
        return not list(self.config.options('ticket-workflow'))

    def upgrade_environment(self, db):
        """Insert a [ticket-workflow] section using the original-workflow"""
        load_workflow_config_snippet(self.config, 'original-workflow.ini')
        self.config.save()
        self.actions = get_workflow_config(self.config)
        info_message = """

==== Upgrade Notice ====

The ticket Workflow is now configurable.

Your environment has been upgraded, but configured to use the original
workflow. It is recommended that you look at changing this configuration to use
basic-workflow.

Read TracWorkflow for more information (don't forget to 'wiki upgrade' as well)

"""
        self.log.info(info_message.replace('\n', ' ').replace('==', ''))
        print info_message

    # ITicketActionController methods

    def get_ticket_actions(self, req, ticket):
        """Returns a list of (weight, action) tuples that are valid for this
        request and this ticket."""
        # Get the list of actions that can be performed

        # Determine the current status of this ticket.  If this ticket is in
        # the process of being modified, we need to base our information on the
        # pre-modified state so that we don't try to do two (or more!) steps at
        # once and get really confused.
        status = ticket._old.get('status', ticket['status']) or 'new'

        ticket_perm = req.perm(ticket.resource)
        allowed_actions = []
        for action_name, action_info in self.actions.items():
            oldstates = action_info['oldstates']
            if oldstates == ['*'] or status in oldstates:
                # This action is valid in this state.  Check permissions.
                required_perms = action_info['permissions']
                if self._is_action_allowed(ticket_perm, required_perms):
                    allowed_actions.append((action_info['default'],
                                            action_name))
        if not (status in ['new', 'closed'] or \
                    status in TicketSystem(self.env).get_all_status()) \
                and 'TICKET_ADMIN' in ticket_perm:
            # State no longer exists - add a 'reset' action if admin.
            allowed_actions.append((0, '_reset'))
        return allowed_actions

    def _is_action_allowed(self, ticket_perm, required_perms):
        """Return True if `ticket_perm` grants at least one permission of
        `required_perms`; an empty requirement always passes."""
        if not required_perms:
            return True
        for permission in required_perms:
            if permission in ticket_perm:
                return True
        return False

    def get_all_status(self):
        """Return a list of all states described by the configuration.
        """
        all_status = set()
        for action_name, action_info in self.actions.items():
            all_status.update(action_info['oldstates'])
            all_status.add(action_info['newstate'])
        all_status.discard('*')
        all_status.discard('')
        return all_status

    def render_ticket_action_control(self, req, ticket, action):
        """Render the UI fragment for one workflow action.

        Returns a `(label, control, hints)` triple: the action's display
        name, a Genshi fragment with the input widgets, and a sentence
        describing the consequences of applying the action.
        """
        self.log.debug('render_ticket_action_control: action "%s"' % action)

        this_action = self.actions[action]
        status = this_action['newstate']
        operations = this_action['operations']
        # Base the "changed from" hint on the pre-modification owner.
        current_owner = ticket._old.get('owner', ticket['owner'] or '(none)')
        if not (Chrome(self.env).show_email_addresses
                or 'EMAIL_VIEW' in req.perm(ticket.resource)):
            format_user = obfuscate_email_address
        else:
            format_user = lambda address: address
        current_owner = format_user(current_owner)

        control = [] # default to nothing
        hints = []
        if 'reset_workflow' in operations:
            control.append(tag("from invalid state "))
            hints.append(_("Current state no longer exists"))
        if 'del_owner' in operations:
            hints.append(_("The ticket will be disowned"))
        if 'set_owner' in operations:
            id = 'action_%s_reassign_owner' % action
            selected_owner = req.args.get(id, req.authname)

            # The candidate owners come either from the action's own
            # `set_owner` option, or from restrict_owner, or are free-form.
            if this_action.has_key('set_owner'):
                owners = [x.strip() for x in
                          this_action['set_owner'].split(',')]
            elif self.config.getbool('ticket', 'restrict_owner'):
                perm = PermissionSystem(self.env)
                owners = perm.get_users_with_permission('TICKET_MODIFY')
                owners.sort()
            else:
                owners = None

            if owners == None:
                # Free-form text input for the new owner.
                owner = req.args.get(id, req.authname)
                control.append(tag_('to %(owner)s',
                                    owner=tag.input(type='text', id=id,
                                                    name=id, value=owner)))
                hints.append(_("The owner will be changed from "
                               "%(current_owner)s",
                               current_owner=current_owner))
            elif len(owners) == 1:
                # Single candidate: hidden field, no choice to make.
                owner = tag.input(type='hidden', id=id, name=id,
                                  value=owners[0])
                formatted_owner = format_user(owners[0])
                control.append(tag_('to %(owner)s ',
                                    owner=tag(formatted_owner, owner)))
                if ticket['owner'] != owners[0]:
                    hints.append(_("The owner will be changed from "
                                   "%(current_owner)s to %(selected_owner)s",
                                   current_owner=current_owner,
                                   selected_owner=formatted_owner))
            else:
                # Several candidates: drop-down selection.
                control.append(tag_('to %(owner)s', owner=tag.select(
                    [tag.option(x, value=x,
                                selected=(x == selected_owner or None))
                     for x in owners],
                    id=id, name=id)))
                hints.append(_("The owner will be changed from "
                               "%(current_owner)s to the selected user",
                               current_owner=current_owner))
        elif 'set_owner_to_self' in operations and \
                ticket._old.get('owner', ticket['owner']) != req.authname:
            hints.append(_("The owner will be changed from %(current_owner)s "
                           "to %(authname)s", current_owner=current_owner,
                           authname=req.authname))
        if 'set_resolution' in operations:
            if this_action.has_key('set_resolution'):
                resolutions = [x.strip() for x in
                               this_action['set_resolution'].split(',')]
            else:
                resolutions = [val.name for val in Resolution.select(self.env)]
            if not resolutions:
                raise TracError(_("Your workflow attempts to set a resolution "
                                  "but none is defined (configuration issue, "
                                  "please contact your Trac admin)."))
            id = 'action_%s_resolve_resolution' % action
            if len(resolutions) == 1:
                # Single resolution: hidden field, no choice to make.
                resolution = tag.input(type='hidden', id=id, name=id,
                                       value=resolutions[0])
                control.append(tag_('as %(resolution)s',
                                    resolution=tag(resolutions[0],
                                                   resolution)))
                hints.append(_("The resolution will be set to %(name)s",
                               name=resolutions[0]))
            else:
                selected_option = req.args.get(id,
                        TicketSystem(self.env).default_resolution)
                control.append(tag_('as %(resolution)s',
                                    resolution=tag.select(
                    [tag.option(x, value=x,
                                selected=(x == selected_option or None))
                     for x in resolutions],
                    id=id, name=id)))
                hints.append(_("The resolution will be set"))
        if 'del_resolution' in operations:
            hints.append(_("The resolution will be deleted"))
        if 'leave_status' in operations:
            control.append(_('as %(status)s ',
                             status= ticket._old.get('status',
                                                     ticket['status'])))
        else:
            if status != '*':
                hints.append(_("Next status will be '%(name)s'", name=status))
        return (this_action['name'], tag(*control), '. '.join(hints) + ".")

    def get_ticket_changes(self, req, ticket, action):
        """Return the dict of field changes implied by applying `action`,
        or an empty dict when the user lacks the required permissions."""
        this_action = self.actions[action]

        # Enforce permissions
        if not self._has_perms_for_action(req, this_action, ticket.resource):
            # The user does not have any of the listed permissions, so we won't
            # do anything.
            return {}

        updated = {}

        # Status changes
        status = this_action['newstate']
        if status != '*':
            updated['status'] = status

        for operation in this_action['operations']:
            if operation == 'reset_workflow':
                updated['status'] = 'new'
            elif operation == 'del_owner':
                updated['owner'] = ''
            elif operation == 'set_owner':
                newowner = req.args.get('action_%s_reassign_owner' % action,
                                    this_action.get('set_owner', '').strip())
                # If there was already an owner, we get a list, [new, old],
                # but if there wasn't we just get new.
                if type(newowner) == list:
                    newowner = newowner[0]
                updated['owner'] = newowner
            elif operation == 'set_owner_to_self':
                updated['owner'] = req.authname
            elif operation == 'del_resolution':
                updated['resolution'] = ''
            elif operation == 'set_resolution':
                newresolution = req.args.get('action_%s_resolve_resolution' % \
                                             action,
                                this_action.get('set_resolution', '').strip())
                updated['resolution'] = newresolution

            # leave_status is just a no-op here, so we don't look for it.
        return updated

    def apply_action_side_effects(self, req, ticket, action):
        """The built-in operations have no side effects beyond the field
        changes returned by `get_ticket_changes`."""
        pass

    def _has_perms_for_action(self, req, action, resource):
        """Return True unless `action` lists permissions and the user has
        none of them on `resource`."""
        required_perms = action['permissions']
        if required_perms:
            for permission in required_perms:
                if permission in req.perm(resource):
                    break
            else:
                # The user does not have any of the listed permissions
                return False
        return True

    # Public methods (for other ITicketActionControllers that want to use
    #                 our config file and provide an operation for an action)

    def get_actions_by_operation(self, operation):
        """Return a list of all actions with a given operation
        (for use in the controller's get_all_status())
        """
        actions = [(info['default'], action) for action, info
                   in self.actions.items()
                   if operation in info['operations']]
        return actions

    def get_actions_by_operation_for_req(self, req, ticket, operation):
        """Return list of all actions with a given operation that are valid
        in the given state for the controller's get_ticket_actions().

        If state='*' (the default), all actions with the given operation are
        returned.
        """
        # Be sure to look at the original status.
        status = ticket._old.get('status', ticket['status'])
        actions = [(info['default'], action) for action, info
                   in self.actions.items()
                   if operation in info['operations'] and
                      ('*' in info['oldstates'] or
                       status in info['oldstates']) and
                      self._has_perms_for_action(req, info, ticket.resource)]
        return actions
class WorkflowMacro(WikiMacroBase):
    _domain = 'messages'
    # NOTE: this string is runtime data (the macro description shown to
    # users), not a docstring; the "heigth" typo is preserved as-is here.
    _description = cleandoc_(
    """Render a workflow graph.

    This macro accepts a TracWorkflow configuration and renders the states
    and transitions as a directed graph. If no parameters are given, the
    current ticket workflow is rendered. In WikiProcessors mode the `width`
    and `height` arguments can be specified.

    (Defaults: `width = 800` and `heigth = 600`)

    Examples:
    {{{
    [[Workflow()]]

    [[Workflow(go = here -> there; return = there -> here)]]

    {{{
    #!Workflow width=700 height=700
    leave = * -> *
    leave.operations = leave_status
    leave.default = 1

    accept = new,assigned,accepted,reopened -> accepted
    accept.permissions = TICKET_MODIFY
    accept.operations = set_owner_to_self

    resolve = new,assigned,accepted,reopened -> closed
    resolve.permissions = TICKET_MODIFY
    resolve.operations = set_resolution

    reassign = new,assigned,accepted,reopened -> assigned
    reassign.permissions = TICKET_MODIFY
    reassign.operations = set_owner

    reopen = closed -> reopened
    reopen.permissions = TICKET_CREATE
    reopen.operations = del_resolution
    }}}
    }}}
    """)

    def expand_macro(self, formatter, name, text, args):
        """Parse the given workflow (or the environment's live one when no
        argument is given) and emit a placeholder `<div>` plus the script
        data driving the client-side graph rendering."""
        if not text:
            # No argument: graph the environment's actual workflow.
            raw_actions = self.config.options('ticket-workflow')
        else:
            if args is None:
                # Macro (not processor) mode: ';' separates the lines.
                text = '\n'.join([line.lstrip() for line in text.split(';')])
            if not '[ticket-workflow]' in text:
                text = '[ticket-workflow]\n' + text
            parser = RawConfigParser()
            parser.readfp(StringIO(text))
            raw_actions = list(parser.items('ticket-workflow'))
        actions = parse_workflow_config(raw_actions)
        # Node list: every state mentioned as a source or target.
        states = list(set(
            [state for action in actions.itervalues()
                   for state in action['oldstates']] +
            [action['newstate'] for action in actions.itervalues()]))
        action_names = actions.keys()
        # Edges are (from-state, to-state, action) index triples into the
        # `states` / `actions` arrays shipped to the client.
        edges = []
        for name, action in actions.items():
            new_index = states.index(action['newstate'])
            name_index = action_names.index(name)
            for old_state in action['oldstates']:
                old_index = states.index(old_state)
                edges.append((old_index, new_index, name_index))

        args = args or {}
        graph = {'nodes': states, 'actions': action_names, 'edges': edges,
                 'width': args.get('width', 800),
                 'height': args.get('height', 600)}
        graph_id = '%012x' % id(graph)
        req = formatter.req
        add_script(req, 'common/js/excanvas.js', ie_if='IE')
        add_script(req, 'common/js/workflow_graph.js')
        add_script_data(req, {'graph_%s' % graph_id: graph})

        return tag.div(_("Enable JavaScript to display the workflow graph."),
                       class_='trac-workflow-graph system-message',
                       id='trac-workflow-graph-%s' % graph_id)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/roadmap.py | trac/trac/ticket/roadmap.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006-2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
from StringIO import StringIO
from datetime import datetime, timedelta
import re
from genshi.builder import tag
from trac import __version__
from trac.attachment import AttachmentModule
from trac.config import ConfigSection, ExtensionOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.resource import *
from trac.search import ISearchSource, search_to_regexps, shorten_result
from trac.util import as_bool
from trac.util.datefmt import parse_date, utc, to_utimestamp, to_datetime, \
get_datetime_format_hint, format_date, \
format_datetime, from_utimestamp, user_time
from trac.util.text import CRLF
from trac.util.translation import _, tag_
from trac.ticket.api import TicketSystem
from trac.ticket.model import Milestone, MilestoneCache, Ticket, \
group_milestones
from trac.timeline.api import ITimelineEventProvider
from trac.web import IRequestHandler, RequestDone
from trac.web.chrome import (Chrome, INavigationContributor,
add_link, add_notice, add_script, add_stylesheet,
add_warning, auth_link, prevnext_nav, web_context)
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import format_to
class ITicketGroupStatsProvider(Interface):

    def get_ticket_group_stats(ticket_ids):
        """Gather statistics on a group of tickets.

        `ticket_ids` is an iterable of the ids of the tickets to
        aggregate over.  This method returns a valid `TicketGroupStats`
        object.
        """
class TicketGroupStats(object):
    """Encapsulates statistics on a group of tickets."""

    def __init__(self, title, unit):
        """
        :param title: the display name of this group of stats (e.g.
                      ``'ticket status'``)
        :param unit: is the units for these stats in plural form,
                     e.g. ``_('hours'``)
        """
        self.title = title
        self.unit = unit
        # Aggregate ticket count over all intervals added so far.
        self.count = 0
        self.qry_args = {}
        self.intervals = []
        # Overall completion numbers; filled in by refresh_calcs().
        self.done_percent = 0
        self.done_count = 0

    def add_interval(self, title, count, qry_args, css_class,
                     overall_completion=None):
        """Register one division of this stats group's progress bar.

        :param title: the display name (e.g. ``'closed'``, ``'spent
                      effort'``) of this interval that will be
                      displayed in front of the unit name
        :param count: the number of units in the interval
        :param qry_args: a dict of extra params that will yield the
                         subset of tickets in this interval on a query.
        :param css_class: is the css class that will be used to
                          display the division
        :param overall_completion: can be set to true to make this
                                   interval count towards overall
                                   completion of this group of
                                   tickets.

        .. versionchanged :: 0.12
           deprecated `countsToProg` argument was removed, use
           `overall_completion` instead
        """
        interval = dict(title=title, count=count, qry_args=qry_args,
                        css_class=css_class, percent=None,
                        overall_completion=overall_completion)
        self.intervals.append(interval)
        self.count += count

    def refresh_calcs(self):
        """Recompute every interval's percentage and the overall
        completion numbers; call this after the last `add_interval`."""
        if self.count < 1:
            return
        self.done_percent = 0
        self.done_count = 0
        percent_sum = 0
        for interval in self.intervals:
            share = round(interval['count'] / float(self.count) * 100)
            interval['percent'] = share
            percent_sum += share
            if interval['overall_completion']:
                self.done_percent += share
                self.done_count += interval['count']
        # Rounding can make the shares sum to something other than 100%.
        # Compensate by fudging exactly one interval: the smallest
        # non-zero one when under 100%, the largest one when over.
        if self.done_count and percent_sum != 100:
            correction = 100 - percent_sum
            candidates = [i for i in self.intervals if i['percent']]
            if correction < 0:
                target = max(candidates, key=lambda i: i['percent'])
            else:
                target = min(candidates, key=lambda i: i['percent'])
            target['percent'] += correction
            self.done_percent += correction
class DefaultTicketGroupStatsProvider(Component):
    """Configurable ticket group statistics provider.

    See :teo:`TracIni#milestone-groups-section` for a detailed
    example configuration.
    """

    implements(ITicketGroupStatsProvider)

    milestone_groups_section = ConfigSection('milestone-groups',
        """As the workflow for tickets is now configurable, there can
        be many ticket states, and simply displaying closed tickets
        vs. all the others is maybe not appropriate in all cases. This
        section enables one to easily create ''groups'' of states that
        will be shown in different colors in the milestone progress
        bar.

        Note that the groups can only be based on the ticket
        //status//, nothing else. In particular, it's not possible to
        distinguish between different closed tickets based on the
        //resolution//.

        Example configuration with three groups, //closed//, //new//
        and //active// (the default only has closed and active):
        {{{
        # the 'closed' group correspond to the 'closed' tickets
        closed = closed

        # .order: sequence number in the progress bar
        closed.order = 0

        # .query_args: optional parameters for the corresponding
        #              query. In this example, the changes from the
        #              default are two additional columns ('created' and
        #              'modified'), and sorting is done on 'created'.
        closed.query_args = group=resolution,order=time,col=id,col=summary,col=owner,col=type,col=priority,col=component,col=severity,col=time,col=changetime

        # .overall_completion: indicates groups that count for overall
        #                      completion percentage
        closed.overall_completion = true

        new = new
        new.order = 1
        new.css_class = new
        new.label = new

        # Note: one catch-all group for other statuses is allowed
        active = *
        active.order = 2

        # .css_class: CSS class for this interval
        active.css_class = open

        # .label: displayed label for this group
        active.label = in progress
        }}}

        The definition consists in a comma-separated list of accepted
        status. Also, '*' means any status and could be used to
        associate all remaining states to one catch-all group.

        The CSS class can be one of: new (yellow), open (no color) or
        closed (green). Other styles can easily be added using custom
        CSS rule: `table.progress td.<class> { background: <color> }`
        to a [TracInterfaceCustomization#SiteAppearance site/style.css] file
        for example.

        (''since 0.11'')""")

    # Fallback used when no [milestone-groups] section is configured.
    default_milestone_groups = [
        {'name': 'closed', 'status': 'closed',
         'query_args': 'group=resolution', 'overall_completion': 'true'},
        {'name': 'active', 'status': '*', 'css_class': 'open'}
        ]

    def _get_ticket_groups(self):
        """Returns a list of dict describing the ticket groups
        in the expected order of appearance in the milestone progress bars.
        """
        if 'milestone-groups' in self.config:
            groups = {}
            order = 0
            for groupname, value in self.milestone_groups_section.options():
                qualifier = 'status'
                if '.' in groupname:
                    # 'name.qualifier = value' options refine the group.
                    groupname, qualifier = groupname.split('.', 1)
                group = groups.setdefault(groupname, {'name': groupname,
                                                      'order': order})
                group[qualifier] = value
                order = max(order, int(group['order'])) + 1
            return [group for group in sorted(groups.values(),
                                              key=lambda g: int(g['order']))]
        else:
            return self.default_milestone_groups

    def get_ticket_group_stats(self, ticket_ids):
        """Build a `TicketGroupStats` for the given ticket ids, with one
        interval per configured milestone group.

        Raises `TracError` on inconsistent group configuration (two
        catch-all groups, or a status claimed by two groups).
        """
        total_cnt = len(ticket_ids)
        all_statuses = set(TicketSystem(self.env).get_all_status())
        status_cnt = {}
        for s in all_statuses:
            status_cnt[s] = 0
        if total_cnt:
            # NOTE(review): the ids are interpolated into the IN clause via
            # str(); this assumes numeric ticket ids — confirm callers.
            for status, count in self.env.db_query("""
                    SELECT status, count(status) FROM ticket
                    WHERE id IN (%s) GROUP BY status
                    """ % ",".join(str(x) for x in sorted(ticket_ids))):
                status_cnt[status] = count

        stat = TicketGroupStats(_('ticket status'), _('tickets'))
        remaining_statuses = set(all_statuses)
        groups = self._get_ticket_groups()
        catch_all_group = None
        # we need to go through the groups twice, so that the catch up group
        # doesn't need to be the last one in the sequence
        for group in groups:
            status_str = group['status'].strip()
            if status_str == '*':
                if catch_all_group:
                    raise TracError(_(
                        "'%(group1)s' and '%(group2)s' milestone groups "
                        "both are declared to be \"catch-all\" groups. "
                        "Please check your configuration.",
                        group1=group['name'], group2=catch_all_group['name']))
                catch_all_group = group
            else:
                group_statuses = set([s.strip()
                                      for s in status_str.split(',')]) \
                                      & all_statuses
                if group_statuses - remaining_statuses:
                    raise TracError(_(
                        "'%(groupname)s' milestone group reused status "
                        "'%(status)s' already taken by other groups. "
                        "Please check your configuration.",
                        groupname=group['name'],
                        status=', '.join(group_statuses - remaining_statuses)))
                else:
                    remaining_statuses -= group_statuses
                group['statuses'] = group_statuses
        if catch_all_group:
            catch_all_group['statuses'] = remaining_statuses
        for group in groups:
            group_cnt = 0
            query_args = {}
            for s, cnt in status_cnt.iteritems():
                if s in group['statuses']:
                    group_cnt += cnt
                    query_args.setdefault('status', []).append(s)
            # Merge in any extra query arguments configured for the group.
            for arg in [kv for kv in group.get('query_args', '').split(',')
                        if '=' in kv]:
                k, v = [a.strip() for a in arg.split('=', 1)]
                query_args.setdefault(k, []).append(v)
            stat.add_interval(group.get('label', group['name']),
                              group_cnt, query_args,
                              group.get('css_class', group['name']),
                              as_bool(group.get('overall_completion')))
        stat.refresh_calcs()
        return stat
def get_ticket_stats(provider, tickets):
    """Ask the given stats `provider` for group statistics covering the
    ids of `tickets` (a sequence of dicts with an `'id'` key)."""
    ids = [ticket['id'] for ticket in tickets]
    return provider.get_ticket_group_stats(ids)
def get_tickets_for_milestone(env, db=None, milestone=None, field='component'):
    """Retrieve all tickets associated with the given `milestone`.

    Returns a list of dicts, each with the keys ``'id'``, ``'status'`` and
    `field` (the requested extra column or custom-field value).

    .. versionchanged :: 1.0
       the `db` parameter is no longer needed and will be removed in
       version 1.1.1
    """
    with env.db_query as db:
        fields = TicketSystem(env).get_ticket_fields()
        # `field` is interpolated directly into the SQL only when it names a
        # known built-in ticket column (guards against SQL injection); any
        # other value is treated as a custom field and passed as a bound
        # parameter through the ticket_custom join below.
        if field in [f['name'] for f in fields if not f.get('custom')]:
            sql = """SELECT id, status, %s FROM ticket WHERE milestone=%%s
                     ORDER BY %s""" % (field, field)
            args = (milestone,)
        else:
            # LEFT OUTER JOIN so tickets without a value for the custom
            # field are still returned (with fieldval None).
            sql = """SELECT id, status, value FROM ticket
                       LEFT OUTER JOIN ticket_custom ON (id=ticket AND name=%s)
                      WHERE milestone=%s ORDER BY value"""
            args = (field, milestone)
        return [{'id': tkt_id, 'status': status, field: fieldval}
                for tkt_id, status, fieldval in env.db_query(sql, args)]
def apply_ticket_permissions(env, req, tickets):
    """Filter out the tickets the request's user may not see.

    Keeps only the tickets (as returned by `get_tickets_for_milestone()`)
    for which the user holds `TICKET_VIEW`.
    """
    visible = []
    for ticket in tickets:
        if 'TICKET_VIEW' in req.perm('ticket', ticket['id']):
            visible.append(ticket)
    return visible
def milestone_stats_data(env, req, stat, name, grouped_by='component',
                         group=None):
    """Build the template data dict for one milestone's ticket statistics.

    Links into the custom query module are generated only when that module
    is enabled; otherwise the hrefs are `None`.
    """
    from trac.ticket.query import QueryModule
    query_enabled = env[QueryModule] is not None

    def query_href(extra_args):
        # Nothing to link to when the query module is disabled.
        if not query_enabled:
            return None
        params = {'milestone': name, grouped_by: group, 'group': 'status'}
        params.update(extra_args)
        return req.href.query(params)

    stats_href = query_href(stat.qry_args)
    interval_hrefs = [query_href(interval['qry_args'])
                      for interval in stat.intervals]
    return {'stats': stat,
            'stats_href': stats_href,
            'interval_hrefs': interval_hrefs}
def grouped_stats_data(env, stats_provider, tickets, by, per_group_stats_data):
    """Get the `tickets` stats data grouped by ticket field `by`.

    `per_group_stats_data(gstat, group_name)` should return a data dict to
    include for the group with field value `group_name`; the dict is expected
    to contain a ``'stats'`` key whose value has a ``count`` attribute (read
    by the scaling pass at the end of this function).
    """
    # Determine the candidate group names: either the field's declared
    # options, or — for free-form fields — the distinct values currently
    # present in the ticket table.
    group_names = []
    for field in TicketSystem(env).get_ticket_fields():
        if field['name'] == by:
            if 'options' in field:
                group_names = field['options']
                if field.get('optional'):
                    # An optional field may also be left empty.
                    group_names.insert(0, '')
            else:
                group_names = [name for name, in env.db_query("""
                        SELECT DISTINCT COALESCE(%s, '') FROM ticket
                        ORDER BY COALESCE(%s, '')
                        """ % (by, by))]
    max_count = 0
    data = []
    for name in group_names:
        # The empty-name group collects tickets whose field is NULL or ''.
        values = (name,) if name else (None, name)
        group_tickets = [t for t in tickets if t[by] in values]
        if not group_tickets:
            continue
        gstat = get_ticket_stats(stats_provider, group_tickets)
        if gstat.count > max_count:
            max_count = gstat.count
        gs_dict = {'name': name}
        gs_dict.update(per_group_stats_data(gstat, name))
        data.append(gs_dict)
    # Second pass: scale each group's bar relative to the largest group.
    for gs_dict in data:
        percent = 1.0
        if max_count:
            gstat = gs_dict['stats']
            percent = float(gstat.count) / float(max_count) * 100
        gs_dict['percent_of_max_total'] = percent
    return data
class RoadmapModule(Component):
    """Give an overview over all the milestones."""
    implements(INavigationContributor, IPermissionRequestor, IRequestHandler)
    stats_provider = ExtensionOption('roadmap', 'stats_provider',
        ITicketGroupStatsProvider,
        'DefaultTicketGroupStatsProvider',
        """Name of the component implementing `ITicketGroupStatsProvider`,
        which is used to collect statistics on groups of tickets for display
        in the roadmap views.""")
    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        # The roadmap is its own top-level navigation tab.
        return 'roadmap'
    def get_navigation_items(self, req):
        # Only advertise the tab to users allowed to view the roadmap.
        if 'ROADMAP_VIEW' in req.perm:
            yield ('mainnav', 'roadmap',
                   tag.a(_('Roadmap'), href=req.href.roadmap(), accesskey=3))
    # IPermissionRequestor methods
    def get_permission_actions(self):
        # ROADMAP_ADMIN is a meta-permission bundling the milestone actions;
        # the individual MILESTONE_* actions themselves are declared by
        # MilestoneModule.
        actions = ['MILESTONE_CREATE', 'MILESTONE_DELETE', 'MILESTONE_MODIFY',
                   'MILESTONE_VIEW', 'ROADMAP_VIEW']
        return ['ROADMAP_VIEW'] + [('ROADMAP_ADMIN', actions)]
    # IRequestHandler methods
    def match_request(self, req):
        return req.path_info == '/roadmap'
    def process_request(self, req):
        """Render the roadmap overview, or its iCalendar export."""
        req.perm.require('MILESTONE_VIEW')
        show = req.args.getlist('show')
        # Legacy value: show=all used to mean "include completed milestones".
        if 'all' in show:
            show = ['completed']
        milestones = Milestone.select(self.env, 'completed' in show)
        # NOTE(review): the 'noduedate' flag removes milestones that have
        # neither a due date nor a completion date — i.e. it appears to mean
        # "hide milestones with no due date"; confirm against the template.
        if 'noduedate' in show:
            milestones = [m for m in milestones
                          if m.due is not None or m.completed]
        milestones = [m for m in milestones
                      if 'MILESTONE_VIEW' in req.perm(m.resource)]
        stats = []
        queries = []
        for milestone in milestones:
            tickets = get_tickets_for_milestone(
                    self.env, milestone=milestone.name, field='owner')
            tickets = apply_ticket_permissions(self.env, req, tickets)
            stat = get_ticket_stats(self.stats_provider, tickets)
            stats.append(milestone_stats_data(self.env, req, stat,
                                              milestone.name))
            #milestone['tickets'] = tickets # for the iCalendar view
        # iCalendar export short-circuits the normal template rendering.
        if req.args.get('format') == 'ics':
            self._render_ics(req, milestones)
            return
        # FIXME should use the 'webcal:' scheme, probably
        username = None
        if req.authname and req.authname != 'anonymous':
            username = req.authname
        icshref = req.href.roadmap(show=show, user=username, format='ics')
        add_link(req, 'alternate', auth_link(req, icshref), _('iCalendar'),
                 'text/calendar', 'ics')
        data = {
            'milestones': milestones,
            'milestone_stats': stats,
            'queries': queries,
            'show': show,
        }
        add_stylesheet(req, 'common/css/roadmap.css')
        return 'roadmap.html', data, None
    # Internal methods
    def _render_ics(self, req, milestones):
        """Stream the milestones (and the requesting user's tickets) as an
        iCalendar document: one VEVENT per milestone with a due date, one
        VTODO per ticket owned by the requesting user."""
        req.send_response(200)
        req.send_header('Content-Type', 'text/calendar;charset=utf-8')
        buf = StringIO()
        from trac.ticket import Priority
        priorities = {}
        for priority in Priority.select(self.env):
            priorities[priority.name] = float(priority.value)
        def get_priority(ticket):
            # Map Trac priority values onto the iCalendar 1..9 PRIORITY
            # scale.  Returns None for unknown priorities.
            # NOTE(review): divides by len(priorities) - 1, so this raises
            # ZeroDivisionError if exactly one priority is defined — confirm.
            value = priorities.get(ticket['priority'])
            if value:
                return int((len(priorities) + 8 * value - 9) /
                           (len(priorities) - 1))
        def get_status(ticket):
            # Translate Trac ticket status into an iCalendar VTODO STATUS.
            # Precedence note: 'new' maps to NEEDS-ACTION regardless of
            # owner; 'reopened' only when it has no owner.
            status = ticket['status']
            if status == 'new' or status == 'reopened' and not ticket['owner']:
                return 'NEEDS-ACTION'
            elif status == 'assigned' or status == 'reopened':
                return 'IN-PROCESS'
            elif status == 'closed':
                if ticket['resolution'] == 'fixed':
                    return 'COMPLETED'
                else:
                    return 'CANCELLED'
            else: return ''
        def escape_value(text):
            # Backslash-escape the TEXT special characters and fold
            # newlines to literal '\n' sequences.
            s = ''.join(map(lambda c: '\\' + c if c in ';,\\' else c, text))
            return '\\n'.join(re.split(r'[\r\n]+', s))
        def write_prop(name, value, params={}):
            # Emit one content line, folded at 75 characters with a leading
            # space on continuation lines, as the iCalendar format requires.
            text = ';'.join([name] + [k + '=' + v for k, v in params.items()]) \
                   + ':' + escape_value(value)
            firstline = 1
            while text:
                if not firstline:
                    text = ' ' + text
                else:
                    firstline = 0
                buf.write(text[:75] + CRLF)
                text = text[75:]
        def write_date(name, value, params={}):
            # NOTE(review): mutates the (shared) default `params` dict;
            # benign here since it always sets the same key/value.
            params['VALUE'] = 'DATE'
            write_prop(name, format_date(value, '%Y%m%d', req.tz), params)
        def write_utctime(name, value, params={}):
            write_prop(name, format_datetime(value, '%Y%m%dT%H%M%SZ', utc),
                       params)
        host = req.base_url[req.base_url.find('://') + 3:]
        user = req.args.get('user', 'anonymous')
        write_prop('BEGIN', 'VCALENDAR')
        write_prop('VERSION', '2.0')
        write_prop('PRODID', '-//Edgewall Software//NONSGML Trac %s//EN'
                   % __version__)
        write_prop('METHOD', 'PUBLISH')
        write_prop('X-WR-CALNAME',
                   self.env.project_name + ' - ' + _('Roadmap'))
        write_prop('X-WR-CALDESC', self.env.project_description)
        write_prop('X-WR-TIMEZONE', str(req.tz))
        for milestone in milestones:
            uid = '<%s/milestone/%s@%s>' % (req.base_path, milestone.name,
                                            host)
            # Only milestones with a due date become calendar events.
            if milestone.due:
                write_prop('BEGIN', 'VEVENT')
                write_prop('UID', uid)
                write_utctime('DTSTAMP', milestone.due)
                write_date('DTSTART', milestone.due)
                write_prop('SUMMARY', _('Milestone %(name)s',
                                        name=milestone.name))
                write_prop('URL', req.abs_href.milestone(milestone.name))
                if milestone.description:
                    write_prop('DESCRIPTION', milestone.description)
                write_prop('END', 'VEVENT')
            tickets = get_tickets_for_milestone(
                    self.env, milestone=milestone.name, field='owner')
            tickets = apply_ticket_permissions(self.env, req, tickets)
            # One VTODO per ticket owned by the requested user.
            for tkt_id in [ticket['id'] for ticket in tickets
                           if ticket['owner'] == user]:
                ticket = Ticket(self.env, tkt_id)
                write_prop('BEGIN', 'VTODO')
                write_prop('UID', '<%s/ticket/%s@%s>' % (req.base_path,
                                                         tkt_id, host))
                if milestone.due:
                    write_prop('RELATED-TO', uid)
                    write_date('DUE', milestone.due)
                write_prop('SUMMARY', _('Ticket #%(num)s: %(summary)s',
                                        num=ticket.id,
                                        summary=ticket['summary']))
                write_prop('URL', req.abs_href.ticket(ticket.id))
                write_prop('DESCRIPTION', ticket['description'])
                priority = get_priority(ticket)
                if priority:
                    write_prop('PRIORITY', unicode(priority))
                write_prop('STATUS', get_status(ticket))
                if ticket['status'] == 'closed':
                    # Use the time of the last status change as COMPLETED.
                    for time, in self.env.db_query("""
                            SELECT time FROM ticket_change
                            WHERE ticket=%s AND field='status'
                            ORDER BY time desc LIMIT 1
                            """, (ticket.id,)):
                        write_utctime('COMPLETED', from_utimestamp(time))
                write_prop('END', 'VTODO')
        write_prop('END', 'VCALENDAR')
        ics_str = buf.getvalue().encode('utf-8')
        req.send_header('Content-Length', len(ics_str))
        req.end_headers()
        req.write(ics_str)
        raise RequestDone
class MilestoneModule(Component):
    """View and edit individual milestones."""
    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               ITimelineEventProvider, IWikiSyntaxProvider, IResourceManager,
               ISearchSource)
    # Configurable per-view, analogous to the [roadmap] stats_provider option.
    stats_provider = ExtensionOption('milestone', 'stats_provider',
        ITicketGroupStatsProvider,
        'DefaultTicketGroupStatsProvider',
        """Name of the component implementing `ITicketGroupStatsProvider`,
        which is used to collect statistics on groups of tickets for display
        in the milestone views.""")
# INavigationContributor methods
    def get_active_navigation_item(self, req):
        """Highlight the Roadmap tab: milestones are part of the roadmap."""
        return 'roadmap'
    def get_navigation_items(self, req):
        """Contribute no navigation items of its own."""
        return []
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['MILESTONE_CREATE', 'MILESTONE_DELETE', 'MILESTONE_MODIFY',
'MILESTONE_VIEW']
return actions + [('MILESTONE_ADMIN', actions)]
# ITimelineEventProvider methods
    def get_timeline_filters(self, req):
        """Offer the 'Milestones reached' filter to users who may view
        milestones."""
        if 'MILESTONE_VIEW' in req.perm:
            yield ('milestone', _('Milestones reached'))
    def get_timeline_events(self, req, start, stop, filters):
        """Yield a 'milestone' event for each milestone completed within
        [start, stop] that the user may view, plus any attachment events
        on milestone resources in that range."""
        if 'milestone' in filters:
            milestone_realm = Resource('milestone')
            for name, due, completed, description \
                    in MilestoneCache(self.env).milestones.itervalues():
                if completed and start <= completed <= stop:
                    # TODO: creation and (later) modifications should also be
                    # reported
                    milestone = milestone_realm(id=name)
                    if 'MILESTONE_VIEW' in req.perm(milestone):
                        yield ('milestone', completed, '', # FIXME: author?
                               (milestone, description))
            # Attachments
            for event in AttachmentModule(self.env).get_timeline_events(
                req, milestone_realm, start, stop):
                yield event
def render_timeline_event(self, context, field, event):
milestone, description = event[3]
if field == 'url':
return context.href.milestone(milestone.id)
elif field == 'title':
return tag_('Milestone %(name)s completed',
name=tag.em(milestone.id))
elif field == 'description':
return format_to(self.env, None, context.child(resource=milestone),
description)
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/milestone(?:/(.+))?$', req.path_info)
if match:
if match.group(1):
req.args['id'] = match.group(1)
return True
def process_request(self, req):
milestone_id = req.args.get('id')
req.perm('milestone', milestone_id).require('MILESTONE_VIEW')
add_link(req, 'up', req.href.roadmap(), _('Roadmap'))
action = req.args.get('action', 'view')
try:
milestone = Milestone(self.env, milestone_id)
except ResourceNotFound:
if 'MILESTONE_CREATE' not in req.perm('milestone', milestone_id):
raise
milestone = Milestone(self.env, None)
milestone.name = milestone_id
action = 'edit' # rather than 'new' so that it works for POST/save
if req.method == 'POST':
if req.args.has_key('cancel'):
if milestone.exists:
req.redirect(req.href.milestone(milestone.name))
else:
req.redirect(req.href.roadmap())
elif action == 'edit':
return self._do_save(req, milestone)
elif action == 'delete':
self._do_delete(req, milestone)
elif action in ('new', 'edit'):
return self._render_editor(req, milestone)
elif action == 'delete':
return self._render_confirm(req, milestone)
if not milestone.name:
req.redirect(req.href.roadmap())
return self._render_view(req, milestone)
# Internal methods
def _do_delete(self, req, milestone):
req.perm(milestone.resource).require('MILESTONE_DELETE')
retarget_to = None
if req.args.has_key('retarget'):
retarget_to = req.args.get('target') or None
milestone.delete(retarget_to, req.authname)
add_notice(req, _('The milestone "%(name)s" has been deleted.',
name=milestone.name))
req.redirect(req.href.roadmap())
    def _do_save(self, req, milestone):
        """Validate and save the milestone from the submitted form data.

        Collects all constraint violations as warnings and re-renders the
        editor when any are found, instead of failing on the first error.
        On success, optionally retargets the milestone's open tickets and
        redirects to the milestone view.
        """
        if milestone.exists:
            req.perm(milestone.resource).require('MILESTONE_MODIFY')
        else:
            req.perm(milestone.resource).require('MILESTONE_CREATE')
        old_name = milestone.name
        new_name = req.args.get('name')
        milestone.description = req.args.get('description', '')
        # The 'due' checkbox enables the due date; 'duedate' holds the value.
        if 'due' in req.args:
            due = req.args.get('duedate', '')
            milestone.due = user_time(req, parse_date, due, hint='datetime') \
                            if due else None
        else:
            milestone.due = None
        completed = req.args.get('completeddate', '')
        retarget_to = req.args.get('target')
        # Instead of raising one single error, check all the constraints and
        # let the user fix them by going back to edit mode showing the warnings
        warnings = []
        def warn(msg):
            add_warning(req, msg)
            warnings.append(msg)
        # -- check the name
        # If the name has changed, check that the milestone doesn't already
        # exist
        # FIXME: the whole .exists business needs to be clarified
        #        (#4130) and should behave like a WikiPage does in
        #        this respect.
        try:
            # Milestone() succeeding means a milestone with the new name
            # already exists; ResourceNotFound means the name is free.
            new_milestone = Milestone(self.env, new_name)
            if new_milestone.name == old_name:
                pass # Creation or no name change
            elif new_milestone.name:
                warn(_('Milestone "%(name)s" already exists, please '
                       'choose another name.', name=new_milestone.name))
            else:
                warn(_('You must provide a name for the milestone.'))
        except ResourceNotFound:
            milestone.name = new_name
        # -- check completed date
        if 'completed' in req.args:
            completed = user_time(req, parse_date, completed,
                                  hint='datetime') if completed else None
            if completed and completed > datetime.now(utc):
                warn(_('Completion date may not be in the future'))
        else:
            completed = None
        milestone.completed = completed
        if warnings:
            # Send the user back to the editor with the collected warnings.
            return self._render_editor(req, milestone)
        # -- actually save changes
        if milestone.exists:
            milestone.update()
            # eventually retarget opened tickets associated with the milestone
            if 'retarget' in req.args and completed:
                self.env.db_transaction("""
                    UPDATE ticket SET milestone=%s
                    WHERE milestone=%s and status != 'closed'
                    """, (retarget_to, old_name))
                self.log.info("Tickets associated with milestone %s "
                              "retargeted to %s" % (old_name, retarget_to))
        else:
            milestone.insert()
        add_notice(req, _("Your changes have been saved."))
        req.redirect(req.href.milestone(milestone.name))
def _render_confirm(self, req, milestone):
req.perm(milestone.resource).require('MILESTONE_DELETE')
milestones = [m for m in Milestone.select(self.env)
if m.name != milestone.name
and 'MILESTONE_VIEW' in req.perm(m.resource)]
data = {
'milestone': milestone,
'milestone_groups': group_milestones(milestones,
'TICKET_ADMIN' in req.perm)
}
return 'milestone_delete.html', data, None
def _render_editor(self, req, milestone):
# Suggest a default due time of 18:00 in the user's timezone
now = datetime.now(req.tz)
default_due = datetime(now.year, now.month, now.day, 18)
if now.hour > 18:
default_due += timedelta(days=1)
default_due = to_datetime(default_due, req.tz)
data = {
'milestone': milestone,
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/wikisyntax.py | trac/trac/ticket/tests/wikisyntax.py | # -*- coding: utf-8 -*-
import unittest
from trac.ticket.model import Ticket
from trac.ticket.roadmap import Milestone
from trac.wiki.tests import formatter
TICKET_TEST_CASES = u"""
============================== ticket: link resolver
ticket:1
ticket:12
ticket:abc
------------------------------
<p>
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">ticket:1</a>
<a class="missing ticket">ticket:12</a>
<a class="missing ticket">ticket:abc</a>
</p>
------------------------------
============================== ticket: link resolver + arguments
ticket:1?format=csv
ticket:1#comment:3
------------------------------
<p>
<a class="new ticket" href="/ticket/1?format=csv" title="This is the summary (new)">ticket:1?format=csv</a>
<a class="new ticket" href="/ticket/1#comment:3" title="This is the summary (new)">ticket:1#comment:3</a>
</p>
------------------------------
============================== ticket: link resolver with ranges
ticket:12-14,33
ticket:12,33?order=created
------------------------------
<p>
<a href="/query?id=12-14%2C33" title="Tickets 12-14, 33">ticket:12-14,33</a>
<a href="/query?id=12%2C33&order=created" title="Tickets 12, 33">ticket:12,33?order=created</a>
</p>
------------------------------
============================== ticket link shorthand form
#1, #2
#12, #abc
------------------------------
<p>
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">#1</a>, <a class="missing ticket">#2</a>
<a class="missing ticket">#12</a>, #abc
</p>
------------------------------
============================== ticket link shorthand form with ranges
#1-5,42
#1,3,5,7
------------------------------
<p>
<a href="/query?id=1-5%2C42" title="Tickets 1-5, 42">#1-5,42</a>
<a href="/query?id=1%2C3%2C5%2C7" title="Tickets 1, 3, 5, 7">#1,3,5,7</a>
</p>
------------------------------
============================== ticket link shorthand form with long ranges (#10111 regression)
#1-123456789012345678901234
------------------------------
<p>
<a href="/query?id=1-123456789012345678901234" title="Tickets 1-123456789012345678901234">#1-123456789012345678901234</a>
</p>
------------------------------
============================== escaping the above
!#1
------------------------------
<p>
#1
</p>
------------------------------
#1
============================== InterTrac for tickets
trac:ticket:2041
[trac:ticket:2041 Trac #2041]
#T2041
#trac2041
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>trac:ticket:2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>Trac #2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>#T2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/ticket%3A2041" title="ticket:2041 in Trac's Trac"><span class="icon"></span>#trac2041</a>
</p>
------------------------------
============================== Ticket InterTrac shorthands
T:#2041
trac:#2041
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/%232041" title="#2041 in Trac's Trac"><span class="icon"></span>T:#2041</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/%232041" title="#2041 in Trac's Trac"><span class="icon"></span>trac:#2041</a>
</p>
------------------------------
============================== ticket syntax with unicode digits
#⁴²
#1-⁵,42
#1,³,5,7
#T²⁰⁴¹
#trac²⁰⁴¹
------------------------------
<p>
#⁴²
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">#1</a>-⁵,42
<a class="new ticket" href="/ticket/1" title="This is the summary (new)">#1</a>,³,5,7
#T²⁰⁴¹
#trac²⁰⁴¹
</p>
------------------------------
""" # "
def ticket_setup(tc):
    """Create ticket #1 ('new', by santa) used by the ticket-link tests."""
    tkt = Ticket(tc.env)
    tkt.values['reporter'] = 'santa'
    tkt.values['summary'] = 'This is the summary'
    tkt.values['status'] = 'new'
    tkt.insert()
def ticket_teardown(tc):
    """Drop the test environment's data between test case groups."""
    tc.env.reset_db()
REPORT_TEST_CASES = u"""
============================== report link shorthand form
{1}, {2}
{12}, {abc}
------------------------------
<p>
<a class="report" href="/report/1">{1}</a>, <a class="report" href="/report/2">{2}</a>
<a class="report" href="/report/12">{12}</a>, {abc}
</p>
------------------------------
============================== escaping the above
!{1}
------------------------------
<p>
{1}
</p>
------------------------------
{1}
============================== ticket shorthands, not numerical HTML entities
 
------------------------------
<p>
&#1; &#23;
</p>
------------------------------
&#1; &#23;
============================== InterTrac for reports
trac:report:1
[trac:report:1 Trac r1]
{T1}
{trac1}
{trac 1}
------------------------------
<p>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>trac:report:1</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>Trac r1</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>{T1}</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>{trac1}</a>
<a class="ext-link" href="http://trac.edgewall.org/intertrac/report%3A1" title="report:1 in Trac's Trac"><span class="icon"></span>{trac 1}</a>
</p>
------------------------------
============================== report syntax with unicode digits
{⁴²} !{⁴²}
{T⁴²}
{trac⁴²}
------------------------------
<p>
{⁴²} !{⁴²}
{T⁴²}
{trac⁴²}
</p>
------------------------------
""" # '
def report_setup(tc):
    """Placeholder: the report-link tests currently need no fixtures."""
    pass # TBD
MILESTONE_TEST_CASES = u"""
============================== milestone: link resolver
milestone:foo
[milestone:boo Milestone Boo]
[milestone:roo Milestone Roo]
------------------------------
<p>
<a class="missing milestone" href="/milestone/foo" rel="nofollow">milestone:foo</a>
<a class="milestone" href="/milestone/boo">Milestone Boo</a>
<a class="closed milestone" href="/milestone/roo">Milestone Roo</a>
</p>
------------------------------
============================== milestone: link resolver + arguments
milestone:?action=new
[milestone:boo#KnownIssues Known Issues for 1.0]
------------------------------
<p>
<a class="missing milestone" href="/milestone/?action=new" rel="nofollow">milestone:?action=new</a>
<a class="milestone" href="/milestone/boo#KnownIssues">Known Issues for 1.0</a>
</p>
------------------------------
""" #"
def milestone_setup(tc):
    """Create the two milestones the link tests refer to: 'boo' (open,
    no due date) and 'roo' (already completed)."""
    from datetime import datetime
    from trac.util.datefmt import utc
    open_ms = Milestone(tc.env)
    open_ms.name = 'boo'
    open_ms.completed = None
    open_ms.due = None
    open_ms.insert()
    done_ms = Milestone(tc.env)
    done_ms.name = 'roo'
    done_ms.completed = datetime.now(utc)
    done_ms.due = None
    done_ms.insert()
def milestone_teardown(tc):
    """Drop the test environment's data between test case groups."""
    tc.env.reset_db()
QUERY_TEST_CASES = u"""
============================== query: link resolver
query:?order=priority
query:?order=priority&owner=me
query:?type=résumé
query:status=new|reopened
query:reporter!=
query:reporter=joe|jack&owner=me
query:group=owner
query:verbose=1
query:summary=résumé
------------------------------
<p>
<a class="query" href="/query?order=priority">query:?order=priority</a>
</p>
<p>
<a class="query" href="/query?order=priority&owner=me">query:?order=priority&owner=me</a>
</p>
<p>
<a class="query" href="/query?type=r%C3%A9sum%C3%A9">query:?type=résumé</a>
</p>
<p>
<a class="query" href="/query?status=new&status=reopened&order=priority">query:status=new|reopened</a>
</p>
<p>
<a class="query" href="/query?reporter=!&order=priority">query:reporter!=</a>
</p>
<p>
<a class="query" href="/query?owner=me&reporter=joe&reporter=jack&order=priority">query:reporter=joe|jack&owner=me</a>
</p>
<p>
<a class="query" href="/query?group=owner&order=priority">query:group=owner</a>
</p>
<p>
<a class="query" href="/query?row=description&order=priority">query:verbose=1</a>
</p>
<p>
<a class="query" href="/query?summary=r%C3%A9sum%C3%A9&order=priority">query:summary=résumé</a>
</p>
------------------------------
============================== TicketQuery macro: no results, list form
Reopened tickets: [[TicketQuery(status=reopened)]]
------------------------------
<p>
Reopened tickets: <span class="query_no_results">No results</span>
</p>
------------------------------
============================== TicketQuery macro: no results, count 0
Reopened tickets: [[TicketQuery(status=reopened, format=count)]]
------------------------------
<p>
Reopened tickets: <span class="query_count" title="0 tickets for which status=reopened&max=0&order=id">0</span>
</p>
------------------------------
============================== TicketQuery macro: no results, compact form
Reopened tickets: [[TicketQuery(status=reopened, format=compact)]]
------------------------------
<p>
Reopened tickets: <span class="query_no_results">No results</span>
</p>
------------------------------
============================== TicketQuery macro: one result, list form
New tickets: [[TicketQuery(status=new)]]
------------------------------
<p>
New tickets: </p><div><dl class="wiki compact"><dt><a class="new" href="/ticket/1" title="This is the summary">#1</a></dt><dd>This is the summary</dd></dl></div><p>
</p>
------------------------------
============================== TicketQuery macro: one result, count 1
New tickets: [[TicketQuery(status=new, format=count)]]
------------------------------
<p>
New tickets: <span class="query_count" title="1 tickets for which status=new&max=0&order=id">1</span>
</p>
------------------------------
============================== TicketQuery macro: one result, compact form
New tickets: [[TicketQuery(status=new, format=compact)]]
------------------------------
<p>
New tickets: <span><a class="new" href="/ticket/1" title="This is the summary">#1</a></span>
</p>
------------------------------
"""
QUERY2_TEST_CASES = u"""
============================== TicketQuery macro: two results, list form
New tickets: [[TicketQuery(status=new, order=reporter)]]
------------------------------
<p>
New tickets: </p><div><dl class="wiki compact"><dt><a class="new" href="/ticket/2" title="This is another summary">#2</a></dt><dd>This is another summary</dd><dt><a class="new" href="/ticket/1" title="This is the summary">#1</a></dt><dd>This is the summary</dd></dl></div><p>
</p>
------------------------------
============================== TicketQuery macro: two results, count 2
New tickets: [[TicketQuery(status=new, order=reporter, format=count)]]
------------------------------
<p>
New tickets: <span class="query_count" title="2 tickets for which status=new&max=0&order=reporter">2</span>
</p>
------------------------------
============================== TicketQuery macro: two results, compact form
New tickets: [[TicketQuery(status=new, order=reporter, format=compact)]]
------------------------------
<p>
New tickets: <span><a class="new" href="/ticket/2" title="This is another summary">#2</a>, <a class="new" href="/ticket/1" title="This is the summary">#1</a></span>
</p>
------------------------------
"""
def query2_setup(tc):
    """Create the two 'new' tickets (#1 by santa, #2 by claus) that the
    two-result TicketQuery tests expect."""
    for reporter, summary in [('santa', 'This is the summary'),
                              ('claus', 'This is another summary')]:
        tkt = Ticket(tc.env)
        tkt.values.update({'reporter': reporter,
                           'summary': summary,
                           'status': 'new'})
        tkt.insert()
def query2_teardown(tc):
    """Drop the test environment's data between test case groups."""
    tc.env.reset_db()
COMMENT_TEST_CASES = u"""
============================== comment: link resolver (deprecated)
comment:ticket:123:2 (deprecated)
[comment:ticket:123:2 see above] (deprecated)
[comment:ticket:123:description see descr] (deprecated)
------------------------------
<p>
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">comment:ticket:123:2</a> (deprecated)
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">see above</a> (deprecated)
<a href="/ticket/123#comment:description" title="Comment description for Ticket #123">see descr</a> (deprecated)
</p>
------------------------------
============================== comment: link resolver
comment:2:ticket:123
[comment:2:ticket:123 see above]
[comment:description:ticket:123 see descr]
------------------------------
<p>
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">comment:2:ticket:123</a>
<a href="/ticket/123#comment:2" title="Comment 2 for Ticket #123">see above</a>
<a href="/ticket/123#comment:description" title="Comment description for Ticket #123">see descr</a>
</p>
------------------------------
""" # "
# NOTE: the following test cases:
#
# comment:2
# [comment:2 see above]
#
# would trigger an error in the workaround code ../api.py, line 235
# As it's a problem with a temp workaround, I think there's no need
# to fix it for now.
def suite():
    """Aggregate all wiki-syntax formatter suites defined in this module,
    in their original order."""
    sub_suites = [
        formatter.suite(TICKET_TEST_CASES, ticket_setup, __file__,
                        ticket_teardown),
        formatter.suite(REPORT_TEST_CASES, report_setup, __file__),
        formatter.suite(MILESTONE_TEST_CASES, milestone_setup, __file__,
                        milestone_teardown),
        formatter.suite(QUERY_TEST_CASES, ticket_setup, __file__,
                        ticket_teardown),
        formatter.suite(QUERY2_TEST_CASES, query2_setup, __file__,
                        query2_teardown),
        formatter.suite(COMMENT_TEST_CASES, file=__file__),
    ]
    tests = unittest.TestSuite()
    for sub in sub_suites:
        tests.addTest(sub)
    return tests
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/report.py | trac/trac/ticket/tests/report.py | # -*- coding: utf-8 -*-
import doctest
from trac.db.mysql_backend import MySQLConnection
from trac.ticket.report import ReportModule
from trac.test import EnvironmentStub, Mock
from trac.web.api import Request, RequestDone
import trac
import unittest
from StringIO import StringIO
class MockMySQLConnection(MySQLConnection):
    """`MySQLConnection` stand-in whose constructor does nothing, so tests
    can exercise connection-level helpers without a real MySQL server."""
    def __init__(self):
        pass
class ReportTestCase(unittest.TestCase):
    """Tests for ReportModule: $VAR substitution in report SQL, CSV
    escaping, and redirection of saved custom queries."""
    def setUp(self):
        # Fresh in-memory environment; ReportModule is the unit under test.
        self.env = EnvironmentStub()
        self.report_module = ReportModule(self.env)
    def tearDown(self):
        self.env.reset_db()
    def _make_environ(self, scheme='http', server_name='example.org',
                      server_port=80, method='GET', script_name='/trac',
                      **kwargs):
        # Build a minimal WSGI environ for constructing Request objects.
        environ = {'wsgi.url_scheme': scheme, 'wsgi.input': StringIO(''),
                   'REQUEST_METHOD': method, 'SERVER_NAME': server_name,
                   'SERVER_PORT': server_port, 'SCRIPT_NAME': script_name}
        environ.update(kwargs)
        return environ
    def test_sub_var_no_quotes(self):
        # A bare $VAR becomes a %s placeholder with its value bound.
        sql, values, missing_args = self.report_module.sql_sub_vars(
            "$VAR", {'VAR': 'value'})
        self.assertEqual("%s", sql)
        self.assertEqual(['value'], values)
        self.assertEqual([], missing_args)
    def test_sub_var_digits_underscore(self):
        # Variable names may contain underscores and digits, but may not
        # start with a digit ($2VAR is left untouched).
        sql, values, missing_args = self.report_module.sql_sub_vars(
            "$_VAR, $VAR2, $2VAR", {'_VAR': 'value1', 'VAR2': 'value2'})
        self.assertEqual("%s, %s, $2VAR", sql)
        self.assertEqual(['value1', 'value2'], values)
        self.assertEqual([], missing_args)
    def test_sub_var_quotes(self):
        # Inside quotes the substitution is rewritten with the backend's
        # string concatenation.
        sql, values, missing_args = self.report_module.sql_sub_vars(
            "'$VAR'", {'VAR': 'value'})
        self.assertEqual(self.env.get_read_db().concat("''", '%s', "''"), sql)
        self.assertEqual(['value'], values)
        self.assertEqual([], missing_args)
    def test_sub_var_missing_args(self):
        # Unknown variables still get placeholders (bound to '') and are
        # reported in missing_args.
        sql, values, missing_args = self.report_module.sql_sub_vars(
            "$VAR, $PARAM, $MISSING", {'VAR': 'value'})
        self.assertEqual("%s, %s, %s", sql)
        self.assertEqual(['value', '', ''], values)
        self.assertEqual(['PARAM', 'MISSING'], missing_args)
    def test_csv_escape(self):
        # Values containing the delimiter must be quoted; output starts
        # with the UTF-8 BOM and uses CRLF line endings.
        buf = StringIO()
        def start_response(status, headers):
            return buf.write
        environ = self._make_environ()
        req = Request(environ, start_response)
        cols = ['TEST_COL', 'TEST_ZERO']
        rows = [('value, needs escaped', 0)]
        try:
            self.report_module._send_csv(req, cols, rows)
        except RequestDone:
            # _send_csv terminates the request once the body is written.
            pass
        self.assertEqual('\xef\xbb\xbfTEST_COL,TEST_ZERO\r\n"value, needs escaped",0\r\n',
                         buf.getvalue())
    def test_saved_custom_query_redirect(self):
        # A report whose "query" is a query: link redirects to the query
        # module, propagating the report id and URL-encoding the params.
        query = u'query:?type=résumé'
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute("INSERT INTO report (title,query,description) "
                       "VALUES (%s,%s,%s)", ('redirect', query, ''))
        id = db.get_last_id(cursor, 'report')
        db.commit()
        headers_sent = {}
        def start_response(status, headers):
            headers_sent.update(dict(headers))
        environ = self._make_environ()
        req = Request(environ, start_response)
        req.authname = 'anonymous'
        req.session = Mock(save=lambda: None)
        self.assertRaises(RequestDone,
                          self.report_module._render_view, req, id)
        self.assertEqual('http://example.org/trac/query?' + \
                         'type=r%C3%A9sum%C3%A9&report=' + str(id),
                         headers_sent['Location'])
def suite():
    """Aggregate the report module's doctests and ReportTestCase."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(trac.ticket.report))
    tests.addTest(unittest.makeSuite(ReportTestCase, 'test'))
    return tests
# Allow running this module directly; defaults to the suite() above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/api.py | trac/trac/ticket/tests/api.py | from trac.perm import PermissionCache, PermissionSystem
from trac.ticket.api import TicketSystem, ITicketFieldProvider
from trac.ticket.model import Ticket
from trac.test import EnvironmentStub, Mock
from trac.core import implements, Component
import unittest
class TestFieldProvider(Component):
    """Minimal ITicketFieldProvider that lets tests feed arbitrary raw
    fields into TicketSystem."""

    implements(ITicketFieldProvider)

    def __init__(self):
        # Tests assign whatever field dicts they want exposed.
        self.raw_fields = []

    def get_select_fields(self):
        """No select fields are contributed."""
        return []

    def get_radio_fields(self):
        """No radio fields are contributed."""
        return []

    def get_raw_fields(self):
        """Return the fields the current test has assigned."""
        return self.raw_fields
class TicketSystemTestCase(unittest.TestCase):
    """Exercises TicketSystem: custom-field definitions read from the
    [ticket-custom] config section, workflow actions available per
    permission set, and fields contributed via ITicketFieldProvider."""

    def setUp(self):
        self.env = EnvironmentStub()
        self.perm = PermissionSystem(self.env)
        self.ticket_system = TicketSystem(self.env)
        self.req = Mock()

    def tearDown(self):
        self.env.reset_db()

    def _get_actions(self, ticket_dict):
        # Insert a ticket populated from ticket_dict, then ask a fresh
        # TicketSystem which workflow actions self.req may perform on a
        # re-fetched copy of it.
        ts = TicketSystem(self.env)
        ticket = Ticket(self.env)
        ticket.populate(ticket_dict)
        id = ticket.insert()
        return ts.get_available_actions(self.req, Ticket(self.env, id))

    def test_custom_field_text(self):
        """A 'text' custom field carries label, value and format."""
        self.env.config.set('ticket-custom', 'test', 'text')
        self.env.config.set('ticket-custom', 'test.label', 'Test')
        self.env.config.set('ticket-custom', 'test.value', 'Foo bar')
        self.env.config.set('ticket-custom', 'test.format', 'wiki')
        fields = TicketSystem(self.env).get_custom_fields()
        self.assertEqual({'name': 'test', 'type': 'text', 'label': 'Test',
                          'value': 'Foo bar', 'order': 0, 'format': 'wiki'},
                         fields[0])

    def test_custom_field_select(self):
        """A 'select' field exposes its '|'-separated options."""
        self.env.config.set('ticket-custom', 'test', 'select')
        self.env.config.set('ticket-custom', 'test.label', 'Test')
        self.env.config.set('ticket-custom', 'test.value', '1')
        self.env.config.set('ticket-custom', 'test.options', 'option1|option2')
        fields = TicketSystem(self.env).get_custom_fields()
        self.assertEqual({'name': 'test', 'type': 'select', 'label': 'Test',
                          'value': '1', 'options': ['option1', 'option2'],
                          'order': 0},
                         fields[0])

    def test_custom_field_optional_select(self):
        """A leading empty option ('|opt1|opt2') marks the select
        field optional; the empty option itself is dropped."""
        self.env.config.set('ticket-custom', 'test', 'select')
        self.env.config.set('ticket-custom', 'test.label', 'Test')
        self.env.config.set('ticket-custom', 'test.value', '1')
        self.env.config.set('ticket-custom', 'test.options', '|option1|option2')
        fields = TicketSystem(self.env).get_custom_fields()
        self.assertEqual({'name': 'test', 'type': 'select', 'label': 'Test',
                          'value': '1', 'options': ['option1', 'option2'],
                          'order': 0, 'optional': True},
                         fields[0])

    def test_custom_field_textarea(self):
        """A 'textarea' field picks up cols/rows as width/height."""
        self.env.config.set('ticket-custom', 'test', 'textarea')
        self.env.config.set('ticket-custom', 'test.label', 'Test')
        self.env.config.set('ticket-custom', 'test.value', 'Foo bar')
        self.env.config.set('ticket-custom', 'test.cols', '60')
        self.env.config.set('ticket-custom', 'test.rows', '4')
        self.env.config.set('ticket-custom', 'test.format', 'wiki')
        fields = TicketSystem(self.env).get_custom_fields()
        self.assertEqual({'name': 'test', 'type': 'textarea', 'label': 'Test',
                          'value': 'Foo bar', 'width': 60, 'height': 4,
                          'order': 0, 'format': 'wiki'},
                         fields[0])

    def test_custom_field_order(self):
        """Fields are sorted by their '.order' option, not name."""
        self.env.config.set('ticket-custom', 'test1', 'text')
        self.env.config.set('ticket-custom', 'test1.order', '2')
        self.env.config.set('ticket-custom', 'test2', 'text')
        self.env.config.set('ticket-custom', 'test2.order', '1')
        fields = TicketSystem(self.env).get_custom_fields()
        self.assertEqual('test2', fields[0]['name'])
        self.assertEqual('test1', fields[1]['name'])

    def test_available_actions_full_perms(self):
        """CREATE+MODIFY unlocks the full default workflow."""
        self.perm.grant_permission('anonymous', 'TICKET_CREATE')
        self.perm.grant_permission('anonymous', 'TICKET_MODIFY')
        self.req.perm = PermissionCache(self.env)
        self.assertEqual(['leave', 'resolve', 'reassign', 'accept'],
                         self._get_actions({'status': 'new'}))
        self.assertEqual(['leave', 'resolve', 'reassign', 'accept'],
                         self._get_actions({'status': 'assigned'}))
        self.assertEqual(['leave', 'resolve', 'reassign', 'accept'],
                         self._get_actions({'status': 'accepted'}))
        self.assertEqual(['leave', 'resolve', 'reassign', 'accept'],
                         self._get_actions({'status': 'reopened'}))
        self.assertEqual(['leave', 'reopen'],
                         self._get_actions({'status': 'closed'}))

    def test_available_actions_no_perms(self):
        """Without permissions only 'leave' is offered, in any state."""
        self.req.perm = PermissionCache(self.env)
        self.assertEqual(['leave'], self._get_actions({'status': 'new'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'assigned'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'accepted'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'reopened'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'closed'}))

    def test_available_actions_create_only(self):
        """TICKET_CREATE alone only additionally allows reopening."""
        self.perm.grant_permission('anonymous', 'TICKET_CREATE')
        self.req.perm = PermissionCache(self.env)
        self.assertEqual(['leave'], self._get_actions({'status': 'new'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'assigned'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'accepted'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'reopened'}))
        self.assertEqual(['leave', 'reopen'],
                         self._get_actions({'status': 'closed'}))

    def test_available_actions_chgprop_only(self):
        # CHGPROP is not enough for changing a ticket's state (#3289)
        self.perm.grant_permission('anonymous', 'TICKET_CHGPROP')
        self.req.perm = PermissionCache(self.env)
        self.assertEqual(['leave'], self._get_actions({'status': 'new'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'assigned'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'accepted'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'reopened'}))
        self.assertEqual(['leave'], self._get_actions({'status': 'closed'}))

    def test_can_add_raw_fields_from_field_providers(self):
        """Raw fields from an ITicketFieldProvider show up in
        get_ticket_fields()."""
        testFieldProvider = self.env[TestFieldProvider]
        self.assertIsNotNone(testFieldProvider)
        testFieldProvider.raw_fields = [
            {
                'name': "test_name",
                'type': 'some_type',
                'label': "some_label",
            },
        ]
        fields = TicketSystem(self.env).get_ticket_fields()
        row_added_fields = [
            field for field in fields if field["name"] == "test_name"]
        self.assertEqual(1, len(row_added_fields))

    def test_does_not_add_duplicated_raw_fields_from_field_providers(self):
        """Two provider fields with the same name yield one entry."""
        testFieldProvider = self.env[TestFieldProvider]
        self.assertIsNotNone(testFieldProvider)
        testFieldProvider.raw_fields = [
            {
                'name': "test_name",
                'type': 'some_type1',
                'label': "some_label1",
            },
            {
                'name': "test_name",
                'type': 'some_type2',
                'label': "some_label2",
            },
        ]
        fields = TicketSystem(self.env).get_ticket_fields()
        row_added_fields = [
            field for field in fields if field["name"] == "test_name"]
        self.assertEqual(1, len(row_added_fields))
def suite():
    # Collect every test_* method of TicketSystemTestCase.
    return unittest.makeSuite(TicketSystemTestCase, 'test')
# Allow running this module directly.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/query.py | trac/trac/ticket/tests/query.py | from trac.test import Mock, EnvironmentStub, MockPerm, locale_en
from trac.ticket.query import Query, QueryModule, TicketQueryMacro
from trac.util.datefmt import utc
from trac.web.chrome import web_context
from trac.web.href import Href
from trac.wiki.formatter import LinkFormatter
import unittest
import difflib
# Note: we don't want to replicate 1:1 all the SQL dialect abstraction
# methods from the trac.db layer here.
class QueryTestCase(unittest.TestCase):
def prettifySQL(self, sql):
    """Split *sql* at newlines and commas into a list of ",\\n"-terminated
    fragments, so unified diffs of two SQL statements become readable."""
    fragments = []
    for line in sql.split('\n'):
        for piece in line.split(','):
            fragments.append("%s,\n" % piece)
    return fragments
def assertEqualSQL(self, sql, correct_sql):
    """Assert the two SQL statements are equal; on failure include a
    unified diff of their prettified forms in the message."""
    actual = self.prettifySQL(sql)
    expected = self.prettifySQL(correct_sql)
    diff = ''.join(difflib.unified_diff(expected, actual))
    message = "%r != %r\n" % (sql, correct_sql) + diff
    self.assertEqual(sql, correct_sql, message)
def setUp(self):
    # default_data=True loads the stock enums/milestones the generated
    # SQL below is checked against.
    self.env = EnvironmentStub(default_data=True)
    self.req = Mock(href=self.env.href, authname='anonymous', tz=utc,
                    locale=locale_en, lc_time=locale_en)

def tearDown(self):
    self.env.reset_db()
def test_all_ordered_by_id(self):
    """Ordering by id adds a NULLs-last COALESCE term before t.id."""
    query = Query(self.env, order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_all_ordered_by_id_desc(self):
    """desc=1 reverses both ORDER BY terms."""
    query = Query(self.env, order='id', desc=1)
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0 DESC,t.id DESC""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_all_ordered_by_id_verbose(self):
    """verbose=1 adds reporter and description to the SELECT list."""
    query = Query(self.env, order='id', verbose=1)
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.reporter AS reporter,t.description AS description,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_all_ordered_by_id_from_unicode(self):
    """Query.from_string accepts a unicode query string."""
    query = Query.from_string(self.env, u'order=id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)
def test_all_ordered_by_priority(self):
    """Default ordering is by the numeric priority enum value (cast)."""
    query = Query(self.env)  # priority is default order
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(priority.value,'')='',%(cast_priority)s,t.id""" % {
      'cast_priority': self.env.get_read_db().cast('priority.value', 'int')})
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_all_ordered_by_priority_desc(self):
    """Descending variant of the default priority ordering."""
    query = Query(self.env, desc=1)  # priority is default order
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(priority.value,'')='' DESC,%(cast_priority)s DESC,t.id""" % {
      'cast_priority': self.env.get_read_db().cast('priority.value', 'int')})
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_all_ordered_by_version(self):
    """Ordering by version joins the version table to sort by its time."""
    query = Query(self.env, order='version')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.version AS version,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
LEFT OUTER JOIN version ON (version.name=version)
ORDER BY COALESCE(t.version,'')='',COALESCE(version.time,0)=0,version.time,t.version,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_all_ordered_by_version_desc(self):
    """Descending version ordering; final t.id tiebreaker stays ASC."""
    query = Query(self.env, order='version', desc=1)
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.version AS version,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
LEFT OUTER JOIN version ON (version.name=version)
ORDER BY COALESCE(t.version,'')='' DESC,COALESCE(version.time,0)=0 DESC,version.time DESC,t.version DESC,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)
def test_constrained_by_milestone(self):
    """An equality constraint becomes a parameterized COALESCE test."""
    query = Query.from_string(self.env, 'milestone=milestone1', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.component AS component,t.time AS time,t.changetime AS changetime,t.milestone AS milestone,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.milestone,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual(['milestone1'], args)
    tickets = query.execute(self.req)

def test_all_grouped_by_milestone(self):
    """Grouping by milestone joins the milestone table and orders by
    completed/due dates before the secondary id order."""
    query = Query(self.env, order='id', group='milestone')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.component AS component,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
ORDER BY COALESCE(t.milestone,'')='',COALESCE(milestone.completed,0)=0,milestone.completed,COALESCE(milestone.due,0)=0,milestone.due,t.milestone,COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_all_grouped_by_milestone_desc(self):
    """groupdesc=1 reverses only the grouping terms, not the id order."""
    query = Query(self.env, order='id', group='milestone', groupdesc=1)
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.component AS component,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
ORDER BY COALESCE(t.milestone,'')='' DESC,COALESCE(milestone.completed,0)=0 DESC,milestone.completed DESC,COALESCE(milestone.due,0)=0 DESC,milestone.due DESC,t.milestone DESC,COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_grouped_by_priority(self):
    """Grouping by priority reuses the cast enum value for ordering."""
    query = Query(self.env, group='priority')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.milestone AS milestone,t.component AS component,t.priority AS priority,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(priority.value,'')='',%(cast_priority)s,t.id""" % {
      'cast_priority': self.env.get_read_db().cast('priority.value', 'int')})
    self.assertEqual([], args)
    tickets = query.execute(self.req)
def test_constrained_by_milestone_not(self):
    """'!=' constraints become a parameterized inequality."""
    query = Query.from_string(self.env, 'milestone!=milestone1', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.milestone AS milestone,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.milestone,'')!=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual(['milestone1'], args)
    tickets = query.execute(self.req)

def test_constrained_by_status(self):
    """'|'-separated alternatives collapse into an IN (...) clause."""
    query = Query.from_string(self.env, 'status=new|assigned|reopened',
                              order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.status AS status,t.owner AS owner,t.type AS type,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.status,'') IN (%s,%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual(['new', 'assigned', 'reopened'], args)
    tickets = query.execute(self.req)

def test_constrained_by_owner_containing(self):
    """'~=' maps to the backend's LIKE with %value% argument."""
    query = Query.from_string(self.env, 'owner~=someone', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
    self.assertEqual(['%someone%'], args)
    tickets = query.execute(self.req)

def test_constrained_by_owner_not_containing(self):
    """'!~=' maps to NOT LIKE with %value% argument."""
    query = Query.from_string(self.env, 'owner!~=someone', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') NOT %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
    self.assertEqual(['%someone%'], args)
    tickets = query.execute(self.req)

def test_constrained_by_owner_beginswith(self):
    """'^=' maps to LIKE with a trailing-% argument."""
    query = Query.from_string(self.env, 'owner^=someone', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
    self.assertEqual(['someone%'], args)
    tickets = query.execute(self.req)

def test_constrained_by_owner_endswith(self):
    """'$=' maps to LIKE with a leading-% argument."""
    query = Query.from_string(self.env, 'owner$=someone', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
    self.assertEqual(['%someone'], args)
    tickets = query.execute(self.req)
def test_constrained_by_custom_field(self):
    """A custom-field constraint joins ticket_custom under the quoted
    field name alias."""
    self.env.config.set('ticket-custom', 'foo', 'text')
    query = Query.from_string(self.env, 'foo=something', order='id')
    sql, args = query.get_sql()
    foo = self.env.get_read_db().quote('foo')
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value,%s.value AS %s
FROM ticket AS t
LEFT OUTER JOIN ticket_custom AS %s ON (id=%s.ticket AND %s.name='foo')
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(%s.value,'')=%%s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % ((foo,) * 6))
    self.assertEqual(['something'], args)
    tickets = query.execute(self.req)

def test_grouped_by_custom_field(self):
    """Grouping by a custom field orders on its joined value column."""
    self.env.config.set('ticket-custom', 'foo', 'text')
    query = Query(self.env, group='foo', order='id')
    sql, args = query.get_sql()
    foo = self.env.get_read_db().quote('foo')
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value,%s.value AS %s
FROM ticket AS t
LEFT OUTER JOIN ticket_custom AS %s ON (id=%s.ticket AND %s.name='foo')
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(%s.value,'')='',%s.value,COALESCE(t.id,0)=0,t.id""" %
                        ((foo,) * 7))
    self.assertEqual([], args)
    tickets = query.execute(self.req)
def test_constrained_by_multiple_owners(self):
    """Multiple '=' alternatives become IN (...)."""
    query = Query.from_string(self.env, 'owner=someone|someone_else',
                              order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.owner,'') IN (%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual(['someone', 'someone_else'], args)
    tickets = query.execute(self.req)

def test_constrained_by_multiple_owners_not(self):
    """Multiple '!=' alternatives become NOT IN (...)."""
    query = Query.from_string(self.env, 'owner!=someone|someone_else',
                              order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.owner,'') NOT IN (%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual(['someone', 'someone_else'], args)
    tickets = query.execute(self.req)

def test_constrained_by_multiple_owners_contain(self):
    """Multiple '~=' alternatives are ORed LIKEs; note the '_' in the
    second value is escaped ('/_') for the LIKE pattern."""
    query = Query.from_string(self.env, 'owner~=someone|someone_else',
                              order='id')
    sql, args = query.get_sql()
    self.assertEqual(['%someone%', '%someone/_else%'], args)
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s OR COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
    tickets = query.execute(self.req)

def test_constrained_by_empty_value_contains(self):
    """An empty '~=' constraint produces no WHERE clause at all."""
    query = Query.from_string(self.env, 'owner~=|', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_constrained_by_empty_value_startswith(self):
    """An empty '^=' constraint produces no WHERE clause at all."""
    query = Query.from_string(self.env, 'owner^=|', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)

def test_constrained_by_empty_value_endswith(self):
    """An empty '$=' constraint produces no WHERE clause at all."""
    query = Query.from_string(self.env, 'owner$=|', order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual([], args)
    tickets = query.execute(self.req)
def test_constrained_by_time_range(self):
    """A date range constrains the int64-cast time column; args are
    epoch microseconds (Python 2 long literals)."""
    query = Query.from_string(self.env, 'created=2008-08-01..2008-09-01', order='id')
    sql, args = query.get_sql(self.req)
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.time AS time,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (((%(cast_time)s>=%%s AND %(cast_time)s<%%s)))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {
      'cast_time': self.env.get_read_db().cast('t.time', 'int64')})
    self.assertEqual([1217548800000000L, 1220227200000000L], args)
    tickets = query.execute(self.req)

def test_constrained_by_time_range_exclusion(self):
    """'!=' on a range negates the whole between-condition."""
    query = Query.from_string(self.env, 'created!=2008-08-01..2008-09-01', order='id')
    sql, args = query.get_sql(self.req)
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.time AS time,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((NOT (%(cast_time)s>=%%s AND %(cast_time)s<%%s)))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {
      'cast_time': self.env.get_read_db().cast('t.time', 'int64')})
    self.assertEqual([1217548800000000L, 1220227200000000L], args)
    tickets = query.execute(self.req)

def test_constrained_by_time_range_open_right(self):
    """An open right end ('date..') yields only a lower bound."""
    query = Query.from_string(self.env, 'created=2008-08-01..', order='id')
    sql, args = query.get_sql(self.req)
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.time AS time,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((%(cast_time)s>=%%s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {
      'cast_time': self.env.get_read_db().cast('t.time', 'int64')})
    self.assertEqual([1217548800000000L], args)
    tickets = query.execute(self.req)

def test_constrained_by_time_range_open_left(self):
    """An open left end ('..date') yields only an upper bound."""
    query = Query.from_string(self.env, 'created=..2008-09-01', order='id')
    sql, args = query.get_sql(self.req)
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.time AS time,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((%(cast_time)s<%%s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {
      'cast_time': self.env.get_read_db().cast('t.time', 'int64')})
    self.assertEqual([1220227200000000L], args)
    tickets = query.execute(self.req)

def test_constrained_by_time_range_modified(self):
    """'modified' ranges constrain t.changetime instead of t.time."""
    query = Query.from_string(self.env, 'modified=2008-08-01..2008-09-01', order='id')
    sql, args = query.get_sql(self.req)
    self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.changetime AS changetime,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.time AS time,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (((%(cast_changetime)s>=%%s AND %(cast_changetime)s<%%s)))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {
      'cast_changetime': self.env.get_read_db().cast('t.changetime', 'int64')})
    self.assertEqual([1217548800000000L, 1220227200000000L], args)
    tickets = query.execute(self.req)
def test_constrained_by_keywords(self):
query = Query.from_string(self.env, 'keywords~=foo -bar baz',
order='id')
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.product AS product,t.id AS id,t.summary AS summary,t.keywords AS keywords,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (((COALESCE(t.keywords,'') %(like)s AND COALESCE(t.keywords,'') NOT %(like)s AND COALESCE(t.keywords,'') %(like)s)))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
self.assertEqual(['%foo%', '%bar%', '%baz%'], args)
tickets = query.execute(self.req)
def test_constrained_by_milestone_or_version(self):
query = Query.from_string(self.env, 'milestone=milestone1&or&version=version1', order='id')
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.component AS component,t.time AS time,t.changetime AS changetime,t.version AS version,t.milestone AS milestone,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.milestone,'')=%s)) OR ((COALESCE(t.version,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
self.assertEqual(['milestone1', 'version1'], args)
tickets = query.execute(self.req)
def test_equal_in_value(self):
query = Query.from_string(self.env, r'status=this=that&version=version1',
order='id')
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.priority AS priority,t.milestone AS milestone,t.component AS component,t.status AS status,t.time AS time,t.changetime AS changetime,t.version AS version,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.status,'')=%s) AND (COALESCE(t.version,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
self.assertEqual(['this=that', 'version1'], args)
tickets = query.execute(self.req)
def test_special_character_escape(self):
    # Backslash-escaped "&" and "|" are literal characters, not
    # constraint separators; they must reach the bind args unescaped.
    query = Query.from_string(self.env, r'status=here\&now|maybe\|later|back\slash',
                              order='id')
    sql, args = query.get_sql()
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.status AS status,t.owner AS owner,t.type AS type,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.status,'') IN (%s,%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
    # "\s" in "back\slash" is not an escape sequence and stays as-is.
    self.assertEqual(['here&now', 'maybe|later', 'back\\slash'], args)
    # Smoke test: the generated SQL must actually execute.
    tickets = query.execute(self.req)
def test_repeated_constraint_field(self):
    # Repeating a negated field ("owner!=a&owner!=b") must compile to
    # exactly the same SQL and args as the "|"-joined form.
    like_query = Query.from_string(self.env, 'owner!=someone|someone_else',
                                   order='id')
    query = Query.from_string(self.env, 'owner!=someone&owner!=someone_else',
                              order='id')
    like_sql, like_args = like_query.get_sql()
    sql, args = query.get_sql()
    self.assertEqualSQL(sql, like_sql)
    self.assertEqual(args, like_args)
    # Smoke test: the generated SQL must actually execute.
    tickets = query.execute(self.req)
def test_user_var(self):
    # The dynamic "$USER" variable must be substituted with the
    # request's authname ('anonymous' for self.req) at SQL time.
    query = Query.from_string(self.env, 'owner=$USER&order=id')
    sql, args = query.get_sql(req=self.req)
    self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
    self.assertEqual(['anonymous'], args)
    # Smoke test: the generated SQL must actually execute.
    tickets = query.execute(self.req)
def test_csv_escape(self):
    # A value containing a comma must be double-quoted in the CSV
    # export, and the output must start with a UTF-8 BOM.
    query = Mock(get_columns=lambda: ['col1'],
                 execute=lambda r: [{'id': 1,
                                     'col1': 'value, needs escaped'}],
                 time_fields=['time', 'changetime'])
    content, mimetype = QueryModule(self.env).export_csv(
        Mock(href=self.env.href, perm=MockPerm()),
        query)
    # '\xef\xbb\xbf' is the UTF-8 byte order mark.
    self.assertEqual('\xef\xbb\xbfcol1\r\n"value, needs escaped"\r\n',
                     content)
def test_template_data(self):
    req = Mock(href=self.env.href, perm=MockPerm(), authname='anonymous',
               tz=None, locale=None)
    context = web_context(req, 'query')
    # With a request, "$USER" is resolved to the authenticated name...
    query = Query.from_string(self.env, 'owner=$USER&order=id')
    tickets = query.execute(req)
    data = query.template_data(context, tickets, req=req)
    self.assertEqual(['anonymous'], data['clauses'][0]['owner']['values'])
    # ...without one, the literal "$USER" placeholder is kept.
    query = Query.from_string(self.env, 'owner=$USER&order=id')
    tickets = query.execute(req)
    data = query.template_data(context, tickets)
    self.assertEqual(['$USER'], data['clauses'][0]['owner']['values'])
class QueryLinksTestCase(unittest.TestCase):
    """Checks the TracLinks rendering done by the query module."""

    def setUp(self):
        self.env = EnvironmentStub(default_data=True)
        self.query_module = QueryModule(self.env)
        req = Mock(perm=MockPerm(), args={}, href=Href('/'))
        self.formatter = LinkFormatter(self.env, web_context(req))

    def tearDown(self):
        self.env.reset_db()

    def _format_link(self, query, label):
        # Render a query link and return its serialized markup.
        link = self.query_module._format_link(self.formatter, 'query',
                                              query, label)
        return str(link)

    def test_empty_query(self):
        # An empty query string cannot name a field, so the formatter
        # must render an inline error instead of a link.
        rendered = self._format_link('', 'label')
        self.assertEqual(rendered,
                         '<em class="error">[Error: Query filter requires '
                         'field and constraints separated by a "="]</em>')
class TicketQueryMacroTestCase(unittest.TestCase):
def assertQueryIs(self, content, query, kwargs, format):
    """Parse macro *content* and compare each piece of the result."""
    parsed_query, parsed_kwargs, parsed_format = \
        TicketQueryMacro.parse_args(content)
    self.assertEqual(query, parsed_query)
    self.assertEqual(kwargs, parsed_kwargs)
    self.assertEqual(format, parsed_format)
def test_owner_and_milestone(self):
    # Comma-separated constraints become "&"-joined, with default
    # columns, unlimited results and id ordering in "list" format.
    self.assertQueryIs('owner=joe, milestone=milestone1',
                       'owner=joe&milestone=milestone1',
                       dict(col='status|summary', max='0', order='id'),
                       'list')
def test_owner_or_milestone(self):
    # A bare "or" argument is kept as the "&or&" clause separator.
    self.assertQueryIs('owner=joe, or, milestone=milestone1',
                       'owner=joe&or&milestone=milestone1',
                       dict(col='status|summary', max='0', order='id'),
                       'list')
def test_format_arguments(self):
    # In "compact" format the default columns are prepended to the
    # requested ones; in "table" format the given columns are used as-is.
    self.assertQueryIs('owner=joe, milestone=milestone1, col=component|severity, max=15, order=component, format=compact',
                       'owner=joe&milestone=milestone1',
                       dict(col='status|summary|component|severity', max='15', order='component'),
                       'compact')
    self.assertQueryIs('owner=joe, milestone=milestone1, col=id|summary|component, max=30, order=component, format=table',
                       'owner=joe&milestone=milestone1',
                       dict(col='id|summary|component', max='30', order='component'),
                       'table')
def test_special_char_escaping(self):
self.assertQueryIs(r'owner=joe|jack, milestone=this\&that\|here\,now',
r'owner=joe|jack&milestone=this\&that\|here,now',
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/functional.py | trac/trac/ticket/tests/functional.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
from datetime import datetime, timedelta
from trac.test import locale_en
from trac.tests.functional import *
from trac.util.datefmt import utc, localtz, format_date, format_datetime
class TestTickets(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Create a ticket, comment on it, and attach a file"""
        # TODO: this should be split into multiple tests
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        # Second ticket with the tester's default summary.
        self._tester.create_ticket()
        self._tester.add_comment(ticketid)
        self._tester.attach_file_to_ticket(ticketid)
class TestTicketPreview(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Preview ticket creation"""
        self._tester.go_to_front()
        tc.follow('New Ticket')
        summary = random_sentence(5)
        desc = random_sentence(5)
        tc.formvalue('propertyform', 'field-summary', summary)
        tc.formvalue('propertyform', 'field-description', desc)
        # Previewing must keep us on /newticket without creating anything,
        # while echoing back the entered summary and description.
        tc.submit('preview')
        tc.url(self._tester.url + '/newticket$')
        tc.find('ticket not yet created')
        tc.find(summary)
        tc.find(desc)
class TestTicketNoSummary(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Creating a ticket without summary should fail"""
        self._tester.go_to_front()
        tc.follow('New Ticket')
        desc = random_sentence(5)
        tc.formvalue('propertyform', 'field-description', desc)
        tc.submit('submit')
        # The form is re-shown with the entered description preserved
        # and a validation warning; no ticket was created.
        tc.find(desc)
        tc.find('Tickets must contain a summary.')
        tc.find('Create New Ticket')
        tc.find('ticket not yet created')
class TestTicketAltFormats(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in alternative formats"""
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_ticket(ticketid)
        # Every alternative-format link must include the ticket summary
        # somewhere in its raw response body.
        for format in ['Comma-delimited Text', 'Tab-delimited Text',
                       'RSS Feed']:
            tc.follow(format)
            content = b.get_html()  # raw response from the twill browser
            if content.find(summary) < 0:
                raise AssertionError('Summary missing from %s format' % format)
            tc.back()
class TestTicketCSVFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in CSV format"""
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_ticket(ticketid)
        tc.follow('Comma-delimited Text')
        csv = b.get_html()
        # The export must open with a UTF-8 BOM and the CSV header row.
        if not csv.startswith('\xef\xbb\xbfid,summary,'): # BOM
            raise AssertionError('Bad CSV format')
class TestTicketTabFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in Tab-delimited format"""
        # (fixed misspelling "delimitted" in docstring and message)
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_ticket(ticketid)
        tc.follow('Tab-delimited Text')
        tab = b.get_html()
        # The export must open with a UTF-8 BOM and the tab-separated header.
        if not tab.startswith('\xef\xbb\xbfid\tsummary\t'): # BOM
            raise AssertionError('Bad tab delimited format')
class TestTicketRSSFormat(FunctionalTestCaseSetup):
    def runTest(self):
        """Download ticket in RSS format"""
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_ticket(ticketid)
        # Make a number of changes to exercise all of the RSS feed code
        self._tester.go_to_ticket(ticketid)
        tc.formvalue('propertyform', 'comment', random_sentence(3))
        tc.formvalue('propertyform', 'field-type', 'task')
        tc.formvalue('propertyform', 'description', summary + '\n\n' +
                     random_sentence(8))
        tc.formvalue('propertyform', 'field-keywords', 'key')
        tc.submit('submit')
        # Two edits in the same second would collapse in the changelog.
        time.sleep(1) # Have to wait a second
        tc.formvalue('propertyform', 'field-keywords', '')
        tc.submit('submit')
        tc.find('RSS Feed')
        tc.follow('RSS Feed')
        rss = b.get_html()
        # A minimal well-formedness check on the feed's XML prolog.
        if not rss.startswith('<?xml version="1.0"?>'):
            raise AssertionError('RSS Feed not valid feed')
class TestTicketSearch(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket search"""
        summary = random_sentence(4)
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_front()
        tc.follow('Search')
        # Restrict the search to the ticket realm only.
        tc.formvalue('fullsearch', 'ticket', True)
        tc.formvalue('fullsearch', 'q', summary)
        tc.submit('Search')
        tc.find('class="searchable">.*' + summary)
        tc.notfind('No matches found')
class TestNonTicketSearch(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test non-ticket search"""
        # Create a summary containing only unique words
        summary = ' '.join([random_word() + '_TestNonTicketSearch'
                            for i in range(5)])
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_front()
        tc.follow('Search')
        # With the ticket realm disabled the ticket must NOT be found.
        tc.formvalue('fullsearch', 'ticket', False)
        tc.formvalue('fullsearch', 'q', summary)
        tc.submit('Search')
        tc.notfind('class="searchable">' + summary)
        tc.find('No matches found')
class TestTicketHistory(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket history"""
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        comment = random_sentence(5)
        self._tester.add_comment(ticketid, comment=comment)
        self._tester.go_to_ticket(ticketid)
        url = b.get_url()
        # version=0 shows the ticket as created: no comment yet.
        tc.go(url + '?version=0')
        tc.find('at <[^>]*>*Initial Version')
        tc.find(summary)
        tc.notfind(comment)
        # version=1 includes the first change, i.e. the comment.
        tc.go(url + '?version=1')
        tc.find('at <[^>]*>*Version 1')
        tc.find(summary)
        tc.find(comment)
class TestTicketHistoryDiff(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket history (diff)"""
        name = 'TestTicketHistoryDiff'
        ticketid = self._tester.create_ticket(name)
        self._tester.go_to_ticket(ticketid)
        tc.formvalue('propertyform', 'description', random_sentence(6))
        tc.submit('submit')
        # The change entry must offer a "diff" link for the description...
        tc.find('Description<[^>]*>\\s*modified \\(<[^>]*>diff', 's')
        tc.follow('diff')
        # ...leading to a diff between the initial version and version 1.
        tc.find('Changes\\s*between\\s*<[^>]*>Initial Version<[^>]*>\\s*and' \
                '\\s*<[^>]*>Version 1<[^>]*>\\s*of\\s*<[^>]*>Ticket #' , 's')
class TestTicketQueryLinks(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket query links"""
        count = 3
        ticket_ids = [self._tester.create_ticket(
                        summary='TestTicketQueryLinks%s' % i)
                      for i in range(count)]
        self._tester.go_to_query()
        # We don't have the luxury of javascript, so this is a multi-step
        # process
        tc.formvalue('query', 'add_filter_0', 'summary')
        tc.submit('add_0')
        # Drop the default owner filter before applying the summary one.
        tc.formvalue('query', '0_owner', 'nothing')
        tc.submit('rm_filter_0_owner_0')
        tc.formvalue('query', '0_summary', 'TestTicketQueryLinks')
        tc.submit('update')
        query_url = b.get_url()
        tc.find(r'\(%d matches\)' % count)
        for i in range(count):
            tc.find('TestTicketQueryLinks%s' % i)
        # Walk the Previous/Next navigation derived from the query:
        # the first ticket has no predecessor...
        tc.follow('TestTicketQueryLinks0')
        tc.find('class="missing">← Previous Ticket')
        tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[1])
        tc.follow('Back to Query')
        tc.url(re.escape(query_url))
        # ...the middle ticket links both ways...
        tc.follow('TestTicketQueryLinks1')
        tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[0])
        tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[2])
        # ...and the last one has no successor.
        tc.follow('Next Ticket')
        tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[1])
        tc.find('class="missing">Next Ticket →')
class TestTicketQueryOrClause(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket query with an or clauses"""
        count = 3
        ticket_ids = [self._tester.create_ticket(
                        summary='TestTicketQueryOrClause%s' % i,
                        info={'keywords': str(i)})
                      for i in range(count)]
        self._tester.go_to_query()
        # Remove the default owner filter, then constrain clause 0 by
        # summary and add a second OR-ed clause constrained by keywords.
        tc.formvalue('query', '0_owner', '')
        tc.submit('rm_filter_0_owner_0')
        tc.formvalue('query', 'add_filter_0', 'summary')
        tc.submit('add_0')
        tc.formvalue('query', '0_summary', 'TestTicketQueryOrClause1')
        tc.formvalue('query', 'add_clause_1', 'keywords')
        tc.submit('add_1')
        tc.formvalue('query', '1_keywords', '2')
        tc.submit('update')
        # Ticket 0 matches neither clause; 1 and 2 match one clause each.
        tc.notfind('TestTicketQueryOrClause0')
        for i in [1, 2]:
            tc.find('TestTicketQueryOrClause%s' % i)
class TestTicketCustomFieldTextNoFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with no format explicitly specified.
        Its contents should be rendered as plain text.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', '')
        env.config.save()
        # Restart so the server picks up the new custom-field config.
        self._testenv.restart()
        val = "%s %s" % (random_unique_camel(), random_word())
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # The CamelCase word must appear verbatim, not as a wiki link.
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % val)
class TestTicketCustomFieldTextAreaNoFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom textarea field with no format explicitly specified,
        its contents should be rendered as plain text.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'textarea')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', '')
        env.config.save()
        # Restart so the server picks up the new custom-field config.
        self._testenv.restart()
        val = "%s %s" % (random_unique_camel(), random_word())
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # The CamelCase word must appear verbatim, not as a wiki link.
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % val)
class TestTicketCustomFieldTextWikiFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `wiki` format.
        Its contents should through the wiki engine, wiki-links and all.
        Feature added in http://trac.edgewall.org/ticket/1791
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'wiki')
        env.config.save()
        # Restart so the server picks up the new custom-field config.
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # The CamelCase word becomes a wiki link (possibly a "missing
        # page" link with a trailing "?"); the plain word stays as-is.
        wiki = '<a [^>]*>%s\??</a> %s' % (word1, word2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % wiki)
class TestTicketCustomFieldTextAreaWikiFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom textarea field with `wiki` format.
        Its contents should be rendered through the wiki engine,
        wiki-links and all, wrapped in block-level markup.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'textarea')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'wiki')
        env.config.save()
        # Restart so the server picks up the new custom-field config.
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # Textarea wiki rendering produces a paragraph; the CamelCase
        # word becomes a wiki link (maybe with a trailing "?").
        wiki = '<p>\s*<a [^>]*>%s\??</a> %s<br />\s*</p>' % (word1, word2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % wiki)
class TestTicketCustomFieldTextReferenceFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `reference` format.
        Its contents are treated as a single value
        and are rendered as an auto-query link.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'reference')
        env.config.save()
        # Restart so the server picks up the new custom-field config.
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # The whole value becomes one query link (space encoded as "+").
        query = 'status=!closed&newfield=%s\+%s' % (word1, word2)
        querylink = '<a href="/query\?%s">%s</a>' % (query, val)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylink)
class TestTicketCustomFieldTextListFormat(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test custom text field with `list` format.
        Its contents are treated as a space-separated list of values
        and are rendered as separate auto-query links per word.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'list')
        env.config.save()
        # Restart so the server picks up the new custom-field config.
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # Each word becomes its own "contains" (~) query link.
        query1 = 'status=!closed&newfield=~%s' % word1
        query2 = 'status=!closed&newfield=~%s' % word2
        querylink1 = '<a href="/query\?%s">%s</a>' % (query1, word1)
        querylink2 = '<a href="/query\?%s">%s</a>' % (query2, word2)
        querylinks = '%s %s' % (querylink1, querylink2)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylinks)
class RegressionTestTicket10828(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10828
        Rendered property changes should be described as lists of added and
        removed items, even in the presence of comma and semicolon separators.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'A Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'list')
        env.config.save()
        # Restart so the server picks up the new custom-field config.
        self._testenv.restart()
        ticketid = self._tester.create_ticket(summary=random_sentence(3))
        self._tester.go_to_ticket(ticketid)
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        # Initial value: both words reported as added.
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added' % (word1, word2))
        word3 = random_unique_camel()
        word4 = random_unique_camel()
        # Commas and semicolons must also act as item separators.
        val = "%s, %s; %s" % (word2, word3, word4)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added; <em>%s</em> removed'
                % (word3, word4, word1))
        # Clearing the field removes every remaining item.
        tc.formvalue('propertyform', 'field-newfield', '')
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> removed'
                % (word2, word3, word4))
        val = "%s %s,%s" % (word1, word2, word3)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> added'
                % (word1, word2, word3))
        # The rendered field keeps the original separators between the
        # per-word query links.
        query1 = 'status=!closed&newfield=~%s' % word1
        query2 = 'status=!closed&newfield=~%s' % word2
        query3 = 'status=!closed&newfield=~%s' % word3
        querylink1 = '<a href="/query\?%s">%s</a>' % (query1, word1)
        querylink2 = '<a href="/query\?%s">%s</a>' % (query2, word2)
        querylink3 = '<a href="/query\?%s">%s</a>' % (query3, word3)
        querylinks = '%s %s, %s' % (querylink1, querylink2, querylink3)
        tc.find('<td headers="h_newfield"[^>]*>\s*%s\s*</td>' % querylinks)
class TestTimelineTicketDetails(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test ticket details on timeline"""
        env = self._testenv.get_trac_environment()
        env.config.set('timeline', 'ticket_show_details', 'yes')
        env.config.save()
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_ticket(ticketid)
        self._tester.add_comment(ticketid)
        self._tester.go_to_timeline()
        # Opt in to detailed ticket events via the timeline prefs form.
        tc.formvalue('prefs', 'ticket_details', True)
        tc.submit()
        htmltags = '(<[^>]*>)*'
        # The comment must show up as an "updated by admin" event.
        tc.find('Ticket ' + htmltags + '#' + str(ticketid) + htmltags + ' \\(' +
                summary + '\\) updated\\s+by\\s+' + htmltags + 'admin', 's')
class TestAdminComponent(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create component"""
        # Creation details (and assertions) live in the shared tester.
        self._tester.create_component()
class TestAdminComponentDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate component"""
        # Fixed copy-pasted name "DuplicateMilestone" in this component test.
        name = "DuplicateComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Re-submitting the same name must yield a friendly error,
        # not an internal server error.
        tc.formvalue('addcomponent', 'name', name)
        tc.submit()
        tc.notfind(internal_error)
        tc.find('Component .* already exists')
class TestAdminComponentRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove component"""
        name = "RemovalComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Select the component's checkbox and remove it.
        tc.formvalue('component_table', 'sel', name)
        tc.submit('remove')
        tc.notfind(name)
class TestAdminComponentNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected component"""
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        # Removing with nothing selected must produce a warning.
        tc.submit('remove', formname='component_table')
        tc.find('No component selected')
class TestAdminComponentDefault(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default component"""
        name = "DefaultComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        tc.formvalue('component_table', 'default', name)
        tc.submit('apply')
        tc.find('type="radio" name="default" value="%s" checked="checked"' % \
                name)
        # The default must be preselected on the new-ticket form.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (name, name))
class TestAdminComponentDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin component detail"""
        name = "DetailComponent"
        self._tester.create_component(name)
        component_url = self._tester.url + "/admin/ticket/components"
        tc.go(component_url)
        tc.follow(name)
        desc = 'Some component description'
        tc.formvalue('modcomp', 'description', desc)
        # Cancelling must discard the edited description.
        tc.submit('cancel')
        tc.url(component_url + '$')
        tc.follow(name)
        tc.notfind(desc)
class TestAdminMilestone(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create milestone"""
        # Creation details (and assertions) live in the shared tester.
        self._tester.create_milestone()
class TestAdminMilestoneSpace(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create milestone with a space"""
        # Spaces in milestone names must survive URL round-trips.
        self._tester.create_milestone('Milestone 1')
class TestAdminMilestoneDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate milestone"""
        name = "DuplicateMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        # Re-submitting the same name must yield a friendly error.
        tc.formvalue('addmilestone', 'name', name)
        tc.submit()
        tc.notfind(internal_error)
        tc.find('Milestone %s already exists' % name)
        # Guard against an unsubstituted %s placeholder in the message.
        tc.notfind('%s')
class TestAdminMilestoneDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify milestone details"""
        name = "DetailMilestone"
        # Create a milestone
        self._tester.create_milestone(name)
        # Modify the details of the milestone
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        tc.formvalue('modifymilestone', 'description', 'Some description.')
        tc.submit('save')
        tc.url(milestone_url)
        # Make sure the milestone isn't closed
        self._tester.go_to_roadmap()
        tc.find(name)
        # Cancel more modifications
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.formvalue('modifymilestone', 'description',
                     '~~Some other description.~~')
        tc.submit('cancel')
        tc.url(milestone_url)
        # Verify the correct modifications show up
        self._tester.go_to_roadmap()
        tc.find('Some description.')
        tc.follow(name)
        tc.find('Some description.')
class TestAdminMilestoneDue(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone duedate"""
        name = "DueMilestone"
        duedate = datetime.now(tz=utc)
        # Format the date the same way the UI will render it.
        duedate_string = format_datetime(duedate, tzinfo=utc, locale=locale_en)
        self._tester.create_milestone(name, due=duedate_string)
        tc.find(duedate_string)
class TestAdminMilestoneDetailDue(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify milestone duedate on detail page"""
        name = "DetailDueMilestone"
        # Create a milestone
        self._tester.create_milestone(name)
        # Modify the details of the milestone
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        duedate = datetime.now(tz=utc)
        duedate_string = format_datetime(duedate, tzinfo=utc, locale=locale_en)
        tc.formvalue('modifymilestone', 'due', duedate_string)
        tc.submit('save')
        tc.url(milestone_url + '$')
        # The listing row must show the name followed by the due date.
        tc.find(name + '(<[^>]*>|\\s)*'+ duedate_string, 's')
class TestAdminMilestoneCompleted(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone completed"""
        name = "CompletedMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        # Marking completed (no explicit date) must save cleanly and
        # return to the milestone listing.
        tc.formvalue('modifymilestone', 'completed', True)
        tc.submit('save')
        tc.url(milestone_url + "$")
class TestAdminMilestoneCompletedFuture(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin milestone completed in the future"""
        name = "CompletedFutureMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.follow(name)
        tc.url(milestone_url + '/' + name)
        tc.formvalue('modifymilestone', 'completed', True)
        # A completion date one day in the future must be rejected.
        cdate = datetime.now(tz=utc) + timedelta(days=1)
        cdate_string = format_date(cdate, tzinfo=localtz, locale=locale_en)
        tc.formvalue('modifymilestone', 'completeddate', cdate_string)
        tc.submit('save')
        tc.find('Completion date may not be in the future')
        # And make sure it wasn't marked as completed.
        self._tester.go_to_roadmap()
        tc.find(name)
class TestAdminMilestoneRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove milestone"""
        name = "MilestoneRemove"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        # Select the milestone's checkbox and remove it.
        tc.formvalue('milestone_table', 'sel', name)
        tc.submit('remove')
        tc.url(milestone_url + '$')
        tc.notfind(name)
class TestAdminMilestoneRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple milestones"""
        name = "MultiRemoveMilestone"
        count = 3
        for i in range(count):
            self._tester.create_milestone("%s%s" % (name, i))
        milestone_url = self._tester.url + '/admin/ticket/milestones'
        tc.go(milestone_url)
        tc.url(milestone_url + '$')
        for i in range(count):
            tc.find("%s%s" % (name, i))
        # Select all three checkboxes, then remove them in one submit.
        for i in range(count):
            tc.formvalue('milestone_table', 'sel', "%s%s" % (name, i))
        tc.submit('remove')
        tc.url(milestone_url + '$')
        for i in range(count):
            tc.notfind("%s%s" % (name, i))
class TestAdminMilestoneNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected milestone"""
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        # Removing with nothing selected must produce a warning.
        tc.submit('remove', formname='milestone_table')
        tc.find('No milestone selected')
class TestAdminMilestoneDefault(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default milestone"""
        name = "DefaultMilestone"
        self._tester.create_milestone(name)
        milestone_url = self._tester.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.formvalue('milestone_table', 'default', name)
        tc.submit('apply')
        tc.find('type="radio" name="default" value="%s" checked="checked"' % \
                name)
        # verify it is the default on the newticket page.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (name, name))
class TestAdminPriority(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create priority"""
        # Creation details (and assertions) live in the shared tester.
        self._tester.create_priority()
class TestAdminPriorityDuplicates(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin create duplicate priority"""
        name = "DuplicatePriority"
        self._tester.create_priority(name)
        # Creating the same priority again must yield a friendly error.
        self._tester.create_priority(name)
        tc.find('Priority %s already exists' % name)
class TestAdminPriorityModify(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify priority"""
        name = "ModifyPriority"
        self._tester.create_priority(name)
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name)
        tc.follow(name)
        # Rename the priority on its detail page and save.
        tc.formvalue('modenum', 'name', name * 2)
        tc.submit('save')
        tc.url(priority_url + '$')
        tc.find(name * 2)
class TestAdminPriorityRemove(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove priority"""
        name = "RemovePriority"
        self._tester.create_priority(name)
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name)
        # Select the priority's checkbox and remove it.
        tc.formvalue('enumtable', 'sel', name)
        tc.submit('remove')
        tc.url(priority_url + '$')
        tc.notfind(name)
class TestAdminPriorityRemoveMulti(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove multiple priorities"""
        name = "MultiRemovePriority"
        count = 3
        for i in range(count):
            self._tester.create_priority("%s%s" % (name, i))
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        for i in range(count):
            tc.find("%s%s" % (name, i))
        # Select all three checkboxes, then remove them in one submit.
        for i in range(count):
            tc.formvalue('enumtable', 'sel', "%s%s" % (name, i))
        tc.submit('remove')
        tc.url(priority_url + '$')
        for i in range(count):
            tc.notfind("%s%s" % (name, i))
class TestAdminPriorityNonRemoval(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin remove no selected priority"""
        priority_url = self._tester.url + "/admin/ticket/priority"
        tc.go(priority_url)
        # Removing with nothing selected must produce a warning.
        tc.submit('remove', formname='enumtable')
        tc.find('No priority selected')
class TestAdminPriorityDefault(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin default priority"""
        name = "DefaultPriority"
        self._tester.create_priority(name)
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name)
        tc.formvalue('enumtable', 'default', name)
        tc.submit('apply')
        tc.url(priority_url + '$')
        # The chosen priority's radio button must now be checked.
        tc.find('radio.*"%s"\\schecked="checked"' % name)
class TestAdminPriorityDetail(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin modify priority details"""
        name = "DetailPriority"
        # Create a priority
        self._tester.create_priority(name + '1')
        # Modify the details of the priority
        priority_url = self._tester.url + "/admin/ticket/priority"
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.follow(name + '1')
        tc.url(priority_url + '/' + name + '1')
        tc.formvalue('modenum', 'name', name + '2')
        tc.submit('save')
        tc.url(priority_url + '$')
        # Cancel more modifications
        tc.go(priority_url)
        tc.follow(name)
        tc.formvalue('modenum', 'name', name + '3')
        tc.submit('cancel')
        tc.url(priority_url + '$')
        # Verify that only the correct modifications show up
        tc.notfind(name + '1')
        tc.find(name + '2')
        tc.notfind(name + '3')
class TestAdminPriorityRenumber(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin renumber priorities"""
valuesRE = re.compile('<select name="value_([0-9]+)">', re.M)
html = b.get_html()
max_priority = max([int(x) for x in valuesRE.findall(html)])
name = "RenumberPriority"
self._tester.create_priority(name + '1')
self._tester.create_priority(name + '2')
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name + '1')
tc.find(name + '2')
tc.formvalue('enumtable', 'value_%s' % (max_priority + 1), str(max_priority + 2))
tc.formvalue('enumtable', 'value_%s' % (max_priority + 2), str(max_priority + 1))
tc.submit('apply')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/model.py | trac/trac/ticket/tests/model.py | from __future__ import with_statement
from datetime import datetime, timedelta
import os.path
from StringIO import StringIO
import tempfile
import shutil
import unittest
from trac import core
from trac.attachment import Attachment
from trac.core import TracError, implements
from trac.resource import ResourceNotFound
from trac.ticket.model import (
Ticket, Component, Milestone, Priority, Type, Version
)
from trac.ticket.api import (
IMilestoneChangeListener, ITicketChangeListener, TicketSystem
)
from trac.test import EnvironmentStub
from trac.tests.resource import TestResourceChangeListener
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc
class TestTicketChangeListener(core.Component):
    """ITicketChangeListener spy: remembers the most recent event and the
    arguments it was delivered with, so tests can assert on them."""
    implements(ITicketChangeListener)

    def ticket_created(self, ticket):
        """Record a ticket-creation event."""
        self.ticket = ticket
        self.resource = ticket.resource
        self.action = 'created'

    def ticket_changed(self, ticket, comment, author, old_values):
        """Record a ticket-modification event and its metadata."""
        self.ticket = ticket
        self.comment = comment
        self.author = author
        self.old_values = old_values
        self.action = 'changed'

    def ticket_deleted(self, ticket):
        """Record a ticket-deletion event."""
        self.ticket = ticket
        self.action = 'deleted'
class TicketTestCase(unittest.TestCase):
    """Unit tests for trac.ticket.model.Ticket: creation, modification,
    changelog behavior, default values, field normalization and change
    listener notification."""

    def setUp(self):
        # Stub environment with default data plus three custom fields
        # ('foo' text field, 'cbon'/'cboff' checkboxes) used below.
        self.env = EnvironmentStub(default_data=True)
        self.env.config.set('ticket-custom', 'foo', 'text')
        self.env.config.set('ticket-custom', 'cbon', 'checkbox')
        self.env.config.set('ticket-custom', 'cboff', 'checkbox')

    def tearDown(self):
        self.env.reset_db()

    def _insert_ticket(self, summary, **kw):
        """Helper for inserting a ticket into the database"""
        # NOTE(review): `summary` is accepted but never assigned to the
        # ticket -- callers must pass summary via **kw for it to stick.
        ticket = Ticket(self.env)
        for k, v in kw.items():
            ticket[k] = v
        return ticket.insert()

    def _create_a_ticket(self):
        """Build (but do not insert) a ticket with fixed test values."""
        # 1. Creating ticket
        ticket = Ticket(self.env)
        ticket['reporter'] = 'santa'
        ticket['summary'] = 'Foo'
        ticket['foo'] = 'This is a custom field'
        return ticket

    def test_invalid_ticket_id(self):
        # Valid ids are 1 .. 2**31; anything outside raises on fetch.
        self.assertEqual(Ticket.id_is_valid(-1), False)
        self.assertEqual(Ticket.id_is_valid(0), False)
        self.assertEqual(Ticket.id_is_valid(1), True)
        self.assertEqual(Ticket.id_is_valid(1L << 31), True)
        self.assertEqual(Ticket.id_is_valid(1L << 32), False)
        self.assertRaises(ResourceNotFound, Ticket, self.env, -1)
        self.assertRaises(ResourceNotFound, Ticket, self.env, 1L << 32)

    def test_create_ticket_1(self):
        # Field values are readable before insert.
        ticket = self._create_a_ticket()
        self.assertEqual('santa', ticket['reporter'])
        self.assertEqual('Foo', ticket['summary'])
        self.assertEqual('This is a custom field', ticket['foo'])
        ticket.insert()

    def test_create_ticket_2(self):
        ticket = self._create_a_ticket()
        ticket.insert()
        self.assertEqual(1, ticket.id)
        # Retrieving ticket
        ticket2 = Ticket(self.env, 1)
        self.assertEqual(1, ticket2.id)
        self.assertEqual('santa', ticket2['reporter'])
        self.assertEqual('Foo', ticket2['summary'])
        self.assertEqual('This is a custom field', ticket2['foo'])

    def _modify_a_ticket(self):
        """Insert a ticket, change two fields with a comment, return it."""
        ticket2 = self._create_a_ticket()
        ticket2.insert()
        ticket2['summary'] = 'Bar'
        ticket2['foo'] = 'New value'
        ticket2.save_changes('santa', 'this is my comment')
        return ticket2

    def test_create_ticket_3(self):
        self._modify_a_ticket()
        # Retrieving ticket
        ticket3 = Ticket(self.env, 1)
        self.assertEqual(1, ticket3.id)
        self.assertEqual(ticket3['reporter'], 'santa')
        self.assertEqual(ticket3['summary'], 'Bar')
        self.assertEqual(ticket3['foo'], 'New value')

    def test_create_ticket_4(self):
        ticket3 = self._modify_a_ticket()
        # Testing get_changelog()
        log = ticket3.get_changelog()
        self.assertEqual(len(log), 3)
        # One changelog entry per modified field plus the comment itself.
        ok_vals = ['foo', 'summary', 'comment']
        self.failUnless(log[0][2] in ok_vals)
        self.failUnless(log[1][2] in ok_vals)
        self.failUnless(log[2][2] in ok_vals)

    def test_create_ticket_5(self):
        ticket3 = self._modify_a_ticket()
        # Testing delete()
        ticket3.delete()
        log = ticket3.get_changelog()
        self.assertEqual(len(log), 0)
        self.assertRaises(TracError, Ticket, self.env, 1)

    def test_ticket_id_is_always_int(self):
        # A string id given to the constructor is normalized to int.
        ticket_id = self._insert_ticket('Foo')
        self.assertEqual(ticket_id, int(ticket_id))
        ticket = Ticket(self.env, str(ticket_id))
        self.assertEqual(ticket_id, ticket.id)
        self.assertEqual(ticket.resource.id, ticket_id)

    def test_can_save_ticket_without_explicit_comment(self):
        ticket = Ticket(self.env)
        ticket.insert()
        ticket['summary'] = 'another summary'
        ticket.save_changes('foo')
        # A comment row is still written, with an empty comment text.
        changes = ticket.get_changelog()
        comment_change = [c for c in changes if c[2] == 'comment'][0]
        self.assertEqual('1', comment_change[3])
        self.assertEqual('', comment_change[4])

    def test_can_save_ticket_without_explicit_username(self):
        ticket = Ticket(self.env)
        ticket.insert()
        ticket['summary'] = 'another summary'
        ticket.save_changes()
        # Author defaults to None when not supplied.
        for change in ticket.get_changelog():
            self.assertEqual(None, change[1])

    def test_comment_with_whitespace_only_is_not_saved(self):
        ticket = Ticket(self.env)
        ticket.insert()
        ticket.save_changes(comment='\n \n ')
        self.assertEqual(0, len(ticket.get_changelog()))

    def test_prop_whitespace_change_is_not_saved(self):
        # A change that only adds surrounding whitespace is a no-op.
        ticket = Ticket(self.env)
        ticket.populate({'summary': 'ticket summary'})
        ticket.insert()
        ticket['summary'] = ' ticket summary '
        ticket.save_changes()
        self.assertEqual(0, len(ticket.get_changelog()))

    def test_ticket_default_values(self):
        """
        Verify that a ticket uses default values specified in the configuration
        when created.
        """
        # Set defaults for some standard fields
        self.env.config.set('ticket', 'default_type', 'defect')
        self.env.config.set('ticket', 'default_component', 'component1')

        # Add a custom field of type 'text' with a default value
        self.env.config.set('ticket-custom', 'foo', 'text')
        self.env.config.set('ticket-custom', 'foo.value', 'Something')

        # Add a custom field of type 'select' with a default value specified as
        # the value itself
        self.env.config.set('ticket-custom', 'bar', 'select')
        self.env.config.set('ticket-custom', 'bar.options', 'one|two|three')
        self.env.config.set('ticket-custom', 'bar.value', 'two')

        # Add a custom field of type 'select' with a default value specified as
        # index into the options list
        self.env.config.set('ticket-custom', 'baz', 'select')
        self.env.config.set('ticket-custom', 'baz.options', 'one|two|three')
        self.env.config.set('ticket-custom', 'baz.value', '2')

        ticket = Ticket(self.env)
        self.assertEqual('defect', ticket['type'])
        self.assertEqual('component1', ticket['component'])
        self.assertEqual('Something', ticket['foo'])
        self.assertEqual('two', ticket['bar'])
        # '2' is interpreted as a 0-based index into baz.options.
        self.assertEqual('three', ticket['baz'])

    def test_set_field_stripped(self):
        """
        Verify that whitespace around ticket fields is stripped, except for
        textarea fields.
        """
        ticket = Ticket(self.env)
        ticket['component'] = '  foo  '
        ticket['description'] = '  bar  '
        self.assertEqual('foo', ticket['component'])
        self.assertEqual('  bar  ', ticket['description'])

    def test_set_field_multi(self):
        """
        Ticket fields can't yet be multi-valued
        """
        ticket = Ticket(self.env)
        def set_multi_valued():
            ticket['component'] = ['  foo  ', '  bar  ']
        self.assertRaises(TracError, set_multi_valued)

    def test_owner_from_component(self):
        """
        Verify that the owner of a new ticket is set to the owner of the
        component.
        """
        component = Component(self.env)
        component.name = 'test'
        component.owner = 'joe'
        component.insert()
        ticket = Ticket(self.env)
        ticket['reporter'] = 'santa'
        ticket['summary'] = 'Foo'
        ticket['component'] = 'test'
        ticket.insert()
        self.assertEqual('joe', ticket['owner'])

    def test_owner_from_changed_component(self):
        """
        Verify that the owner of a new ticket is updated when the component is
        changed.
        """
        component1 = Component(self.env)
        component1.name = 'test1'
        component1.owner = 'joe'
        component1.insert()

        component2 = Component(self.env)
        component2.name = 'test2'
        component2.owner = 'kate'
        component2.insert()

        ticket = Ticket(self.env)
        ticket['reporter'] = 'santa'
        ticket['summary'] = 'Foo'
        ticket['component'] = 'test1'
        ticket['status'] = 'new'
        tktid = ticket.insert()
        ticket = Ticket(self.env, tktid)
        ticket['component'] = 'test2'
        ticket.save_changes('jane', 'Testing')
        self.assertEqual('kate', ticket['owner'])

    def test_no_disown_from_changed_component(self):
        """
        Verify that a ticket is not disowned when the component is changed to
        a non-assigned component.
        """
        component1 = Component(self.env)
        component1.name = 'test1'
        component1.owner = 'joe'
        component1.insert()

        component2 = Component(self.env)
        component2.name = 'test2'
        component2.owner = ''
        component2.insert()

        ticket = Ticket(self.env)
        ticket['reporter'] = 'santa'
        ticket['summary'] = 'Foo'
        ticket['component'] = 'test1'
        ticket['status'] = 'new'
        tktid = ticket.insert()
        ticket = Ticket(self.env, tktid)
        ticket['component'] = 'test2'
        ticket.save_changes('jane', 'Testing')
        # Owner stays 'joe' since the new component has no owner.
        self.assertEqual('joe', ticket['owner'])

    def test_populate_ticket(self):
        # 'checkbox_X' marker keys indicate the checkbox was rendered in
        # the form; an unchecked box then populates as '0'.
        data = {'summary': 'Hello world', 'reporter': 'john',
                'foo': 'bar', 'checkbox_cbon': '', 'cbon': 'on',
                'checkbox_cboff': ''}
        ticket = Ticket(self.env)
        ticket.populate(data)

        # Standard fields
        self.assertEqual('Hello world', ticket['summary'])
        self.assertEqual('john', ticket['reporter'])

        # An unknown field
        assert ticket['bar'] is None

        # Custom field
        self.assertEqual('bar', ticket['foo'])

        # Custom field of type 'checkbox'
        self.assertEqual('on', ticket['cbon'])
        self.assertEqual('0', ticket['cboff'])

    def test_changelog(self):
        tkt_id = self._insert_ticket('Test', reporter='joe', component='foo',
                                     milestone='bar')
        ticket = Ticket(self.env, tkt_id)
        ticket['component'] = 'bar'
        ticket['milestone'] = 'foo'
        now = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        ticket.save_changes('jane', 'Testing', now)
        changelog = sorted(ticket.get_changelog())
        self.assertEqual([(now, 'jane', 'comment', '1', 'Testing', True),
                          (now, 'jane', 'component', 'foo', 'bar', True),
                          (now, 'jane', 'milestone', 'bar', 'foo', True)],
                         changelog)

    def test_changelog_with_attachment(self):
        """Verify ordering of attachments and comments in the changelog."""
        tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
        ticket = Ticket(self.env, tkt_id)
        t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        ticket.save_changes('jane', 'Testing', t1)
        t2 = datetime(2001, 1, 1, 1, 1, 2, 0, utc)
        # Insert an attachment row directly; attachments show up in the
        # changelog interleaved by timestamp, flagged as non-permanent.
        self.env.db_transaction("""
            INSERT INTO attachment (type, id, filename, size, time,
                                    description, author, ipnr)
            VALUES ('ticket',%s,'file.txt',1234,%s, 'My file','mark','')
            """, (str(tkt_id), to_utimestamp(t2)))
        t3 = datetime(2001, 1, 1, 1, 1, 3, 0, utc)
        ticket.save_changes('jim', 'Other', t3)
        log = ticket.get_changelog()
        self.assertEqual(4, len(log))
        self.assertEqual((t1, 'jane', 'comment', '1', 'Testing', True), log[0])
        self.assertEqual([(t2, 'mark', 'attachment', '', 'file.txt', False),
                          (t2, 'mark', 'comment', '', 'My file', False)],
                         sorted(log[1:3]))
        self.assertEqual((t3, 'jim', 'comment', '2', 'Other', True), log[3])

    def test_subsecond_change(self):
        """Perform two ticket changes within a second."""
        tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
        ticket = Ticket(self.env, tkt_id)
        # Timestamps differ only in microseconds; both changes must be kept.
        t1 = datetime(2001, 1, 1, 1, 1, 1, 123456, utc)
        ticket.save_changes('jane', 'Testing', t1)
        t2 = datetime(2001, 1, 1, 1, 1, 1, 123789, utc)
        ticket.save_changes('jim', 'Other', t2)
        log = ticket.get_changelog()
        self.assertEqual(2, len(log))
        self.assertEqual((t1, 'jane', 'comment', '1', 'Testing', True), log[0])
        self.assertEqual((t2, 'jim', 'comment', '2', 'Other', True), log[1])

    def test_changelog_with_reverted_change(self):
        # Setting a field back to its original value records no change.
        tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
        ticket = Ticket(self.env, tkt_id)
        ticket['component'] = 'bar'
        ticket['component'] = 'foo'
        now = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        ticket.save_changes('jane', 'Testing', now)
        self.assertEqual([(now, 'jane', 'comment', '1', 'Testing', True)],
                         list(ticket.get_changelog()))

    def test_change_listener_created(self):
        listener = TestTicketChangeListener(self.env)
        ticket = self._create_a_ticket()
        ticket.insert()
        self.assertEqual('created', listener.action)
        self.assertEqual(ticket, listener.ticket)
        self.assertEqual(ticket.id, ticket.resource.id)

    def test_change_listener_changed(self):
        listener = TestTicketChangeListener(self.env)
        data = {'component': 'foo', 'milestone': 'bar'}
        tkt_id = self._insert_ticket('Hello World', reporter='john', **data)

        ticket = Ticket(self.env, tkt_id)
        ticket['component'] = 'new component'
        ticket['milestone'] = 'new milestone'

        comment = 'changing ticket'
        ticket.save_changes('author', comment)

        self.assertEqual('changed', listener.action)
        self.assertEqual(comment, listener.comment)
        self.assertEqual('author', listener.author)
        # old_values must reflect the pre-change field values.
        for key, value in data.iteritems():
            self.assertEqual(value, listener.old_values[key])

    def test_change_listener_deleted(self):
        listener = TestTicketChangeListener(self.env)
        ticket = self._create_a_ticket()
        ticket.insert()
        ticket.delete()
        self.assertEqual('deleted', listener.action)
        self.assertEqual(ticket, listener.ticket)
class TicketCommentTestCase(unittest.TestCase):
    """Shared helpers for the comment edit/delete test cases below."""

    def _insert_ticket(self, summary, when, **kwargs):
        """Create a ticket at `when` and remember its id on self.id."""
        tkt = Ticket(self.env)
        for field, value in kwargs.iteritems():
            tkt[field] = value
        self.id = tkt.insert(when)

    def _modify_ticket(self, author, comment, when, cnum, **kwargs):
        """Apply field changes and a comment to the remembered ticket."""
        tkt = Ticket(self.env, self.id)
        for field, value in kwargs.iteritems():
            tkt[field] = value
        tkt.save_changes(author, comment, when, cnum=cnum)

    def _find_change(self, ticket, cnum):
        """Return the (timezone-aware) datetime of change `cnum`."""
        ts = ticket._find_change(cnum)[0]
        return from_utimestamp(ts)

    def assertChange(self, ticket, cnum, date, author, **fields):
        """Assert change `cnum` carries the given date, author and fields."""
        expected = dict(date=date, author=author, fields=fields)
        self.assertEqual(expected, ticket.get_change(cnum=cnum))
class TicketCommentEditTestCase(TicketCommentTestCase):
    """Tests for Ticket.modify_comment(): editing comments, including
    threaded replies and records with missing cnum/comment rows."""

    def setUp(self):
        # One ticket with three comments: '1' (standalone), '1.2'
        # (threaded reply to 1, also changes owner) and '3' (also
        # changes keywords).
        self.env = EnvironmentStub(default_data=True)
        self.created = datetime(2001, 1, 1, 1, 0, 0, 0, utc)
        self._insert_ticket('Test ticket', self.created,
                            owner='john', keywords='a, b, c')
        self.t1 = self.created + timedelta(seconds=1)
        self._modify_ticket('jack', 'Comment 1', self.t1, '1')
        self.t2 = self.created + timedelta(seconds=2)
        self._modify_ticket('john', 'Comment 2', self.t2, '1.2',
                            owner='jack')
        self.t3 = self.created + timedelta(seconds=3)
        self._modify_ticket('jim', 'Comment 3', self.t3, '3',
                            keywords='a, b')

    def tearDown(self):
        self.env.reset_db()

    def test_modify_comment(self):
        """Check modification of a "standalone" comment"""
        ticket = Ticket(self.env, self.id)
        self.assertChange(ticket, 1, self.t1, 'jack',
            comment=dict(author='jack', old='1', new='Comment 1'))
        self.assertChange(ticket, 2, self.t2, 'john',
            owner=dict(author='john', old='john', new='jack'),
            comment=dict(author='john', old='1.2', new='Comment 2'))
        self.assertChange(ticket, 3, self.t3, 'jim',
            keywords=dict(author='jim', old='a, b, c', new='a, b'),
            comment=dict(author='jim', old='3', new='Comment 3'))

        t = self.created + timedelta(seconds=10)
        ticket.modify_comment(self._find_change(ticket, 1),
                              'joe', 'New comment 1', t)
        # The previous text is preserved in a '_comment0' field whose
        # "new" value is the edit timestamp.
        self.assertChange(ticket, 1, self.t1, 'jack',
            comment=dict(author='jack', old='1', new='New comment 1'),
            _comment0=dict(author='joe', old='Comment 1',
                           new=str(to_utimestamp(t))))
        # Editing a comment also updates the ticket's changetime.
        self.assertEqual(t, Ticket(self.env, self.id)['changetime'])

    def test_threading(self):
        """Check modification of a "threaded" comment"""
        ticket = Ticket(self.env, self.id)
        t = self.created + timedelta(seconds=20)
        ticket.modify_comment(self._find_change(ticket, 2),
                              'joe', 'New comment 2', t)
        self.assertChange(ticket, 2, self.t2, 'john',
            owner=dict(author='john', old='john', new='jack'),
            comment=dict(author='john', old='1.2', new='New comment 2'),
            _comment0=dict(author='joe', old='Comment 2',
                           new=str(to_utimestamp(t))))

    def test_modify_missing_cnum(self):
        """Editing a comment with no cnum in oldvalue"""
        self.env.db_transaction(
            "UPDATE ticket_change SET oldvalue='' WHERE oldvalue='3'")
        ticket = Ticket(self.env, self.id)
        t = self.created + timedelta(seconds=30)
        ticket.modify_comment(self._find_change(ticket, 3),
                              'joe', 'New comment 3', t)
        self.assertChange(ticket, 3, self.t3, 'jim',
            keywords=dict(author='jim', old='a, b, c', new='a, b'),
            comment=dict(author='jim', old='', new='New comment 3'),
            _comment0=dict(author='joe', old='Comment 3',
                           new=str(to_utimestamp(t))))

    def test_modify_missing_comment(self):
        """Editing a comment where the comment field is missing"""
        self.env.db_transaction("""
            DELETE FROM ticket_change WHERE field='comment' AND oldvalue='1.2'
            """)
        ticket = Ticket(self.env, self.id)
        t = self.created + timedelta(seconds=40)
        ticket.modify_comment(self._find_change(ticket, 2),
                              'joe', 'New comment 2', t)
        self.assertChange(ticket, 2, self.t2, 'john',
            owner=dict(author='john', old='john', new='jack'),
            comment=dict(author='john', old='', new='New comment 2'),
            _comment0=dict(author='joe', old='',
                           new=str(to_utimestamp(t))))

    def test_modify_missing_cnums_and_comment(self):
        """Editing a comment when all cnums are missing and one comment
        field is missing
        """
        with self.env.db_transaction as db:
            db("UPDATE ticket_change SET oldvalue='' WHERE oldvalue='1'")
            db("""DELETE FROM ticket_change
                  WHERE field='comment' AND oldvalue='1.2'""")
            db("UPDATE ticket_change SET oldvalue='' WHERE oldvalue='3'")

        # Modify after missing comment
        ticket = Ticket(self.env, self.id)
        t = self.created + timedelta(seconds=50)
        ticket.modify_comment(self._find_change(ticket, 3),
                              'joe', 'New comment 3', t)
        self.assertChange(ticket, 3, self.t3, 'jim',
            keywords=dict(author='jim', old='a, b, c', new='a, b'),
            comment=dict(author='jim', old='', new='New comment 3'),
            _comment0=dict(author='joe', old='Comment 3',
                           new=str(to_utimestamp(t))))

        # Modify missing comment
        t = self.created + timedelta(seconds=60)
        ticket.modify_comment(self._find_change(ticket, 2),
                              'joe', 'New comment 2', t)
        self.assertChange(ticket, 2, self.t2, 'john',
            owner=dict(author='john', old='john', new='jack'),
            comment=dict(author='john', old='', new='New comment 2'),
            _comment0=dict(author='joe', old='',
                           new=str(to_utimestamp(t))))

    def test_missing_comment_edit(self):
        """Modify a comment where one edit is missing"""
        ticket = Ticket(self.env, self.id)
        t1 = self.created + timedelta(seconds=70)
        ticket.modify_comment(self._find_change(ticket, 1),
                              'joe', 'New comment 1', t1)
        t2 = self.created + timedelta(seconds=80)
        ticket.modify_comment(self._find_change(ticket, 1),
                              'joe', 'Other comment 1', t2)

        # Each successive edit gets the next '_commentN' history slot.
        self.assertChange(ticket, 1, self.t1, 'jack',
            comment=dict(author='jack', old='1', new='Other comment 1'),
            _comment0=dict(author='joe', old='Comment 1',
                           new=str(to_utimestamp(t1))),
            _comment1=dict(author='joe', old='New comment 1',
                           new=str(to_utimestamp(t2))))

        # Even with a history gap, the next edit uses a fresh slot.
        self.env.db_transaction(
            "DELETE FROM ticket_change WHERE field='_comment0'")

        t3 = self.created + timedelta(seconds=90)
        ticket.modify_comment(self._find_change(ticket, 1),
                              'joe', 'Newest comment 1', t3)
        self.assertChange(ticket, 1, self.t1, 'jack',
            comment=dict(author='jack', old='1', new='Newest comment 1'),
            _comment1=dict(author='joe', old='New comment 1',
                           new=str(to_utimestamp(t2))),
            _comment2=dict(author='joe', old='Other comment 1',
                           new=str(to_utimestamp(t3))))

    def test_comment_history(self):
        """Check the generation of the comment history"""
        ticket = Ticket(self.env, self.id)
        t = [self.t1]
        # 31 successive edits of comment 1, each by a distinct author.
        for i in range(1, 32):
            t.append(self.created + timedelta(minutes=i))
            ticket.modify_comment(self._find_change(ticket, 1),
                                  'joe (%d)' % i,
                                  'Comment 1 (%d)' % i, t[-1])
        # History is retrievable both by comment number and by date.
        history = ticket.get_comment_history(cnum=1)
        self.assertEqual((0, t[0], 'jack', 'Comment 1'), history[0])
        for i in range(1, len(history)):
            self.assertEqual((i, t[i], 'joe (%d)' % i,
                             'Comment 1 (%d)' % i), history[i])
        history = ticket.get_comment_history(cdate=self.t1)
        self.assertEqual((0, t[0], 'jack', 'Comment 1'), history[0])
        for i in range(1, len(history)):
            self.assertEqual((i, t[i], 'joe (%d)' % i,
                             'Comment 1 (%d)' % i), history[i])
class TicketCommentDeleteTestCase(TicketCommentTestCase):
    """Tests for Ticket.delete_change(): deleting comments/changes and
    re-threading the oldvalue chains of later changes."""

    def setUp(self):
        # One ticket with four changes; each change touches the 'foo'
        # custom field and (from change 3 on) the keywords field.
        self.env = EnvironmentStub(default_data=True)
        self.env.config.set('ticket-custom', 'foo', 'text')
        self.created = datetime(2001, 1, 1, 1, 0, 0, 0, utc)
        self._insert_ticket('Test ticket', self.created,
                            owner='john', keywords='a, b, c', foo='initial')
        self.t1 = self.created + timedelta(seconds=1)
        self._modify_ticket('jack', 'Comment 1', self.t1, '1',
                            foo='change 1')
        self.t2 = self.created + timedelta(seconds=2)
        self._modify_ticket('john', 'Comment 2', self.t2, '1.2',
                            owner='jack', foo='change2')
        self.t3 = self.created + timedelta(seconds=3)
        self._modify_ticket('jim', 'Comment 3', self.t3, '3',
                            keywords='a, b', foo='change3')
        self.t4 = self.created + timedelta(seconds=4)
        self._modify_ticket('joe', 'Comment 4', self.t4, '4',
                            keywords='a', foo='change4')

    def tearDown(self):
        self.env.reset_db()

    def test_delete_last_comment(self):
        # Deleting the newest change rolls the ticket fields back to the
        # previous change's values.
        ticket = Ticket(self.env, self.id)
        self.assertEqual('a', ticket['keywords'])
        self.assertEqual('change4', ticket['foo'])
        t = datetime.now(utc)
        ticket.delete_change(cnum=4, when=t)
        self.assertEqual('a, b', ticket['keywords'])
        self.assertEqual('change3', ticket['foo'])
        self.assertEqual(None, ticket.get_change(cnum=4))
        self.assertNotEqual(None, ticket.get_change(cnum=3))
        self.assertEqual(t, ticket.time_changed)

    def test_delete_last_comment_when_custom_field_gone(self):
        """Regression test for http://trac.edgewall.org/ticket/10858"""
        ticket = Ticket(self.env, self.id)
        self.assertEqual('a', ticket['keywords'])
        self.assertEqual('change4', ticket['foo'])
        # we simulate the removal of the definition of the 'foo' custom field
        self.env.config.remove('ticket-custom', 'foo')
        del TicketSystem(self.env).fields
        del TicketSystem(self.env).custom_fields
        ticket = Ticket(self.env, self.id)
        #
        t = datetime.now(utc)
        ticket.delete_change(cnum=4, when=t)
        self.assertEqual('a, b', ticket['keywords'])
        # 'foo' is no longer defined for the ticket
        self.assertEqual(None, ticket['foo'])
        # however, 'foo=change3' is still in the database
        self.assertEqual([('change3',)], self.env.db_query("""
            SELECT value FROM ticket_custom WHERE ticket=%s AND name='foo'
            """, (self.id,)))
        self.assertEqual(None, ticket.get_change(cnum=4))
        self.assertNotEqual(None, ticket.get_change(cnum=3))
        self.assertEqual(t, ticket.time_changed)

    def test_delete_last_comment_by_date(self):
        # Same as test_delete_last_comment, addressing the change by date.
        ticket = Ticket(self.env, self.id)
        self.assertEqual('a', ticket['keywords'])
        self.assertEqual('change4', ticket['foo'])
        t = datetime.now(utc)
        ticket.delete_change(cdate=self.t4, when=t)
        self.assertEqual('a, b', ticket['keywords'])
        self.assertEqual('change3', ticket['foo'])
        self.assertEqual(None, ticket.get_change(cdate=self.t4))
        self.assertNotEqual(None, ticket.get_change(cdate=self.t3))
        self.assertEqual(t, ticket.time_changed)

    def test_delete_mid_comment(self):
        # Deleting a middle change re-chains the following change's
        # oldvalues to those before the deleted change.
        ticket = Ticket(self.env, self.id)
        self.assertChange(ticket, 4, self.t4, 'joe',
            comment=dict(author='joe', old='4', new='Comment 4'),
            keywords=dict(author='joe', old='a, b', new='a'),
            foo=dict(author='joe', old='change3', new='change4'))
        t = datetime.now(utc)
        ticket.delete_change(cnum=3, when=t)
        self.assertEqual(None, ticket.get_change(cnum=3))
        self.assertEqual('a', ticket['keywords'])
        self.assertChange(ticket, 4, self.t4, 'joe',
            comment=dict(author='joe', old='4', new='Comment 4'),
            keywords=dict(author='joe', old='a, b, c', new='a'),
            foo=dict(author='joe', old='change2', new='change4'))
        self.assertEqual(t, ticket.time_changed)

    def test_delete_mid_comment_by_date(self):
        ticket = Ticket(self.env, self.id)
        self.assertChange(ticket, 4, self.t4, 'joe',
            comment=dict(author='joe', old='4', new='Comment 4'),
            keywords=dict(author='joe', old='a, b', new='a'),
            foo=dict(author='joe', old='change3', new='change4'))
        t = datetime.now(utc)
        ticket.delete_change(cdate=self.t3, when=t)
        self.assertEqual(None, ticket.get_change(cdate=self.t3))
        self.assertEqual('a', ticket['keywords'])
        self.assertChange(ticket, 4, self.t4, 'joe',
            comment=dict(author='joe', old='4', new='Comment 4'),
            keywords=dict(author='joe', old='a, b, c', new='a'),
            foo=dict(author='joe', old='change2', new='change4'))
        self.assertEqual(t, ticket.time_changed)

    def test_delete_mid_comment_inconsistent(self):
        # Make oldvalue on keywords for change 4 inconsistent. This should
        # result in no change in oldvalue when deleting change 3. The
        # oldvalue of foo should change normally.
        self.env.db_transaction("""
            UPDATE ticket_change SET oldvalue='1, 2'
            WHERE field='keywords' AND oldvalue='a, b'
            """)
        ticket = Ticket(self.env, self.id)
        self.assertChange(ticket, 4, self.t4, 'joe',
            comment=dict(author='joe', old='4', new='Comment 4'),
            keywords=dict(author='joe', old='1, 2', new='a'),
            foo=dict(author='joe', old='change3', new='change4'))
        ticket.delete_change(3)
        self.assertEqual(None, ticket.get_change(3))
        self.assertEqual('a', ticket['keywords'])
        self.assertChange(ticket, 4, self.t4, 'joe',
            comment=dict(author='joe', old='4', new='Comment 4'),
            keywords=dict(author='joe', old='1, 2', new='a'),
            foo=dict(author='joe', old='change2', new='change4'))

    def test_delete_all_comments(self):
        # Deleting every change in reverse order must leave a consistent
        # ticket with the last deletion's timestamp as changetime.
        ticket = Ticket(self.env, self.id)
        ticket.delete_change(4)
        ticket.delete_change(3)
        ticket.delete_change(2)
        t = datetime.now(utc)
        ticket.delete_change(1, when=t)
        self.assertEqual(t, ticket.time_changed)
class EnumTestCase(unittest.TestCase):
    """Tests for enum-backed model classes (Priority, Type): fetch,
    insert, update, delete and the resulting renumbering."""

    def setUp(self):
        self.env = EnvironmentStub(default_data=True)

    def tearDown(self):
        self.env.reset_db()

    def test_priority_fetch(self):
        prio = Priority(self.env, 'major')
        self.assertEqual(prio.name, 'major')
        # Enum values are stored/reported as strings.
        self.assertEqual(prio.value, '3')

    def test_priority_insert(self):
        prio = Priority(self.env)
        prio.name = 'foo'
        prio.insert()
        self.assertEqual(True, prio.exists)

    def test_priority_insert_with_value(self):
        prio = Priority(self.env)
        prio.name = 'bar'
        prio.value = 100
        prio.insert()
        self.assertEqual(True, prio.exists)

    def test_priority_update(self):
        prio = Priority(self.env, 'major')
        prio.name = 'foo'
        prio.update()
        # The renamed entry is fetchable, the old name no longer is.
        Priority(self.env, 'foo')
        self.assertRaises(TracError, Priority, self.env, 'major')

    def test_priority_delete(self):
        prio = Priority(self.env, 'major')
        self.assertEqual('3', prio.value)
        prio.delete()
        self.assertEqual(False, prio.exists)
        self.assertRaises(TracError, Priority, self.env, 'major')
        # Remaining enums are renumbered after a delete: 'minor' now
        # reports value '3'.
        prio = Priority(self.env, 'minor')
        self.assertEqual('3', prio.value)

    def test_ticket_type_update(self):
        tkttype = Type(self.env, 'task')
        self.assertEqual(tkttype.name, 'task')
        self.assertEqual(tkttype.value, '3')
        tkttype.name = 'foo'
        tkttype.update()
        Type(self.env, 'foo')
class TestMilestoneChangeListener(core.Component):
    """IMilestoneChangeListener spy recording the last event it observed."""
    implements(IMilestoneChangeListener)

    def milestone_created(self, milestone):
        """Record a milestone-creation event."""
        self.milestone = milestone
        self.action = 'created'

    def milestone_changed(self, milestone, old_values):
        """Record a milestone-modification event and the prior values."""
        self.milestone = milestone
        self.old_values = old_values
        self.action = 'changed'

    def milestone_deleted(self, milestone):
        """Record a milestone-deletion event."""
        self.milestone = milestone
        self.action = 'deleted'
class MilestoneTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/conversion.py | trac/trac/ticket/tests/conversion.py | import os
import unittest
from trac import __version__ as TRAC_VERSION
from trac.test import EnvironmentStub, Mock
from trac.ticket.model import Ticket
from trac.ticket.web_ui import TicketModule
from trac.mimeview.api import Mimeview
from trac.web.href import Href
class TicketConversionTestCase(unittest.TestCase):
    """Tests the ticket content converters (CSV, TSV, RSS) that
    TicketModule registers with the Mimeview API."""

    def setUp(self):
        self.env = EnvironmentStub()
        self.env.config.set('trac', 'templates_dir',
                            os.path.join(os.path.dirname(self.env.path),
                                         'templates'))
        self.ticket_module = TicketModule(self.env)
        self.mimeview = Mimeview(self.env)
        # Minimal request stub carrying only what the converters read.
        self.req = Mock(base_path='/trac.cgi', path_info='',
                        href=Href('/trac.cgi'), chrome={'logo': {}},
                        abs_href=Href('http://example.org/trac.cgi'),
                        environ={}, perm=[], authname='-', args={}, tz=None,
                        locale='', session=None, form_token=None)

    def tearDown(self):
        self.env.reset_db()

    def _create_a_ticket(self):
        # 1. Creating ticket
        ticket = Ticket(self.env)
        ticket['reporter'] = 'santa'
        ticket['summary'] = 'Foo'
        ticket['description'] = 'Bar'
        ticket['foo'] = 'This is a custom field'
        ticket.insert()
        return ticket

    def test_conversions(self):
        # All three converters are announced for the ticket content type.
        conversions = self.mimeview.get_supported_conversions(
            'trac.ticket.Ticket')
        expected = sorted([('csv', 'Comma-delimited Text', 'csv',
                            'trac.ticket.Ticket', 'text/csv', 8,
                            self.ticket_module),
                           ('tab', 'Tab-delimited Text', 'tsv',
                            'trac.ticket.Ticket', 'text/tab-separated-values', 8,
                            self.ticket_module),
                           ('rss', 'RSS Feed', 'xml',
                            'trac.ticket.Ticket', 'application/rss+xml', 8,
                            self.ticket_module)],
                          key=lambda i: i[-1], reverse=True)
        self.assertEqual(expected, conversions)

    def test_csv_conversion(self):
        ticket = self._create_a_ticket()
        csv = self.mimeview.convert_content(self.req, 'trac.ticket.Ticket',
                                            ticket, 'csv')
        # Output begins with a UTF-8 BOM ('\xef\xbb\xbf') so spreadsheet
        # applications detect the encoding.
        self.assertEqual(('\xef\xbb\xbf'
                          'id,summary,reporter,owner,description,status,'
                          'keywords,cc\r\n1,Foo,santa,,Bar,,,\r\n',
                          'text/csv;charset=utf-8', 'csv'), csv)

    def test_tab_conversion(self):
        ticket = self._create_a_ticket()
        csv = self.mimeview.convert_content(self.req, 'trac.ticket.Ticket',
                                            ticket, 'tab')
        self.assertEqual(('\xef\xbb\xbf'
                          'id\tsummary\treporter\towner\tdescription\tstatus\t'
                          'keywords\tcc\r\n1\tFoo\tsanta\t\tBar\t\t\t\r\n',
                          'text/tab-separated-values;charset=utf-8', 'tsv'),
                         csv)

    def test_rss_conversion(self):
        ticket = self._create_a_ticket()
        content, mimetype, ext = self.mimeview.convert_content(
            self.req, 'trac.ticket.Ticket', ticket, 'rss')
        # Compare with '\r' stripped so the test is newline-agnostic.
        self.assertEqual(("""<?xml version="1.0"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
<channel>
<title>My Project: Ticket #1: Foo</title>
<link>http://example.org/trac.cgi/ticket/1</link>
<description><p>
Bar
</p>
</description>
<language>en-us</language>
<generator>Trac %s</generator>
</channel>
</rss>""" % (TRAC_VERSION),
                          'application/rss+xml', 'xml'),
                         (content.replace('\r', ''), mimetype, ext))
def suite():
    """Return the test suite for this module.

    `unittest.makeSuite` is deprecated (and removed in Python 3.13);
    use a TestLoader directly instead.  The loader's default method
    prefix is 'test', matching the original
    makeSuite(TicketConversionTestCase, 'test') call exactly.
    """
    return unittest.TestLoader().loadTestsFromTestCase(TicketConversionTestCase)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/__init__.py | trac/trac/ticket/tests/__init__.py | import doctest
import unittest
import trac.ticket
from trac.ticket.tests import api, model, query, wikisyntax, notification, \
conversion, report, roadmap, batch
from trac.ticket.tests.functional import functionalSuite
def suite():
    """Aggregate every ticket-related unit-test suite, plus the doctests
    embedded in trac.ticket.api, trac.ticket.report and
    trac.ticket.roadmap.

    NOTE(review): `functionalSuite` is imported at module level but not
    added here -- presumably the functional tests are run separately;
    confirm before removing the import.
    """
    suite = unittest.TestSuite()
    suite.addTest(api.suite())
    suite.addTest(model.suite())
    suite.addTest(query.suite())
    suite.addTest(wikisyntax.suite())
    suite.addTest(notification.suite())
    suite.addTest(conversion.suite())
    suite.addTest(report.suite())
    suite.addTest(roadmap.suite())
    suite.addTest(batch.suite())
    suite.addTest(doctest.DocTestSuite(trac.ticket.api))
    suite.addTest(doctest.DocTestSuite(trac.ticket.report))
    suite.addTest(doctest.DocTestSuite(trac.ticket.roadmap))
    return suite
# Allow running the aggregated ticket test suite directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/ticket/tests/notification.py | trac/trac/ticket/tests/notification.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Emmanuel Blot <emmanuel.blot@free.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Include a basic SMTP server, based on L. Smithson
# (lsmithson@open-networks.co.uk) extensible Python SMTP Server
#
from trac.util.datefmt import utc
from trac.ticket.model import Ticket
from trac.ticket.notification import TicketNotifyEmail
from trac.test import EnvironmentStub, Mock, MockPerm
from trac.tests.notification import SMTPThreadedServer, parse_smtp_message, \
smtp_address
import base64
from datetime import datetime
import os
import quopri
import re
import unittest
# Per-process SMTP port in the 7000-7999 range, so concurrent test runs
# on the same host do not collide on the listening socket.
SMTP_TEST_PORT = 7000 + os.getpid() % 1000
# Maximum expected width of a generated notification body line.
MAXBODYWIDTH = 76
# Assigned by the enclosing test suite; holds the shared SMTP test
# server fixture used by the test cases in this module.
notifysuite = None
class NotificationTestCase(unittest.TestCase):
"""Notification test cases that send email over SMTP"""
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.config.set('project', 'name', 'TracTest')
self.env.config.set('notification', 'smtp_enabled', 'true')
self.env.config.set('notification', 'always_notify_owner', 'true')
self.env.config.set('notification', 'always_notify_reporter', 'true')
self.env.config.set('notification', 'smtp_always_cc',
'joe.user@example.net, joe.bar@example.net')
self.env.config.set('notification', 'use_public_cc', 'true')
self.env.config.set('notification', 'smtp_port', str(SMTP_TEST_PORT))
self.env.config.set('notification', 'smtp_server','localhost')
self.req = Mock(href=self.env.href, abs_href=self.env.abs_href, tz=utc,
perm=MockPerm())
    def tearDown(self):
        """Signal the notification test suite that a test is over"""
        # Let the shared SMTP server thread wrap up the current exchange
        # before the next test starts.
        notifysuite.tear_down()
        # Restore the stub environment's database to its pristine state.
        self.env.reset_db()
def test_recipients(self):
"""To/Cc recipients"""
ticket = Ticket(self.env)
ticket['reporter'] = '"Joe User" < joe.user@example.org >'
ticket['owner'] = 'joe.user@example.net'
ticket['cc'] = 'joe.user@example.com, joe.bar@example.org, ' \
'joe.bar@example.net'
ticket['summary'] = 'Foo'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
recipients = notifysuite.smtpd.get_recipients()
# checks there is no duplicate in the recipient list
rcpts = []
for r in recipients:
self.failIf(r in rcpts)
rcpts.append(r)
# checks that all cc recipients have been notified
cc_list = self.env.config.get('notification', 'smtp_always_cc')
cc_list = "%s, %s" % (cc_list, ticket['cc'])
for r in cc_list.replace(',', ' ').split():
self.failIf(r not in recipients)
# checks that owner has been notified
self.failIf(smtp_address(ticket['owner']) not in recipients)
# checks that reporter has been notified
self.failIf(smtp_address(ticket['reporter']) not in recipients)
def test_no_recipient(self):
"""No recipient case"""
self.env.config.set('notification', 'smtp_always_cc', '')
ticket = Ticket(self.env)
ticket['reporter'] = 'anonymous'
ticket['summary'] = 'Foo'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
sender = notifysuite.smtpd.get_sender()
recipients = notifysuite.smtpd.get_recipients()
message = notifysuite.smtpd.get_message()
# checks that no message has been sent
self.failIf(recipients)
self.failIf(sender)
self.failIf(message)
def test_cc_only(self):
"""Notification w/o explicit recipients but Cc: (#3101)"""
ticket = Ticket(self.env)
ticket['summary'] = 'Foo'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
recipients = notifysuite.smtpd.get_recipients()
# checks that all cc recipients have been notified
cc_list = self.env.config.get('notification', 'smtp_always_cc')
for r in cc_list.replace(',', ' ').split():
self.failIf(r not in recipients)
def test_structure(self):
"""Basic SMTP message structure (headers, body)"""
ticket = Ticket(self.env)
ticket['reporter'] = '"Joe User" <joe.user@example.org>'
ticket['owner'] = 'joe.user@example.net'
ticket['cc'] = 'joe.user@example.com, joe.bar@example.org, ' \
'joe.bar@example.net'
ticket['summary'] = 'This is a summary'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# checks for header existence
self.failIf(not headers)
# checks for body existance
self.failIf(not body)
# checks for expected headers
self.failIf('Date' not in headers)
self.failIf('Subject' not in headers)
self.failIf('Message-ID' not in headers)
self.failIf('From' not in headers)
def test_date(self):
"""Date format compliance (RFC822)
we do not support 'military' format"""
date_str = r"^((?P<day>\w{3}),\s*)*(?P<dm>\d{2})\s+" \
r"(?P<month>\w{3})\s+(?P<year>\d{4})\s+" \
r"(?P<hour>\d{2}):(?P<min>[0-5][0-9])" \
r"(:(?P<sec>[0-5][0-9]))*\s" \
r"((?P<tz>\w{2,3})|(?P<offset>[+\-]\d{4}))$"
date_re = re.compile(date_str)
# python time module does not detect incorrect time values
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', \
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
tz = ['UT', 'GMT', 'EST', 'EDT', 'CST', 'CDT', 'MST', 'MDT',
'PST', 'PDT']
ticket = Ticket(self.env)
ticket['reporter'] = '"Joe User" <joe.user@example.org>'
ticket['summary'] = 'This is a summary'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.failIf('Date' not in headers)
mo = date_re.match(headers['Date'])
self.failIf(not mo)
if mo.group('day'):
self.failIf(mo.group('day') not in days)
self.failIf(int(mo.group('dm')) not in range(1, 32))
self.failIf(mo.group('month') not in months)
self.failIf(int(mo.group('hour')) not in range(0, 24))
if mo.group('tz'):
self.failIf(mo.group('tz') not in tz)
def test_bcc_privacy(self):
"""Visibility of recipients"""
def run_bcc_feature(public):
# CC list should be private
self.env.config.set('notification', 'use_public_cc',
'true' if public else 'false')
self.env.config.set('notification', 'smtp_always_bcc',
'joe.foobar@example.net')
ticket = Ticket(self.env)
ticket['reporter'] = '"Joe User" <joe.user@example.org>'
ticket['summary'] = 'This is a summary'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
if public:
# Msg should have a To list
self.failIf('To' not in headers)
# Extract the list of 'To' recipients from the message
to = [rcpt.strip() for rcpt in headers['To'].split(',')]
else:
# Msg should not have a To list
self.failIf('To' in headers)
# Extract the list of 'To' recipients from the message
to = []
# Extract the list of 'Cc' recipients from the message
cc = [rcpt.strip() for rcpt in headers['Cc'].split(',')]
# Extract the list of the actual SMTP recipients
rcptlist = notifysuite.smtpd.get_recipients()
# Build the list of the expected 'Cc' recipients
ccrcpt = self.env.config.get('notification', 'smtp_always_cc')
cclist = [ccr.strip() for ccr in ccrcpt.split(',')]
for rcpt in cclist:
# Each recipient of the 'Cc' list should appear
# in the 'Cc' header
self.failIf(rcpt not in cc)
# Check the message has actually been sent to the recipients
self.failIf(rcpt not in rcptlist)
# Build the list of the expected 'Bcc' recipients
bccrcpt = self.env.config.get('notification', 'smtp_always_bcc')
bcclist = [bccr.strip() for bccr in bccrcpt.split(',')]
for rcpt in bcclist:
# Check none of the 'Bcc' recipients appears
# in the 'To' header
self.failIf(rcpt in to)
# Check the message has actually been sent to the recipients
self.failIf(rcpt not in rcptlist)
run_bcc_feature(True)
run_bcc_feature(False)
def test_short_login(self):
"""Email addresses without a FQDN"""
def _test_short_login(enabled):
ticket = Ticket(self.env)
ticket['reporter'] = 'joeuser'
ticket['summary'] = 'This is a summary'
ticket.insert()
# Be sure that at least one email address is valid, so that we
# send a notification even if other addresses are not valid
self.env.config.set('notification', 'smtp_always_cc',
'joe.bar@example.net')
if enabled:
self.env.config.set('notification', 'use_short_addr', 'true')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# Msg should not have a 'To' header
if not enabled:
self.failIf('To' in headers)
else:
tolist = [addr.strip() for addr in headers['To'].split(',')]
# Msg should have a 'Cc' field
self.failIf('Cc' not in headers)
cclist = [addr.strip() for addr in headers['Cc'].split(',')]
if enabled:
# Msg should be delivered to the reporter
self.failIf(ticket['reporter'] not in tolist)
else:
# Msg should not be delivered to joeuser
self.failIf(ticket['reporter'] in cclist)
# Msg should still be delivered to the always_cc list
self.failIf(self.env.config.get('notification',
'smtp_always_cc') not in cclist)
# Validate with and without the short addr option enabled
for enable in [False, True]:
_test_short_login(enable)
def test_default_domain(self):
"""Default domain name"""
def _test_default_domain(enabled):
self.env.config.set('notification', 'always_notify_owner',
'false')
self.env.config.set('notification', 'always_notify_reporter',
'false')
self.env.config.set('notification', 'smtp_always_cc', '')
ticket = Ticket(self.env)
ticket['cc'] = 'joenodom, joewithdom@example.com'
ticket['summary'] = 'This is a summary'
ticket.insert()
# Be sure that at least one email address is valid, so that we
# send a notification even if other addresses are not valid
self.env.config.set('notification', 'smtp_always_cc',
'joe.bar@example.net')
if enabled:
self.env.config.set('notification', 'smtp_default_domain',
'example.org')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# Msg should always have a 'Cc' field
self.failIf('Cc' not in headers)
cclist = [addr.strip() for addr in headers['Cc'].split(',')]
self.failIf('joewithdom@example.com' not in cclist)
self.failIf('joe.bar@example.net' not in cclist)
if not enabled:
self.failIf(len(cclist) != 2)
self.failIf('joenodom' in cclist)
else:
self.failIf(len(cclist) != 3)
self.failIf('joenodom@example.org' not in cclist)
# Validate with and without a default domain
for enable in [False, True]:
_test_default_domain(enable)
def test_email_map(self):
"""Login-to-email mapping"""
self.env.config.set('notification', 'always_notify_owner', 'true')
self.env.config.set('notification', 'always_notify_reporter', 'true')
self.env.config.set('notification', 'smtp_always_cc',
'joe@example.com')
self.env.known_users = [('joeuser', 'Joe User',
'user-joe@example.com'),
('jim@domain', 'Jim User',
'user-jim@example.com')]
ticket = Ticket(self.env)
ticket['reporter'] = 'joeuser'
ticket['owner'] = 'jim@domain'
ticket['summary'] = 'This is a summary'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# Msg should always have a 'To' field
self.failIf('To' not in headers)
tolist = [addr.strip() for addr in headers['To'].split(',')]
# 'To' list should have been resolved to the real email address
self.failIf('user-joe@example.com' not in tolist)
self.failIf('user-jim@example.com' not in tolist)
self.failIf('joeuser' in tolist)
self.failIf('jim@domain' in tolist)
def test_from_author(self):
"""Using the reporter or change author as the notification sender"""
self.env.config.set('notification', 'smtp_from', 'trac@example.com')
self.env.config.set('notification', 'smtp_from_name', 'My Trac')
self.env.config.set('notification', 'smtp_from_author', 'true')
self.env.known_users = [('joeuser', 'Joe User',
'user-joe@example.com'),
('jim@domain', 'Jim User',
'user-jim@example.com'),
('noemail', 'No e-mail', ''),
('noname', '', 'user-noname@example.com')]
# Ticket creation uses the reporter
ticket = Ticket(self.env)
ticket['reporter'] = 'joeuser'
ticket['summary'] = 'This is a summary'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.assertEqual('"Joe User" <user-joe@example.com>', headers['From'])
# Ticket change uses the change author
ticket['summary'] = 'Modified summary'
ticket.save_changes('jim@domain', 'Made some changes')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=False, modtime=ticket['changetime'])
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.assertEqual('"Jim User" <user-jim@example.com>', headers['From'])
# Known author without name uses e-mail address only
ticket['summary'] = 'Final summary'
ticket.save_changes('noname', 'Final changes')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=False, modtime=ticket['changetime'])
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.assertEqual('user-noname@example.com', headers['From'])
# Known author without e-mail uses smtp_from and smtp_from_name
ticket['summary'] = 'Other summary'
ticket.save_changes('noemail', 'More changes')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=False, modtime=ticket['changetime'])
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.assertEqual('"My Trac" <trac@example.com>', headers['From'])
# Unknown author with name and e-mail address
ticket['summary'] = 'Some summary'
ticket.save_changes('Test User <test@example.com>', 'Some changes')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=False, modtime=ticket['changetime'])
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.assertEqual('"Test User" <test@example.com>', headers['From'])
# Unknown author with e-mail address only
ticket['summary'] = 'Some summary'
ticket.save_changes('test@example.com', 'Some changes')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=False, modtime=ticket['changetime'])
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.assertEqual('test@example.com', headers['From'])
# Unknown author uses smtp_from and smtp_from_name
ticket['summary'] = 'Better summary'
ticket.save_changes('unknown', 'Made more changes')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=False, modtime=ticket['changetime'])
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.assertEqual('"My Trac" <trac@example.com>', headers['From'])
def test_ignore_domains(self):
"""Non-SMTP domain exclusion"""
self.env.config.set('notification', 'ignore_domains',
'example.com, example.org')
self.env.known_users = \
[('kerberos@example.com', 'No Email', ''),
('kerberos@example.org', 'With Email', 'kerb@example.net')]
ticket = Ticket(self.env)
ticket['reporter'] = 'kerberos@example.com'
ticket['owner'] = 'kerberos@example.org'
ticket['summary'] = 'This is a summary'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# Msg should always have a 'To' field
self.failIf('To' not in headers)
tolist = [addr.strip() for addr in headers['To'].split(',')]
# 'To' list should not contain addresses with non-SMTP domains
self.failIf('kerberos@example.com' in tolist)
self.failIf('kerberos@example.org' in tolist)
# 'To' list should have been resolved to the actual email address
self.failIf('kerb@example.net' not in tolist)
self.failIf(len(tolist) != 1)
def test_admit_domains(self):
"""SMTP domain inclusion"""
self.env.config.set('notification', 'admit_domains',
'localdomain, server')
ticket = Ticket(self.env)
ticket['reporter'] = 'joeuser@example.com'
ticket['summary'] = 'This is a summary'
ticket['cc'] = 'joe.user@localdomain, joe.user@unknown, ' \
'joe.user@server'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# Msg should always have a 'To' field
self.failIf('Cc' not in headers)
cclist = [addr.strip() for addr in headers['Cc'].split(',')]
# 'Cc' list should contain addresses with SMTP included domains
self.failIf('joe.user@localdomain' not in cclist)
self.failIf('joe.user@server' not in cclist)
# 'Cc' list should not contain non-FQDN domains
self.failIf('joe.user@unknown' in cclist)
self.failIf(len(cclist) != 2+2)
def test_multiline_header(self):
"""Encoded headers split into multiple lines"""
self.env.config.set('notification', 'mime_encoding', 'qp')
ticket = Ticket(self.env)
ticket['reporter'] = 'joe.user@example.org'
# Forces non-ascii characters
ticket['summary'] = u'A_very %s súmmäry' % u' '.join(['long'] * 20)
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# Discards the project name & ticket number
subject = headers['Subject']
summary = subject[subject.find(':')+2:]
self.failIf(ticket['summary'] != summary)
def test_mimebody_b64(self):
"""MIME Base64/utf-8 encoding"""
self.env.config.set('notification', 'mime_encoding', 'base64')
ticket = Ticket(self.env)
ticket['reporter'] = 'joe.user@example.org'
ticket['summary'] = u'This is a long enough summary to cause Trac ' \
u'to generate a multi-line (2 lines) súmmäry'
ticket.insert()
self._validate_mimebody((base64, 'base64', 'utf-8'), \
ticket, True)
def test_mimebody_qp(self):
"""MIME QP/utf-8 encoding"""
self.env.config.set('notification', 'mime_encoding', 'qp')
ticket = Ticket(self.env)
ticket['reporter'] = 'joe.user@example.org'
ticket['summary'] = u'This is a long enough summary to cause Trac ' \
u'to generate a multi-line (2 lines) súmmäry'
ticket.insert()
self._validate_mimebody((quopri, 'quoted-printable', 'utf-8'),
ticket, True)
def test_mimebody_none_7bit(self):
"""MIME None encoding resulting in 7bit"""
self.env.config.set('notification', 'mime_encoding', 'none')
ticket = Ticket(self.env)
ticket['reporter'] = 'joe.user'
ticket['summary'] = u'This is a summary'
ticket.insert()
self._validate_mimebody((None, '7bit', 'utf-8'), \
ticket, True)
def test_mimebody_none_8bit(self):
"""MIME None encoding resulting in 8bit"""
self.env.config.set('notification', 'mime_encoding', 'none')
ticket = Ticket(self.env)
ticket['reporter'] = 'joe.user'
ticket['summary'] = u'This is a summary for Jöe Usèr'
ticket.insert()
self._validate_mimebody((None, '8bit', 'utf-8'), \
ticket, True)
def test_md5_digest(self):
"""MD5 digest w/ non-ASCII recipient address (#3491)"""
self.env.config.set('notification', 'always_notify_owner', 'false')
self.env.config.set('notification', 'always_notify_reporter', 'true')
self.env.config.set('notification', 'smtp_always_cc', '')
ticket = Ticket(self.env)
ticket['reporter'] = u'"Jöe Usèr" <joe.user@example.org>'
ticket['summary'] = u'This is a summary'
ticket.insert()
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
def test_updater(self):
"""No-self-notification option"""
def _test_updater(disable):
if disable:
self.env.config.set('notification', 'always_notify_updater',
'false')
ticket = Ticket(self.env)
ticket['reporter'] = 'joe.user@example.org'
ticket['summary'] = u'This is a súmmäry'
ticket['cc'] = 'joe.bar@example.com'
ticket.insert()
ticket['component'] = 'dummy'
now = datetime.now(utc)
ticket.save_changes('joe.bar2@example.com', 'This is a change',
when=now)
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=False, modtime=now)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
# checks for header existence
self.failIf(not headers)
# checks for updater in the 'To' recipient list
self.failIf('To' not in headers)
tolist = [addr.strip() for addr in headers['To'].split(',')]
if disable:
self.failIf('joe.bar2@example.com' in tolist)
else:
self.failIf('joe.bar2@example.com' not in tolist)
# Validate with and without a default domain
for disable in [False, True]:
_test_updater(disable)
def test_updater_only(self):
"""Notification w/ updater, w/o any other recipient (#4188)"""
self.env.config.set('notification', 'always_notify_owner', 'false')
self.env.config.set('notification', 'always_notify_reporter', 'false')
self.env.config.set('notification', 'always_notify_updater', 'true')
self.env.config.set('notification', 'smtp_always_cc', '')
self.env.config.set('notification', 'smtp_always_bcc', '')
self.env.config.set('notification', 'use_public_cc', 'false')
self.env.config.set('notification', 'use_short_addr', 'false')
self.env.config.set('notification', 'smtp_replyto',
'joeuser@example.net')
ticket = Ticket(self.env)
ticket['summary'] = 'Foo'
ticket.insert()
ticket['summary'] = 'Bar'
ticket['component'] = 'New value'
ticket.save_changes('joe@example.com', 'this is my comment')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
recipients = notifysuite.smtpd.get_recipients()
self.failIf(recipients is None)
self.failIf(len(recipients) != 1)
self.failIf(recipients[0] != 'joe@example.com')
def test_updater_is_reporter(self):
"""Notification to reporter w/ updater option disabled (#3780)"""
self.env.config.set('notification', 'always_notify_owner', 'false')
self.env.config.set('notification', 'always_notify_reporter', 'true')
self.env.config.set('notification', 'always_notify_updater', 'false')
self.env.config.set('notification', 'smtp_always_cc', '')
self.env.config.set('notification', 'smtp_always_bcc', '')
self.env.config.set('notification', 'use_public_cc', 'false')
self.env.config.set('notification', 'use_short_addr', 'false')
self.env.config.set('notification', 'smtp_replyto',
'joeuser@example.net')
ticket = Ticket(self.env)
ticket['summary'] = 'Foo'
ticket['reporter'] = u'joe@example.org'
ticket.insert()
ticket['summary'] = 'Bar'
ticket['component'] = 'New value'
ticket.save_changes('joe@example.org', 'this is my comment')
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=True)
recipients = notifysuite.smtpd.get_recipients()
self.failIf(recipients is None)
self.failIf(len(recipients) != 1)
self.failIf(recipients[0] != 'joe@example.org')
def _validate_mimebody(self, mime, ticket, newtk):
"""Body of a ticket notification message"""
(mime_decoder, mime_name, mime_charset) = mime
tn = TicketNotifyEmail(self.env)
tn.notify(ticket, newticket=newtk)
message = notifysuite.smtpd.get_message()
(headers, body) = parse_smtp_message(message)
self.failIf('MIME-Version' not in headers)
self.failIf('Content-Type' not in headers)
self.failIf('Content-Transfer-Encoding' not in headers)
self.failIf(not re.compile(r"1.\d").match(headers['MIME-Version']))
type_re = re.compile(r'^text/plain;\scharset="([\w\-\d]+)"$')
charset = type_re.match(headers['Content-Type'])
self.failIf(not charset)
charset = charset.group(1)
self.assertEqual(charset, mime_charset)
self.assertEqual(headers['Content-Transfer-Encoding'], mime_name)
# checks the width of each body line
for line in body.splitlines():
self.failIf(len(line) > MAXBODYWIDTH)
# attempts to decode the body, following the specified MIME endoding
# and charset
try:
if mime_decoder:
body = mime_decoder.decodestring(body)
body = unicode(body, charset)
except Exception, e:
raise AssertionError, e
# now processes each line of the body
bodylines = body.splitlines()
# body starts with one of more summary lines, first line is prefixed
# with the ticket number such as #<n>: summary
# finds the banner after the summary
banner_delim_re = re.compile(r'^\-+\+\-+$')
bodyheader = []
while ( not banner_delim_re.match(bodylines[0]) ):
bodyheader.append(bodylines.pop(0))
# summary should be present
self.failIf(not bodyheader)
# banner should not be empty
self.failIf(not bodylines)
# extracts the ticket ID from the first line
(tknum, bodyheader[0]) = bodyheader[0].split(' ', 1)
self.assertEqual(tknum[0], '#')
try:
tkid = int(tknum[1:-1])
self.assertEqual(tkid, 1)
except ValueError:
raise AssertionError, "invalid ticket number"
self.assertEqual(tknum[-1], ':')
summary = ' '.join(bodyheader)
self.assertEqual(summary, ticket['summary'])
# now checks the banner contents
self.failIf(not banner_delim_re.match(bodylines[0]))
banner = True
footer = None
props = {}
for line in bodylines[1:]:
# detect end of banner
if banner_delim_re.match(line):
banner = False
continue
if banner:
# parse banner and fill in a property dict
properties = line.split('|')
self.assertEqual(len(properties), 2)
for prop in properties:
if prop.strip() == '':
continue
(k, v) = prop.split(':')
props[k.strip().lower()] = v.strip()
# detect footer marker (weak detection)
if not footer:
if line.strip() == '--':
footer = 0
continue
# check footer
if footer != None:
footer += 1
# invalid footer detection
self.failIf(footer > 3)
# check ticket link
if line[:11] == 'Ticket URL:':
ticket_link = self.env.abs_href.ticket(ticket.id)
self.assertEqual(line[12:].strip(), "<%s>" % ticket_link)
# note project title / URL are not validated yet
# ticket properties which are not expected in the banner
xlist = ['summary', 'description', 'comment', 'time', 'changetime']
# check banner content (field exists, msg value matches ticket value)
for p in [prop for prop in ticket.values.keys() if prop not in xlist]:
self.failIf(not props.has_key(p))
# Email addresses might be obfuscated
if '@' in ticket[p] and '@' in props[p]:
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.