repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
grow/grow | grow/commands/subcommands/stage.py | 1 | 4986 | """Subcommand for staging pod to remote server."""
import os
import click
from grow.commands import shared
from grow.common import bulk_errors
from grow.common import rc_config
from grow.common import utils
from grow.deployments import stats
from grow.deployments.destinations import base
from grow.deployments.destinations import webreview_destination
from grow.extensions import hooks
from grow.performance import docs_loader
from grow.pods import pods
from grow.rendering import renderer
from grow import storage
CFG = rc_config.RC_CONFIG.prefixed('grow.stage')
# pylint: disable=too-many-locals
@click.command()
@shared.pod_path_argument
@click.option('--remote',
              help='WebReview remote address (example: '
                   ' example.com/owner/project). A remote must be specified'
                   ' either using --remote or by configuring a deployment'
                   ' named "webreview" in podspec.yaml.')
@click.option('--file', '--pod-path', 'pod_paths',
              help='Build only pages affected by content files.', multiple=True)
@click.option('--subdomain', help='Assign a subdomain to this build.')
@click.option('--api-key',
              help='API key for authorizing staging to WebReview projects.')
@shared.locale_option(help_text='Filter build routes to specific locale.')
@shared.force_untranslated_option(CFG)
@shared.preprocess_option(CFG)
@shared.threaded_option(CFG)
@shared.work_dir_option
@shared.routes_file_option()
@click.pass_context
def stage(context, pod_path, remote, pod_paths, locale, preprocess, subdomain, api_key,
          force_untranslated, threaded, work_dir, routes_file):
    """Stages a build on a WebReview server."""
    root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
    auth = context.parent.params.get('auth')
    try:
        pod = pods.Pod(root, storage=storage.FileStorage)
        with pod.profile.timer('grow_stage'):
            deployment = _get_deployment(pod, remote, subdomain, api_key)
            # Use the deployment's environment for preprocessing and all
            # later steps.
            pod.set_env(deployment.get_env())
            # Always clear the cache when building.
            pod.podcache.reset()
            # Translations are only required when the podspec asks for them
            # AND the user has not explicitly forced an untranslated build.
            localization = pod.podspec.localization
            require_translations = (
                localization
                and localization.get('require_translations', False)
                and not force_untranslated)
            if auth:
                deployment.login(auth)
            if preprocess:
                pod.preprocess()
            repo = utils.get_git_repo(pod.root)
            pod.router.use_simple()
            # A build is "partial" when restricted to specific files/locales.
            is_partial = bool(pod_paths) or bool(locale)
            if pod_paths:
                cleaned_paths = [pod.clean_pod_path(path) for path in pod_paths]
                pod.router.add_pod_paths(cleaned_paths)
            elif routes_file:
                pod.router.from_data(pod.read_json(routes_file))
            else:
                pod.router.add_all()
            if locale:
                pod.router.filter('whitelist', locales=list(locale))
            if not work_dir:
                # Preload the documents used by the paths after filtering.
                docs_loader.DocsLoader.load_from_routes(pod, pod.router.routes)
            stats_obj = stats.Stats(pod, paths=pod.router.routes.paths)
            content_generator = deployment.dump(
                pod, source_dir=work_dir, use_threading=threaded)
            content_generator = hooks.generator_wrapper(
                pod, 'pre_deploy', content_generator, 'stage')
            deployment.deploy(
                content_generator, stats=stats_obj, repo=repo, confirm=False,
                test=False, require_translations=require_translations,
                is_partial=is_partial)
            pod.podcache.write()
    except bulk_errors.BulkErrors as err:
        # Write the podcache files even when there are rendering errors.
        pod.podcache.write()
        bulk_errors.display_bulk_errors(err)
        raise click.Abort()
    # except base.Error as err:
    #     raise click.ClickException(str(err))
    except pods.Error as err:
        raise click.ClickException(str(err))
    return pod
def _get_deployment(pod, remote, subdomain, api_key):
    """Return the WebReview deployment destination for this staging run.

    An explicit ``--remote`` address wins; otherwise the deployment named
    "webreview" from podspec.yaml is used, with optional subdomain/api-key
    overrides applied.
    """
    if remote:
        dest_class = webreview_destination.WebReviewDestination
        return dest_class(dest_class.Config(remote=remote, name=subdomain))
    try:
        deployment = pod.get_deployment('webreview')
        if subdomain:
            deployment.config.subdomain = subdomain
        if api_key:
            deployment.webreview.api_key = api_key
        return deployment
    except ValueError:
        raise click.ClickException(
            'Must provide --remote or specify a deployment named '
            '"webreview" in podspec.yaml.')
| mit | 1a96940a7989a193a60c6bdf9836490f | 40.55 | 99 | 0.629563 | 4.204047 | false | false | false | false |
grow/grow | grow/translators/translators.py | 1 | 1107 | from . import google_translator_toolkit
from . import google_sheets
from grow.common import utils
from grow.extensions import extension_importer
_kinds_to_classes = {}
_builtins = (
google_translator_toolkit.GoogleTranslatorToolkitTranslator,
google_sheets.GoogleSheetsTranslator,
)
def install_translator(translator):
    """Register *translator* in the module registry, keyed by its KIND."""
    _kinds_to_classes[translator.KIND] = translator
def install_builtins():
    """Register every built-in translator class.

    Safe to call repeatedly; re-registering a kind simply overwrites the
    existing entry with the same class.
    """
    # NOTE: the previous ``global _destination_kinds_to_classes`` statement
    # named a variable that does not exist anywhere in this module (the
    # registry is ``_kinds_to_classes``), and no global declaration is needed
    # because the registry dict is only mutated, never rebound.
    for builtin in _builtins:
        install_translator(builtin)
def create_translator(pod, kind, config, project_title=None, instructions=None):
    """Instantiate the translator registered under *kind*.

    Raises:
        ValueError: If no translator has been registered for *kind*.
    """
    install_builtins()
    try:
        translator_cls = _kinds_to_classes[kind]
    except KeyError:
        raise ValueError('No translator exists: "{}"'.format(kind))
    return translator_cls(pod=pod, config=config, project_title=project_title,
                          instructions=instructions)
def register_extensions(extension_paths, pod_root):
    """Locate translator extensions on *extension_paths* and install them."""
    for extension_path in extension_paths:
        extension_cls = extension_importer.ExtensionImporter.find_extension(
            extension_path, pod_root)
        install_translator(extension_cls)
| mit | c715c40bcbf8ddc303cf2de620171d7e | 28.918919 | 80 | 0.719964 | 4.040146 | false | true | false | false |
grow/grow | grow/sdk/installers/nvm_installer.py | 2 | 1292 | """Nvm installer class."""
import subprocess
from grow.sdk.installers import base_installer
class NvmInstaller(base_installer.BaseInstaller):
    """Installer that provisions node through nvm when the pod has a .nvmrc."""

    KIND = 'nvm'

    @property
    def should_run(self):
        """Should the installer run?"""
        return self.pod.file_exists('/.nvmrc')

    def check_prerequisites(self):
        """Check if required prerequisites are installed or available."""
        status_command = '. $NVM_DIR/nvm.sh && nvm --version > /dev/null 2>&1'
        exit_code = subprocess.call(
            status_command, **self.subprocess_args(shell=True))
        # Shells report exit status 127 for "command not found".
        if exit_code == 127:
            raise base_installer.MissingPrerequisiteError(
                'The `nvm` command was not found.',
                install_commands=[
                    'Download nvm from https://github.com/creationix/nvm'])

    def install(self):
        """Install dependencies."""
        install_command = '. $NVM_DIR/nvm.sh && nvm install'
        process = subprocess.Popen(
            install_command, **self.subprocess_args(shell=True))
        # A non-zero exit status means the install failed.
        if process.wait():
            raise base_installer.InstallError(
                'There was an error running `{}`.'.format(install_command))
| mit | 659266bd97e6a0918767f44cc5f063bd | 33.918919 | 86 | 0.609907 | 4.07571 | false | false | false | false |
grow/grow | grow/commands/group.py | 1 | 2411 | """Base command for grow."""
from grow.deployments.destinations import local as local_destination
import click
import os
import pkg_resources
version = pkg_resources.get_distribution('grow').version
HELP_TEXT = ('Grow is a declarative file-based website generator. Read docs at '
'https://grow.dev. This is version {}.'.format(version))
# pylint: disable=unused-argument
@click.group(help=HELP_TEXT)
@click.version_option(prog_name='grow', version=version)
@click.option('--auth', help='Information used to sign in to services that'
              ' require authentication. --auth should be an email address.',
              envvar='GROW_AUTH')
@click.option('--clear-auth', default=False, is_flag=True,
              help='Clears stored auth information.')
@click.option('--auth-key-file', help='Path to a private key file used for'
              ' services that require authentication.', envvar='GROW_KEY_FILE')
@click.option(
    '--interactive-auth', default=False, is_flag=True,
    envvar='INTERACTIVE_AUTH',
    help='Whether to automatically open an authorization page in your'
         ' default web browser for any steps that require authentication.'
         ' If you are running Grow on a machine with access to a web browser,'
         ' you may use --interactive-auth to automatically open the web'
         ' browser. By default, this option is turned off, requiring you to'
         ' manually copy and paste an authorization code.')
@click.option('--profile',
              default=False, is_flag=True,
              help='Show report of pod operation timing for performance analysis.')
def grow(auth, clear_auth, auth_key_file, interactive_auth, profile):
    """Grow CLI command."""
    # Persist the CLI auth flags through environment variables so that code
    # run later in the process (and any subprocesses) can read them.
    if interactive_auth not in (None, False):
        os.environ['INTERACTIVE_AUTH'] = str(interactive_auth)
    if auth is not None:
        os.environ['AUTH_EMAIL_ADDRESS'] = str(auth)
    if auth_key_file is not None:
        os.environ['AUTH_KEY_FILE'] = str(auth_key_file)
    if clear_auth:
        os.environ['CLEAR_AUTH'] = '1'
@grow.resultcallback()
def process_subcommands(pod, profile, **_):
    """Handle flags that need to process after the sub command."""
    # Only emit a profiling report when a subcommand returned a pod and the
    # --profile flag was given.
    if not pod or not profile:
        return
    destination = local_destination.LocalDestination(
        local_destination.Config())
    destination.pod = pod
    destination.export_profile_report()
| mit | 7ace357c49ea934ec35969908912ccbd | 40.568966 | 83 | 0.669017 | 4.058923 | false | false | false | false |
grow/grow | grow/common/structures_test.py | 1 | 6032 | """Tests for structures."""
import unittest
from grow.common import structures
from operator import itemgetter
class AttributeDictTestCase(unittest.TestCase):
    """Test the attribute dict structure."""

    def test_attributes(self):
        """Keys are accessible as attributes."""
        instance = structures.AttributeDict({'key': 'value'})
        self.assertEqual('value', instance['key'])
        self.assertEqual('value', instance.key)
class DeepReferenceDictTestCase(unittest.TestCase):
    """Test the deep reference dict structure."""

    def test_deep_reference(self):
        """Delimited keys are accessible."""
        instance = structures.DeepReferenceDict({
            'key': {
                'sub_key': {
                    'value': 'foo',
                },
            },
        })
        self.assertEqual('foo', instance['key']['sub_key']['value'])
        self.assertEqual('foo', instance['key.sub_key.value'])

    def test_deep_reference_error(self):
        """Missing keys raise error."""
        instance = structures.DeepReferenceDict({'key': {}})
        with self.assertRaises(KeyError):
            _ = instance['key.sub_key.value']
class SortedCollectionTestCase(unittest.TestCase):
    """Test the sorted collection structure."""

    def setUp(self):
        # Collection sorted by the third field (age) of each record.
        self.key = itemgetter(2)
        self.coll = structures.SortedCollection(key=self.key)
        for record in [
                ('roger', 'young', 30),
                ('angela', 'jones', 28),
                ('bill', 'smith', 22),
                ('david', 'thomas', 32)]:
            self.coll.insert(record)

    def test_clear(self):
        """Clears the collection."""
        self.assertEqual(4, len(self.coll))
        self.coll.clear()
        self.assertEqual(0, len(self.coll))

    def test_contains(self):
        """Contains matches."""
        self.assertTrue(('roger', 'young', 30) in self.coll)
        self.assertFalse(('bob', 'young', 30) in self.coll)

    def test_copy(self):
        """Copies the collection."""
        duplicate = self.coll.copy()
        self.assertEqual(4, len(self.coll))
        self.assertEqual(4, len(duplicate))
        # Mutating the original must not affect the copy.
        self.coll.insert(('roger', 'young', 30))
        self.assertEqual(5, len(self.coll))
        self.assertEqual(4, len(duplicate))

    def test_count(self):
        """Counts matches."""
        self.assertEqual(1, self.coll.count(('roger', 'young', 30)))
        self.coll.insert(('roger', 'young', 30))
        self.assertEqual(2, self.coll.count(('roger', 'young', 30)))

    def test_find(self):
        """Find first match."""
        self.assertEqual(('angela', 'jones', 28), self.coll.find(28))
        with self.assertRaises(ValueError):
            self.coll.find(39)

    def test_get_item(self):
        """Indexing returns items in sorted order."""
        self.assertEqual(('bill', 'smith', 22), self.coll[0])

    def test_ge(self):
        """Greater than equal."""
        self.assertEqual(('angela', 'jones', 28), self.coll.find_ge(28))
        with self.assertRaises(ValueError):
            self.coll.find_ge(40)

    def test_gt(self):
        """Greater than."""
        self.assertEqual(('roger', 'young', 30), self.coll.find_gt(28))
        with self.assertRaises(ValueError):
            self.coll.find_gt(40)

    def test_index(self):
        """Index from item."""
        match = self.coll.find_gt(28)
        self.assertEqual(2, self.coll.index(match))

    def test_insert_right(self):
        """Inserting to the right keeps duplicates."""
        self.assertEqual(1, self.coll.count(('roger', 'young', 30)))
        self.coll.insert_right(('roger', 'young', 30))
        self.assertEqual(2, self.coll.count(('roger', 'young', 30)))

    def test_key(self):
        """Changing the key re-sorts the collection."""
        self.assertEqual(self.key, self.coll.key)
        self.coll.key = itemgetter(0)  # now sort by first name
        self.assertEqual([('angela', 'jones', 28),
                          ('bill', 'smith', 22),
                          ('david', 'thomas', 32),
                          ('roger', 'young', 30)], list(self.coll))

    def test_le(self):
        """Less than equal."""
        self.assertEqual(('angela', 'jones', 28), self.coll.find_le(28))
        with self.assertRaises(ValueError):
            self.coll.find_le(10)

    def test_lt(self):
        """Less than."""
        self.assertEqual(('bill', 'smith', 22), self.coll.find_lt(28))
        with self.assertRaises(ValueError):
            self.coll.find_lt(10)

    def test_remove(self):
        """Removes matches."""
        item = ('roger', 'young', 30)
        self.assertTrue(item in self.coll)
        self.coll.remove(item)
        self.assertFalse(item in self.coll)

    def test_repr(self):
        """Output of repr."""
        actual = repr(self.coll)
        self.assertIn('SortedCollection(', actual)
        self.assertIn("('bill', 'smith', 22)", actual)
        self.assertIn("('angela', 'jones', 28)", actual)
        self.assertIn("('roger', 'young', 30)", actual)
        self.assertIn("('david', 'thomas', 32)", actual)

    def test_sorting(self):
        """Collection is sorted."""
        self.assertEqual([('bill', 'smith', 22),
                          ('angela', 'jones', 28),
                          ('roger', 'young', 30),
                          ('david', 'thomas', 32)], list(self.coll))

    def test_sorting_default(self):
        """Collection is sorted using a default for the value."""
        self.key = itemgetter(2)
        self.coll = structures.SortedCollection(key=self.key, default=100)
        for record in [
                ('roger', 'young', None),
                ('angela', 'jones', 28),
                ('bill', 'smith', 22),
                ('david', 'thomas', 32)]:
            self.coll.insert(record)
        # The None key sorts using the default (100), i.e. last.
        self.assertEqual(
            [
                ('bill', 'smith', 22),
                ('angela', 'jones', 28),
                ('david', 'thomas', 32),
                ('roger', 'young', None),
            ], list(self.coll))
| mit | 3545cbbec0aeb458d6bf22c160eee01d | 32.511111 | 74 | 0.533156 | 3.861716 | false | true | false | false |
grow/grow | grow/commands/subcommands/translations_filter.py | 1 | 2416 | """Subcommand for filtering untranslated messages."""
import os
import click
from grow.commands import shared
from grow.common import rc_config
from grow.pods import pods
from grow import storage
CFG = rc_config.RC_CONFIG.prefixed('grow.translations.filter')
@click.command(name='filter')
@shared.pod_path_argument
@click.option('-o', type=str, default=None,
              help='Where to write the extracted translation catalog. The path'
                   ' must be relative to the pod\'s root.')
@click.option('-f', default=CFG.get('force', False), is_flag=True,
              help='Whether to force an update when writing localized message'
                   ' catalogs.')
@shared.include_header_option(CFG)
@shared.include_obsolete_option(CFG)
@shared.locale_option(
    help_text='Which locale(s) to analyze when creating template catalogs'
              ' that contain only untranslated messages. This option is'
              ' only applicable when using --untranslated.')
@shared.localized_option(CFG)
@shared.out_dir_option(
    CFG, help_text=('Where to write extracted localized translation catalogs.'
                    ' The path must be relative to the pod\'s root. This option'
                    ' is only applicable when using --localized.'))
@shared.path_option
@shared.exclude_path_option
def translations_filter(pod_path, locale, o, include_obsolete, localized, path,
                        exclude_path, include_header, out_dir, f):
    """Filters untranslated messages from catalogs into new catalogs."""
    root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
    pod = pods.Pod(root, storage=storage.FileStorage)
    with pod.profile.timer('grow_translations_filter'):
        catalogs = pod.get_catalogs()
        # Default to filtering every known locale.
        if not locale:
            locale = catalogs.list_locales()
        # Refuse to clobber an existing output directory unless forced.
        if out_dir and pod.file_exists(out_dir) and not f:
            raise click.UsageError(
                '{} exists. You must specify a directory that does not exist, or '
                'use the "-f" flag, which will force update catalogs within the '
                'specified directory.'.format(out_dir))
        catalogs.filter(out_path=o, out_dir=out_dir,
                        include_obsolete=include_obsolete,
                        localized=localized, paths=path,
                        exclude_paths=exclude_path,
                        include_header=include_header, locales=locale)
    return pod
| mit | 1891d06a095d668b45d6252348e6a045 | 44.584906 | 93 | 0.651904 | 4.108844 | false | true | false | false |
reviewboard/reviewboard | reviewboard/accounts/trophies.py | 1 | 8773 | import re
from django.utils.translation import gettext_lazy as _
from djblets.registries.registry import (ALREADY_REGISTERED,
ATTRIBUTE_REGISTERED,
DEFAULT_ERRORS,
NOT_REGISTERED,
Registry,
UNREGISTER)
from djblets.urls.staticfiles import static_lazy
from djblets.util.decorators import augment_method_from
class TrophyType(object):
    """Base class for a type of trophy.

    Trophies are achievements that can be awarded to users based on some
    aspect of a review request. When a review request is filed, each registered
    trophy type (managed by the :py:data:`trophies` registry) will be checked
    using :py:meth:`qualifies` to see if the trophy can be awarded. If so, the
    trophy will be recorded and shown on the review request page.

    A trophy should include a displayable name, a category (essentially the
    ID of the trophy), and details for the trophy image.
    """

    #: The category of the trophy.
    #:
    #: This is the string ID of the trophy. For historical reasons, it's
    #: referred to as a category and not an ID.
    category = None

    #: The name of the trophy.
    name = None

    #: URLs for the trophy images.
    #:
    #: This is a dictionary of images, where each key is a resolution
    #: specifier (``1x``, ``2x``, etc.), and the value is a URL.
    #:
    #: Each must have widths/heights that are multipliers on the base
    #: width/height for the ``1x`` specifier.
    image_urls = {}

    #: The width of the base image.
    image_width = None

    #: The height of the base image.
    #:
    #: It is recommended to use a height of 48px max.
    image_height = None

    #: Format string used by :py:meth:`format_display_text`.
    #:
    #: Subclasses must override this. Declaring a ``None`` default here makes
    #: the explicit check in :py:meth:`format_display_text` reachable;
    #: previously the attribute was undeclared, so accessing it on a subclass
    #: that forgot to define it raised AttributeError instead of the intended
    #: NotImplementedError.
    display_format_str = None

    def get_display_text(self, trophy):
        """Return the text to display in the trophy banner.

        Args:
            trophy (reviewboard.accounts.models.Trophy):
                The stored trophy information.

        Returns:
            unicode:
            The display text for the trophy banner.
        """
        raise NotImplementedError

    def qualifies(self, review_request):
        """Return whether this trophy should be given to this review request.

        Args:
            review_request (reviewboard.reviews.models.ReviewRequest):
                The review request to check for the trophy.

        Returns:
            bool:
            ``True`` if the trophy should be given, or ``False`` if not.
        """
        raise NotImplementedError

    def format_display_text(self, request, trophy, **kwargs):
        """Format the display text for the trophy.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            trophy (reviewboard.accounts.models.Trophy):
                The trophy instance.

            **kwargs (dict):
                Additional keyword arguments to use for formatting.

        Returns:
            unicode:
            The rendered text.

        Raises:
            NotImplementedError:
                The subclass did not define :py:attr:`display_format_str`.
        """
        if self.display_format_str is None:
            # Fixed: the message previously named a non-existent
            # "format_display_str" attribute.
            raise NotImplementedError(
                '%s does not define the display_format_str attribute.'
                % type(self).__name__
            )

        return self.display_format_str % dict(kwargs, **{
            'recipient': trophy.user.get_profile().get_display_name(
                getattr(request, 'user', None)),
            'review_request_id': trophy.review_request.display_id,
        })
class MilestoneTrophy(TrophyType):
    """A milestone trophy.

    It is awarded if review request ID is greater than 1000 and is a non-zero
    digit followed by only zeroes (e.g. 1000, 5000, 10000).
    """

    category = 'milestone'

    #: The name of the trophy.
    #:
    #: The base class contract and the sibling FishTrophy both use ``name``;
    #: this class historically exposed ``title`` instead, so ``title`` is
    #: kept as a backward-compatible alias.
    name = _('Milestone Trophy')
    title = name

    image_urls = {
        '1x': static_lazy('rb/images/trophies/sparkly.png'),
        '2x': static_lazy('rb/images/trophies/sparkly@2x.png'),
    }

    image_width = 33
    image_height = 35

    display_format_str = _(
        '%(recipient)s got review request #%(review_request_id)d!'
    )

    def qualifies(self, review_request):
        """Return whether this trophy should be given to this review request.

        Args:
            review_request (reviewboard.reviews.models.ReviewRequest):
                The review request to check for the trophy.

        Returns:
            bool:
            ``True`` if the trophy should be given, or ``False`` if not.
        """
        # A non-zero digit followed only by zeroes, e.g. 1000, 20000.
        return (
            review_request.display_id >= 1000 and
            re.match(r'^[1-9]0+$', str(review_request.display_id))
        )
class FishTrophy(TrophyType):
    """A fish trophy.

    Give a man a fish, he'll waste hours trying to figure out why.
    """

    category = 'fish'
    name = _('Fish Trophy')

    image_urls = {
        '1x': static_lazy('rb/images/trophies/fish.png'),
        '2x': static_lazy('rb/images/trophies/fish@2x.png'),
    }

    image_width = 33
    image_height = 37

    display_format_str = _('%(recipient)s got a fish trophy!')

    def qualifies(self, review_request):
        """Return whether this trophy should be given to this review request.

        Args:
            review_request (reviewboard.reviews.models.ReviewRequest):
                The review request to check for the trophy.

        Returns:
            bool:
            ``True`` if the trophy should be given, or ``False`` if not.
        """
        # Awarded for palindromic IDs of at least four digits.
        display_id = review_request.display_id
        id_str = str(display_id)
        return display_id >= 1000 and id_str == id_str[::-1]
class UnknownTrophy(TrophyType):
    """A trophy with an unknown category.

    The data for this trophy exists in the database but its category does not
    match the category of any registered trophy types.
    """

    name = 'Unknown Trophy'
class TrophyRegistry(Registry):
    """Registry of trophy types, looked up by their ``category`` attribute."""

    lookup_attrs = ('category',)

    default_errors = dict(DEFAULT_ERRORS, **{
        ALREADY_REGISTERED: _(
            'Could not register trophy type %(item)s. This trophy type is '
            'already registered or its category conflicts with another trophy.'
        ),
        ATTRIBUTE_REGISTERED: _(
            'Could not register trophy type %(item)s: Another trophy type '
            '(%(duplicate)s) is already registered with the same category.'
        ),
        NOT_REGISTERED: _(
            'No trophy type was found matching "%(attr_value)s".'
        ),
        UNREGISTER: _(
            'Could not unregister trophy type %(item)s: This trophy type '
            'was not yet registered.'
        ),
    })

    @augment_method_from(Registry)
    def register(self, trophy_type):
        """Register a new trophy type.

        Args:
            trophy_type (type):
                The trophy type (subclass of :py:class:`TrophyType`) to
                register.

        Raises:
            djblets.registries.errors.RegistrationError:
                The :py:attr:`TrophyType.category` value is missing on the
                trophy.

            djblets.registries.errors.AlreadyRegisteredError:
                This trophy type, or another with the same category, was
                already registered.
        """
        pass

    @augment_method_from(Registry)
    def unregister(self, trophy_type):
        """Unregister a trophy type.

        Args:
            trophy_type (type):
                The trophy type (subclass of :py:class:`TrophyType`) to
                unregister.

        Raises:
            djblets.registries.errors.ItemLookupError:
                This trophy type was not registered.
        """
        pass

    def get_for_category(self, category):
        """Return the TrophyType instance matching a given trophy category.

        If there's no registered trophy for the category,
        :py:class:`UnknownTrophy` will be returned.

        Args:
            category (unicode):
                The stored category for the trophy.

        Returns:
            TrophyType:
            The trophy matching the given category.
        """
        try:
            return self.get('category', category)
        except self.lookup_error_class:
            return UnknownTrophy

    def get_defaults(self):
        """Return the default trophies for the registry.

        This is used internally by the parent registry class to populate the
        list of default, built-in trophies available to review requests.

        Returns:
            list of TrophyType:
            The list of default trophies.
        """
        return [
            MilestoneTrophy,
            FishTrophy,
        ]


#: The registry of available trophies.
trophies_registry = TrophyRegistry()
| mit | 797b06ab5079ab70f5a437405663ca8d | 29.674825 | 79 | 0.584977 | 4.264949 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/root.py | 1 | 3448 | from djblets.util.decorators import augment_method_from
from djblets.webapi.resources.root import RootResource as DjbletsRootResource
from reviewboard.webapi.server_info import get_server_info
from reviewboard.webapi.decorators import (webapi_check_login_required,
webapi_check_local_site)
from reviewboard.webapi.resources import WebAPIResource, resources
class RootResource(WebAPIResource, DjbletsRootResource):
    """Links to all the main resources, including URI templates to resources
    anywhere in the tree.

    This should be used as a starting point for any clients that need to access
    any resources in the API. By browsing through the resource tree instead of
    hard-coding paths, your client can remain compatible with any changes in
    the resource URI scheme.

    This also contains information on the server and the capabilities of
    the API. This information was formerly provided only by the Server Info
    resource, but has been moved here as a convenience to clients.
    """

    mimetype_vendor = 'reviewboard.org'

    def __init__(self, *args, **kwargs):
        # Register every top-level child resource of the API tree.
        child_resources = [
            resources.default_reviewer,
            resources.extension,
            resources.hosting_service,
            resources.hosting_service_account,
            resources.oauth_app,
            resources.oauth_token,
            resources.repository,
            resources.review_group,
            resources.review_request,
            resources.root_diff_comment,
            resources.root_file_attachment_comment,
            resources.root_general_comment,
            resources.root_review,
            resources.search,
            resources.server_info,
            resources.session,
            resources.user,
            resources.validation,
            resources.webhook,
        ]
        super(RootResource, self).__init__(child_resources, *args, **kwargs)

    @webapi_check_login_required
    @webapi_check_local_site
    @augment_method_from(DjbletsRootResource)
    def get(self, request, *args, **kwargs):
        """Retrieves the list of top-level resources and templates."""
        pass

    def get_uri_templates(self, request, *args, **kwargs):
        """Return all URI templates in the resource tree.

        Args:
            request (django.http.HttpRequest):
                The GET request for the Root resource.

            *args (tuple, unused):
                Additional unused arguments.

            **kwargs (dict, unused):
                Additional unused keyword arguments.

        Returns:
            dict:
            A mapping of resources to their URI templates.
        """
        # Manually include this resource to maintain compatibility with
        # our Python 2.7 API behavior. This is a bandaid for a larger
        # issue that stems from resources that share the same name but
        # have different URI templates.
        base_href = request.build_absolute_uri()
        self.register_uri_template(
            name='search',
            relative_path='%ssearch/{username}/' % base_href)

        return super().get_uri_templates(request, *args, **kwargs)

    def serialize_root(self, request, *args, **kwargs):
        """Serialize the root payload, merging in the server info fields."""
        root = super(RootResource, self).serialize_root(request, *args,
                                                        **kwargs)
        root.update(get_server_info(request))
        return root


root_resource = RootResource()
| mit | 0ef4acd70677e1ab1a75e3f4787ca81e | 36.478261 | 79 | 0.634281 | 4.634409 | false | false | false | false |
exercism/python | exercises/practice/scale-generator/.meta/example.py | 2 | 1155 | class Scale:
ASCENDING_INTERVALS = ['m', 'M', 'A']
CHROMATIC_SCALE = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
FLAT_CHROMATIC_SCALE = ['A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab']
FLAT_KEYS = ['F', 'Bb', 'Eb', 'Ab', 'Db', 'Gb', 'd', 'g', 'c', 'f', 'bb', 'eb']
def __init__(self, tonic, intervals=None):
self.tonic = tonic.capitalize()
self.intervals = intervals
self.chromatic_scale = (self.FLAT_CHROMATIC_SCALE if tonic in self.FLAT_KEYS else self.CHROMATIC_SCALE)
def chromatic(self):
return self._reorder_chromatic_scale()
def interval(self, intervals):
last_index = 0
pitches = []
scale = self._reorder_chromatic_scale()
for _, interval in enumerate(intervals):
pitches.append(scale[last_index])
last_index += self.ASCENDING_INTERVALS.index(interval) + 1
pitches.append(self.tonic)
return pitches
def _reorder_chromatic_scale(self):
index = self.chromatic_scale.index(self.tonic)
return self.chromatic_scale[index:] + self.chromatic_scale[:index]
| mit | c88f0507a9706aa7d2a654436a1e440c | 37.5 | 111 | 0.562771 | 2.984496 | false | false | false | false |
exercism/python | exercises/concept/ellens-alien-game/classes_test.py | 2 | 6459 | import unittest
import pytest
try:
from classes import new_aliens_collection
except ImportError as err:
raise ImportError("We tried to import the new_aliens_collection() function, "
"but could not find it. Did you remember to create it?") from err
try:
from classes import Alien
except ImportError as err:
raise ImportError("We tried to import the 'Alien' class from the classes.py file, but could not find it. "
"Did you remember to create it?") from err
class ClassesTest(unittest.TestCase):
    """Exercises for the Alien class and its helper functions."""

    # Test Alien class exists and is correctly initialised.
    @pytest.mark.task(taskno=1)
    def test_alien_has_correct_initial_coordinates(self):
        alien = Alien(2, -1)
        error = ("Expected object to be at position (2, -1) but instead "
                 f"found it initialized to position {(alien.x_coordinate, alien.y_coordinate)}.")
        self.assertEqual((2, -1), (alien.x_coordinate, alien.y_coordinate), msg=error)

    @pytest.mark.task(taskno=1)
    def test_alien_has_health(self):
        alien = Alien(0, 0)
        error = ("Expected object's health to be 3 but instead found "
                 f"it had a health of {alien.health}.")
        self.assertEqual(3, alien.health, msg=error)

    # Test instance variables are unique to specific instances.
    @pytest.mark.task(taskno=1)
    def test_alien_instance_variables(self):
        alien_one = Alien(-8, -1)
        alien_two = Alien(2, 5)
        coord_x_error = ("Expected alien_one and alien_two to have different x "
                         f"positions. Instead both x's were: {alien_two.x_coordinate}.")
        coord_y_error = ("Expected alien_one and alien_two to have different y "
                         f"positions. Instead both y's were: {alien_two.y_coordinate}.")
        self.assertFalse(alien_one.x_coordinate == alien_two.x_coordinate, msg=coord_x_error)
        self.assertFalse(alien_one.y_coordinate == alien_two.y_coordinate, msg=coord_y_error)

    # Test class methods work as specified.
    @pytest.mark.task(taskno=2)
    def test_alien_hit_method(self):
        # There are two valid interpretations for this method/task:
        # `self.health -= 1` and `self.health = max(0, self.health - 1)`.
        # The tests for this task reflect this ambiguity.
        data = [(1, (2,)), (2, (1,)), (3, (0,)), (4, (0, -1)), (5, (0, -2)), (6, (0, -3))]
        for variant, (iterations, result) in enumerate(data, 1):
            alien = Alien(2, 2)
            with self.subTest(f'variation #{variant}', input=iterations, output=result):
                error = ("Expected hit method to decrement health by 1. "
                         f"Health is {alien.health} when it should be {result}.")
                for _ in range(iterations):
                    alien.hit()
                self.assertIn(alien.health, result, msg=error)

    @pytest.mark.task(taskno=3)
    def test_alien_is_alive_method(self):
        alien = Alien(0, 1)
        alive_error = "Alien is dead while health is greater than 0."
        dead_error = "Alien is alive while health is less than or equal to 0."
        for _ in range(5):
            alien.hit()
            if alien.health > 0:
                self.assertTrue(alien.is_alive(), msg=alive_error)
            else:
                self.assertFalse(alien.is_alive(), msg=dead_error)

    @pytest.mark.task(taskno=4)
    def test_alien_teleport_method(self):
        alien = Alien(0, 0)
        alien.teleport(-1, -4)
        error = (
            "Expected alien to be at position (-1, -4) but "
            f"instead found it in position {(alien.x_coordinate, alien.y_coordinate)}.")
        self.assertEqual((-1, -4), (alien.x_coordinate, alien.y_coordinate), msg=error)

    @pytest.mark.task(taskno=5)
    def test_alien_collision_detection_method(self):
        alien = Alien(7, 3)
        error = "Expected collision_detection method to not be implemented."
        self.assertIsNone(alien.collision_detection(Alien(7, 2)), msg=error)

    # Test class variables are identical across instances.
    @pytest.mark.task(taskno=6)
    def test_alien_class_variable(self):
        alien_one = Alien(0, 2)
        alien_two = Alien(-6, -1)
        Alien.total_aliens_created = -2
        error_one = "Expected the total_aliens_created variable to be identical."
        error_two = "Expected the health variable to be identical."
        self.assertEqual(alien_two.total_aliens_created, alien_one.total_aliens_created, msg=error_one)
        self.assertEqual(alien_two.health, alien_one.health, msg=error_two)

    # Test total_aliens_created increments upon object instantiation.
    @pytest.mark.task(taskno=6)
    def test_alien_total_aliens_created(self):
        Alien.total_aliens_created = 0
        aliens = [Alien(-2, 6)]
        error = ("Expected total_aliens_created to equal 1. Instead "
                 f"it equals: {aliens[0].total_aliens_created}.")
        self.assertEqual(1, aliens[0].total_aliens_created, msg=error)
        aliens.append(Alien(3, 5))
        aliens.append(Alien(-5, -5))

        def error_text(alien, variable):
            return (
                "Expected all total_aliens_created variables to be "
                "equal to number of alien instances (i.e. 3). Alien "
                f"number {alien}'s total_aliens_created variable "
                f"is equal to {variable}.")

        tac_list = [alien.total_aliens_created for alien in aliens]
        self.assertEqual(3, tac_list[0], msg=error_text(1, tac_list[0]))
        self.assertEqual(3, tac_list[1], msg=error_text(2, tac_list[1]))
        self.assertEqual(3, tac_list[2], msg=error_text(3, tac_list[2]))

    # Test that the user knows how to create objects themselves.
    @pytest.mark.task(taskno=7)
    def test_new_aliens_collection(self):
        position_data = [(-2, 6), (1, 5), (-4, -3)]
        obj_list = new_aliens_collection(position_data)
        obj_error = "new_aliens_collection must return a list of Alien objects."
        for obj, position in zip(obj_list, position_data):
            self.assertIsInstance(obj, Alien, msg=obj_error)
            pos_error = (
                f"Expected object to be at position {position} but "
                f"instead found it initialized to position {(obj.x_coordinate, obj.y_coordinate)}.")
            self.assertEqual(position, (obj.x_coordinate, obj.y_coordinate), msg=pos_error)
| mit | 95c1bb0361cf8c01daab228d74860bc6 | 41.493421 | 110 | 0.61604 | 3.498917 | false | true | false | false |
pinax/django-user-accounts | account/forms.py | 1 | 7834 | import re
from collections import OrderedDict
from django import forms
from django.contrib import auth
from django.contrib.auth import get_user_model
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from account.conf import settings
from account.hooks import hookset
from account.models import EmailAddress
from account.utils import get_user_lookup_kwargs
alnum_re = re.compile(r"^\w+$")
class PasswordField(forms.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("widget", forms.PasswordInput(render_value=False))
self.strip = kwargs.pop("strip", True)
super(PasswordField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ""
value = force_str(value)
if self.strip:
value = value.strip()
return value
class SignupForm(forms.Form):
username = forms.CharField(
label=_("Username"),
max_length=30,
widget=forms.TextInput(),
required=True
)
email = forms.EmailField(
label=_("Email"),
widget=forms.TextInput(), required=True
)
password = PasswordField(
label=_("Password"),
strip=settings.ACCOUNT_PASSWORD_STRIP,
)
password_confirm = PasswordField(
label=_("Password (again)"),
strip=settings.ACCOUNT_PASSWORD_STRIP,
)
code = forms.CharField(
max_length=64,
required=False,
widget=forms.HiddenInput()
)
def clean_username(self):
if not alnum_re.search(self.cleaned_data["username"]):
raise forms.ValidationError(_("Usernames can only contain letters, numbers and underscores."))
User = get_user_model()
lookup_kwargs = get_user_lookup_kwargs({
"{username}__iexact": self.cleaned_data["username"]
})
qs = User.objects.filter(**lookup_kwargs)
if not qs.exists():
return self.cleaned_data["username"]
raise forms.ValidationError(_("This username is already taken. Please choose another."))
def clean_email(self):
value = self.cleaned_data["email"]
qs = EmailAddress.objects.filter(email__iexact=value)
if not qs.exists() or not settings.ACCOUNT_EMAIL_UNIQUE:
return value
raise forms.ValidationError(_("A user is registered with this email address."))
def clean(self):
if "password" in self.cleaned_data and "password_confirm" in self.cleaned_data:
if self.cleaned_data["password"] != self.cleaned_data["password_confirm"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
class LoginForm(forms.Form):
password = PasswordField(
label=_("Password"),
strip=settings.ACCOUNT_PASSWORD_STRIP,
)
remember = forms.BooleanField(
label=_("Remember Me"),
required=False
)
user = None
def clean(self):
if self._errors:
return
user = auth.authenticate(**self.user_credentials())
if user:
if user.is_active:
self.user = user
else:
raise forms.ValidationError(_("This account is inactive."))
else:
raise forms.ValidationError(self.authentication_fail_message)
return self.cleaned_data
def user_credentials(self):
return hookset.get_user_credentials(self, self.identifier_field)
class LoginUsernameForm(LoginForm):
username = forms.CharField(label=_("Username"), max_length=30)
authentication_fail_message = _("The username and/or password you specified are not correct.")
identifier_field = "username"
def __init__(self, *args, **kwargs):
super(LoginUsernameForm, self).__init__(*args, **kwargs)
field_order = ["username", "password", "remember"]
if hasattr(self.fields, "keyOrder"):
self.fields.keyOrder = field_order
else:
self.fields = OrderedDict((k, self.fields[k]) for k in field_order)
class LoginEmailForm(LoginForm):
email = forms.EmailField(label=_("Email"))
authentication_fail_message = _("The email address and/or password you specified are not correct.")
identifier_field = "email"
def __init__(self, *args, **kwargs):
super(LoginEmailForm, self).__init__(*args, **kwargs)
field_order = ["email", "password", "remember"]
if hasattr(self.fields, "keyOrder"):
self.fields.keyOrder = field_order
else:
self.fields = OrderedDict((k, self.fields[k]) for k in field_order)
class ChangePasswordForm(forms.Form):
password_current = forms.CharField(
label=_("Current Password"),
widget=forms.PasswordInput(render_value=False)
)
password_new = forms.CharField(
label=_("New Password"),
widget=forms.PasswordInput(render_value=False)
)
password_new_confirm = forms.CharField(
label=_("New Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super(ChangePasswordForm, self).__init__(*args, **kwargs)
def clean_password_current(self):
if not self.user.check_password(self.cleaned_data.get("password_current")):
raise forms.ValidationError(_("Please type your current password."))
return self.cleaned_data["password_current"]
def clean_password_new_confirm(self):
if "password_new" in self.cleaned_data and "password_new_confirm" in self.cleaned_data:
password_new = self.cleaned_data["password_new"]
password_new_confirm = self.cleaned_data["password_new_confirm"]
return hookset.clean_password(password_new, password_new_confirm)
return self.cleaned_data["password_new_confirm"]
class PasswordResetForm(forms.Form):
email = forms.EmailField(label=_("Email"), required=True)
def clean_email(self):
value = self.cleaned_data["email"]
if not EmailAddress.objects.filter(email__iexact=value).exists():
raise forms.ValidationError(_("Email address can not be found."))
return value
class PasswordResetTokenForm(forms.Form):
password = forms.CharField(
label=_("New Password"),
widget=forms.PasswordInput(render_value=False)
)
password_confirm = forms.CharField(
label=_("New Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
def clean_password_confirm(self):
if "password" in self.cleaned_data and "password_confirm" in self.cleaned_data:
password = self.cleaned_data["password"]
password_confirm = self.cleaned_data["password_confirm"]
return hookset.clean_password(password, password_confirm)
return self.cleaned_data["password_confirm"]
class SettingsForm(forms.Form):
email = forms.EmailField(label=_("Email"), required=True)
timezone = forms.ChoiceField(
label=_("Timezone"),
choices=[("", "---------")] + settings.ACCOUNT_TIMEZONES,
required=False
)
if settings.USE_I18N:
language = forms.ChoiceField(
label=_("Language"),
choices=settings.ACCOUNT_LANGUAGES,
required=False
)
def clean_email(self):
value = self.cleaned_data["email"]
if self.initial.get("email") == value:
return value
qs = EmailAddress.objects.filter(email__iexact=value)
if not qs.exists() or not settings.ACCOUNT_EMAIL_UNIQUE:
return value
raise forms.ValidationError(_("A user is registered with this email address."))
| mit | 90f697e1e4d38d379a129aa6a04f29ea | 33.209607 | 106 | 0.63518 | 4.273868 | false | false | false | false |
exercism/python | exercises/concept/black-jack/.meta/exemplar.py | 2 | 3205 | """Functions to help play and score a game of blackjack.
How to play blackjack: https://bicyclecards.com/how-to-play/blackjack/
"Standard" playing cards: https://en.wikipedia.org/wiki/Standard_52-card_deck
"""
def value_of_card(card):
"""Determine the scoring value of a card.
:param card: str - given card.
:return: int - value of a given card. See below for values.
1. 'J', 'Q', or 'K' (otherwise known as "face cards") = 10
2. 'A' (ace card) = 1
3. '2' - '10' = numerical value.
"""
if card in ('JQK'):
value = 10
elif card == 'A':
value = 1
else:
value = int(card)
return value
def higher_card(card_one, card_two):
"""Determine which card has a higher value in the hand.
:param card_one, card_two: str - cards dealt in hand. See below for values.
:return: str or tuple - resulting Tuple contains both cards if they are of equal value.
1. 'J', 'Q', or 'K' (otherwise known as "face cards") = 10
2. 'A' (ace card) = 1
3. '2' - '10' = numerical value.
"""
card_one_value = value_of_card(card_one)
card_two_value = value_of_card(card_two)
if card_one_value == card_two_value:
result = card_one, card_two
elif card_one_value > card_two_value:
result = card_one
else:
result = card_two
return result
def value_of_ace(card_one, card_two):
"""Calculate the most advantageous value for the ace card.
:param card_one, card_two: str - card dealt. See below for values.
:return: int - either 1 or 11 value of the upcoming ace card.
1. 'J', 'Q', or 'K' (otherwise known as "face cards") = 10
2. 'A' (ace card) = 11 (if already in hand)
3. '2' - '10' = numerical value.
"""
card_one_value = 11 if card_one == 'A' else value_of_card(card_one)
card_two_value = 11 if card_two == 'A' else value_of_card(card_two)
ace_value = 1 if 11 + (card_one_value + card_two_value) > 21 else 11
return ace_value
def is_blackjack(card_one, card_two):
"""Determine if the hand is a 'natural' or 'blackjack'.
:param card_one, card_two: str - card dealt. See below for values.
:return: bool - is the hand is a blackjack (two cards worth 21).
1. 'J', 'Q', or 'K' (otherwise known as "face cards") = 10
2. 'A' (ace card) = 11 (if already in hand)
3. '2' - '10' = numerical value.
"""
return (card_one == 'A' or card_two == 'A') and (value_of_card(card_one) == 10 or value_of_card(card_two) == 10)
def can_split_pairs(card_one, card_two):
"""Determine if a player can split their hand into two hands.
:param card_one, card_two: str - cards dealt.
:return: bool - can the hand be split into two pairs? (i.e. cards are of the same value).
"""
return value_of_card(card_one) == value_of_card(card_two)
def can_double_down(card_one, card_two):
"""Determine if a blackjack player can place a double down bet.
:param card_one, card_two: str - first and second cards in hand.
:return: bool - can the hand can be doubled down? (i.e. totals 9, 10 or 11 points).
"""
return 8 < value_of_card(card_one) + value_of_card(card_two) < 12
| mit | 9c4662db2c667efd4c2ad88a360aefe8 | 28.953271 | 116 | 0.613417 | 3.10562 | false | false | false | false |
exercism/python | exercises/concept/cater-waiter/sets_categories_data.py | 2 | 18454 | # pylint: disable-all
# flake8: noqa,
VEGAN = {
'chives', 'nutritional yeast', 'tomato', 'orange zest', 'pareve puff pastry', 'cashews', 'tofu',
'rice vinegar', 'black pepper', 'cardamom powder', 'mustard seeds', 'parev shortcrust pastry',
'scallions', 'water', 'chinese eggplants', 'lemon juice', 'smoked paprika', 'cloves', 'basmati rice',
'cayenne pepper', 'green onions', 'sunflower oil', 'mixed herbs', 'garlic paste', 'parsley',
'fresh red chili', 'flour', 'garlic', 'oregano', 'green beans', 'harissa', 'brandy', 'fresh basil',
'coriander', 'vinegar', 'thyme', 'coriander seeds', 'clove powder', 'pomegranate seeds',
'sugar', 'yukon gold potato', 'sesame oil', 'cinnamon powder', 'butternut squash', 'allspice powder',
'red pepper flakes', 'soy sauce', 'sesame seeds', 'cornstarch', 'mango powder', 'vegetable stock',
'raisins', 'barley malt', 'olive oil', 'ground almonds', 'white rice', 'garlic powder', 'walnuts',
'saffron powder', 'red chili powder', 'turmeric powder', 'spring onions', 'yeast', 'khmeli suneli',
'peanuts', 'bulgur', 'cilantro', 'onion', 'calabash nutmeg', 'black-eyed peas', 'grains of selim',
'zucchini', 'currants', 'spaghetti', 'figs', 'red bell pepper', 'lemon zest', 'ground turmeric',
'chili flakes', 'chickpea flour', 'hing', 'slivered almonds', 'vegetable oil', 'serrano chili',
'salt', 'yellow onions', 'salt', 'coriander powder', 'orange zest', 'garam masala', 'yellow onion',
'smoked tofu', 'bell pepper', 'apples', 'brown sugar', 'coconut oil', 'orange juice',
'sorghum stems', 'dried blueberries', 'tomato paste', 'curry leaves', 'vegetarian worcestershire sauce',
'hot water', 'fresh ginger', 'firm tofu', 'eggplants', 'bell pepper', 'siracha', 'carrot', 'nigella seeds',
'vegan butter', "za'atar", 'baking soda', 'brown sugar', 'dried cranberries', 'kosher salt', 'mangoes',
'vegan unsweetened yoghurt', 'black peppercorn', 'vinegar', 'dill', 'barberries', 'honey', 'tomatoes',
'yellow split peas', 'persian cucumber', 'turmeric', 'lemon', 'cumin', 'oil', 'mushrooms', 'spring onion',
'pomegranate concentrate', 'cumin seeds', 'balsamic vinegar', 'ripe plantains', 'celeriac', 'breadcrumbs',
'ginger', 'dried cherries', 'red onion', 'rosemary', 'chopped parsley', 'corn', 'cumin powder', 'pecans',
'silken tofu', 'pomegranate molasses', 'carrot', 'corn flour', 'mashed potatoes'
}
VEGETARIAN = {
'almonds', 'chives', 'limes', 'puff pastry', 'onion', 'cashews', 'red cabbage', 'red wine vinegar',
'brussel sprouts', 'fresh corn', 'black pepper', 'lemon juice', 'roasted corn', 'eggs',
'fresh cilantro leaves', 'shiitake mushrooms', 'sunflower oil', 'sage', 'dijon mustard',
'blanched almonds', 'dates', 'flour', 'fresh pea tendrils', 'garlic', 'egg', 'green beans',
'yukon gold potato', 'vermicelli noodles', 'onions', 'avocado', 'dried lasagna noodles',
'thyme', 'cauliflower', 'basil', 'watercress', 'black beans', 'butternut squash', 'red thai chili',
'masa', 'red chili', 'red onions', 'jalapeño chili', 'grated nutmeg', 'feta cheese', 'hazelnuts',
'soy sauce', 'shallots', 'chipotle chili', 'vegetable bullion', 'fresh cherry tomatoes', 'olive oil',
'milk', 'fresh cherry bocconcini', 'crema', 'marmite', 'walnuts', 'nutmeg', 'ricotta cheese',
'chestnuts', 'mint leaves', 'lime juice', 'white wine', 'apples', 'pearl barley', 'cotija cheese',
'zucchini', 'currants', 'leek', 'pomegranate', 'lemon zest', 'avocados', 'parmesan cheese', 'mint',
'leeks', 'fresh artichoke hearts', 'vegetable oil', 'brazil nuts', 'red chili', 'sharp white cheddar',
'salt', 'pepitas', 'green lentils', 'beets', 'celery', 'smoked tofu', 'fresh tomatoes',
'puff pastry sheets', 'palm sugar', 'vegetarian fish sauce', 'oil marinated artichokes', 'hot water',
'chickpeas', 'firm tofu', 'wombok', 'carrot', 'asparagus', 'bean sprouts', 'kosher salt',
'pasilla chili', 'tomatillos', 'parmesan rind', 'pasta sheets', 'cream', 'butter', 'croutons',
'lacinato kale', 'fresh or frozen fava beans', 'fresh pumpkin', 'honey', 'tomatoes', 'olives',
'capers', 'pine nuts', 'lemon', 'cumin', 'ancho chili', 'fresh peas', 'spring roll wrappers',
'balsamic vinegar', 'portobello mushrooms', 'breadcrumbs', 'blue cheese', 'red onion',
'rosemary', 'pecans', 'carrot', 'corn flour', 'toasted peanuts'
}
PALEO = {
'cinnamon', 'chiles de árbol', 'chives', 'limes', 'allspice', 'zucchini', 'seranno chili', 'lemon zest',
'apple cider vinegar', 'avocados', 'cashews', 'mango', 'cilantro leaves', 'pepitas', 'white chicken',
'chipotles', 'black pepper', 'scallions', 'pumpkin puree', 'water', 'serrano chili', 'lemon juice',
'smoked paprika', 'homemade apricot honey preserves', 'eggs', 'salt', 'flank steak', 'fresh cilantro leaves',
'cider vinegar', 'cloves', 'purple sweet potato', 'coconut yogurt', 'green onions', 'tilapia',
'yellow bell pepper', 'coconut oil', 'whole chicken', 'coconut oil', 'safflower oil', 'roma tomatoes',
'fresh red chili', 'fresh thai chili', 'shrimp', 'garlic', 'onions', 'lime', 'avocado', 'fresh parsley',
'cauliflower', 'shredded red cabbage', 'basil', 'baking soda', 'serrano chili',
'cherry tomatoes', 'kale', 'bacon', 'kosher salt', 'mangoes', 'lacinato kale', 'shallots', 'pineapple',
'chipotle chili', 'white vinegar', 'honey', 'tomatoes', 'homemade tamarind concentrate',
'mexican oregano', 'olive oil', 'pine nuts', 'garlic powder', 'coconut flour', 'green bell pepper',
'dried apricots', 'cumin', 'nutmeg', 'kosher salt', 'onions', 'mustard seed', 'lemons', 'lime zest',
'ground cumin', 'almond butter', 'chili powder', 'lime juice', 'paleo mayonnaise', 'pork chops',
'cilantro', 'onion', 'red bell pepper', 'paleo parmesan cheese', 'radishes', 'avocado oil',
'dijon mustard', 'avocado mayonnaise', 'castelfranco radicchio', 'worcestershire sauce', 'treviso'
}
KETO = {
'cinnamon', 'avocado oil', 'chives', 'sriacha', 'almond flour', 'crunchy peanut butter',
'cucumbers', 'cream cheese', 'red cabbage', 'red wine vinegar', 'brussel sprouts', 'black pepper',
'cardamom powder', 'mustard seeds', 'scallions', 'kecap manis', 'lemon juice', 'eggs', 'tahini',
'cloves', 'green onions', 'dijon mustard', 'garlic paste', 'watermelon radishes', 'parmesan',
'parsley', 'star anise', 'fresh cucumber', 'fresh red chili', 'shrimp', 'garlic', 'oregano',
'fennel bulb', 'harissa', 'dutch carrot', 'fresh basil', 'avocado', 'clove powder', 'coriander seeds',
'thyme', 'fresh parsley', 'chicken', 'cauliflower', 'basil', 'watercress', 'cinnamon powder',
'cherry tomatoes', 'soy sauce', 'sesame seeds', 'micro cilantro', 'mozzarella cheese', 'shallots',
'mango powder', 'chipotle chili', 'olive oil', 'spinach', 'pink peppercorns', 'coconut flour',
'salmon steaks', 'dark soy sauce', 'red chili powder', 'turmeric powder', 'spring onions',
'lime juice', 'ginger garlic paste', 'pork chops', 'peanuts', 'dried fenugreek leaves', 'cilantro',
'onion', 'salmon fillets', 'toasted buckwheat', 'whole small crimini mushrooms', 'caster sugar',
'granny smith apples', 'green cabbage', 'apple cider vinegar', 'chili flakes', 'parmesan cheese',
'hing', 'castelfranco radicchio', 'cilantro leaves', 'fresh greek yogurt', 'roasted chicken', 'ghee',
'flaxmeal', 'flank steak', 'salt', 'coriander powder', 'boned chicken', 'red chili flakes',
'garam masala', 'almond meal', 'peanut oil', 'tomato paste', 'oyster sauce',
'curry leaves', 'fresh ginger', 'cardamom', 'radishes', 'little gem lettuce heads',
'grilled king fish', 'carrot', 'cinnamon sticks', 'heavy cream', 'asparagus', 'nigella seeds',
'light soy sauce', 'pork belly', 'green chili', 'mangoes', 'red and green thai chili', 'butter',
'vinegar', 'dill', 'fish sauce', 'white vinegar', 'tomatoes', 'mirin',
'avocado mayonnaise', 'turmeric', 'lemon', 'cumin', 'fennel seeds', 'lemon juice', 'salt',
'roasted peanuts', 'ginger', 'red onion', 'rosemary', 'cumin powder', 'cashew nuts', 'pecans',
'green chili','whole small crimini mushrooms', 'monk fruit', 'sour cream'
}
OMNIVORE = {
'clams', 'prawns', 'white wine vinegar', 'date syrup', 'limes', 'tomato', 'coriander',
'black chickpeas', 'yellow bell pepper', 'black cardamom', 'baby squid', 'pepitas',
'red cabbage', 'baby scallops', 'green cardamom', 'black pepper', 'chaat masala', 'water',
'lemon juice', 'tahini', 'cloves', 'white pepper', 'fennel bulbs', 'tomato puree',
'maggi cubes', 'couscous', 'yellow mustard', 'parsley', 'sriracha', 'roma tomatoes',
'shrimp', 'garlic', 'oregano', 'chicken wings', 'yukon gold potato', 'harissa', 'onions',
'avocado', 'thyme', 'chicken', 'sugar', 'flat-leaf parsley', 'celery seeds', 'cherry tomatoes',
'mayonnaise', 'scallion chutney', 'red pepper flakes', 'hazelnuts', 'soy sauce', 'sesame seeds',
'red snapper', 'white onion', 'vegetable bullion', 'marjoram', 'pani puri', 'olive oil', 'rice',
'serrano chili', 'tamarind concentrate', 'lime juice', 'white wine', 'beef brisket', 'cilantro',
'onion', 'crushed red pepper flakes', 'chiles de árbol', 'fresh mint', 'zucchini', 'red bell pepper',
'yoghurt', 'apple cider vinegar', 'parmesan cheese', 'slivered almonds', 'whole-milk yogurt',
'anchovy fillets', 'fresh ricotta', 'mint', 'chile manzano', 'roasted chicken', 'sea salt',
'fresh thyme', 'vegetable oil', 'salt', 'mexican crema', 'celery', 'yellow onion',
'worcestershire sauce', 'fresh tortillas', 'tomato paste', 'oranges', 'chickpeas',
'scotch bonnet pepper', 'shelled large shrimp', 'mussels', 'summer squash', 'salsa',
'garlic cloves', 'fish stock', 'bell pepper', 'green bell pepper', 'carrot', 'cinnamon sticks',
'thin sev', 'brown sugar', 'baby carrot', 'bacon', 'kosher salt', 'bay leaves', 'anaheim chili',
'oaxaca cheese', 'butter', 'vinegar', 'crab legs', 'white vinegar', 'honey', 'tomatoes',
'green cabbage', 'toasted bread', 'turmeric', 'lemon', 'cumin', 'black peppercorns', 'poblano chili',
'arborio risotto rice', 'fresh corn tortillas', 'balsamic vinegar', 'rhubarb', 'ginger',
'guajillo chile', 'filo pastry', 'leg of lamb', 'red onion', 'chipotle adobo sauce', 'rosemary',
'chili powder', 'beer', 'carrot'
}
SPECIAL_INGREDIENTS = {'cream','bacon', 'garlic', 'baby scallops', 'mussels', 'baby squid', 'cashews', 'salmon fillets',
'filo pastry', 'almonds', 'milk', 'blue cheese', 'clams', 'shrimp', 'tomato puree', 'chocolate',
'honey', 'anchovy fillets', 'bulgur', 'prawns', 'parmesan cheese', 'fish', 'shelled large shrimp',
'gluten', 'crab legs', 'feta cheese', 'whole-milk yogurt', 'crema', 'firm tofu', 'fish stock',
'fresh ricotta', 'tomato paste', 'fresh cherry tomatoes', 'pork chops', 'eggs', 'greek yogurt',
'hazelnuts', 'pecans', 'brie cheese', 'oaxaca cheese', 'yellow onion', 'whey', 'silken tofu',
'toasted bread', 'parmesan', 'beef', 'tofu', 'flour', 'tomatoes', 'red onion', 'slivered almonds',
'strawberries', 'onions', 'pine nuts', 'cherry tomatoes', 'soy sauce', 'oyster sauce',
'mozzarella cheese', 'roma tomatoes', 'heavy cream', 'paneer', 'pork tenderloin', 'garlic cloves',
'swiss cheese', 'grilled king fish', 'ground almonds', 'tilapia', 'sprint onion', 'couscous',
'walnuts', 'semolina', 'yogurt', 'cotija cheese', 'oysters', 'spaghetti', 'cheddar cheese',
'butter', 'lobster', 'smoked tofu', 'peanuts', 'ground pork', 'fresh cherry bocconcini',
'pork belly', 'toasted peanuts', 'roasted peanuts'
}
ALCOHOLS = {"whiskey", "whisky", "white rum", "dark rum", "bourbon", "rye", "scotch", "vodka",
"tequila", "gin", "dry vermouth", "sweet vermouth", "prosecco","aperol", "brandy", "mezcal",
"triple sec", "coffee liqueur", "almond liqueur", "champagne", "orange curacao", "rum"
}
VEGAN_INTERSECTIONS = {'brown sugar', 'carrot', 'sugar', 'vegetable stock', 'fresh ginger', 'nutritional yeast',
'cayenne pepper', 'olive oil', 'lemon', 'ginger', 'red onion', 'pomegranate molasses',
'onion', 'water', 'chickpea flour', 'orange zest', 'coconut oil', 'smoked paprika',
'lemon zest', 'sunflower oil', 'orange juice', 'black pepper', 'cinnamon powder',
'mushrooms', 'cloves', 'salt', 'oil', 'vegan butter', 'turmeric', 'tomato paste',
'mustard seeds', 'bell pepper', 'rosemary', 'vinegar', 'tomatoes', 'flour', 'soy sauce',
'lemon juice', 'garlic'}
VEGETARIAN_INTERSECTIONS = {'carrot', 'milk', 'basil', 'green lentils', 'vegetable bullion', 'red onions',
'balsamic vinegar', 'lemon', 'olive oil', 'butter', 'honey', 'red chili',
'red onion', 'breadcrumbs', 'lemon zest', 'pepitas', 'black pepper', 'fresh peas',
'salt', 'firm tofu', 'ricotta cheese', 'kosher salt', 'watercress', 'cream',
'parmesan cheese', 'shallots', 'rosemary', 'sage', 'tomatoes', 'walnuts',
'lemon juice', 'thyme', 'garlic', 'eggs', 'red wine vinegar'}
PALEO_INTERSECTIONS = {'basil', 'olive oil', 'honey', 'pine nuts', 'baking soda', 'shrimp', 'cherry tomatoes',
'coconut oil', 'cinnamon', 'lemon zest', 'cumin', 'black pepper', 'lime', 'salt',
'zucchini', 'kosher salt', 'chipotle chili', 'eggs', 'coconut flour', 'avocado',
'cauliflower', 'serrano chili', 'safflower oil', 'tomatoes', 'lemon juice', 'onions',
'garlic'}
KETO_INTERSECTIONS = {'fresh cucumber', 'red cabbage', 'olive oil', 'ginger', 'butter', 'dill', 'red onion',
'monk fruit', 'cherry tomatoes', 'spring onions', 'lime juice', 'fish sauce',
'sesame seeds', 'black pepper', 'salt', 'chives', 'asparagus', 'eggs',
'avocado mayonnaise', 'rosemary', 'cauliflower', 'flank steak', 'lemon juice',
'garlic'}
OMNIVORE_INTERSECTIONS = {'mint', 'carrot', 'fresh mint', 'olive oil', 'lemon', 'ginger', 'butter', 'honey',
'leg of lamb', 'red onion', 'bay leaves', 'tamarind concentrate',
'worcestershire sauce', 'onion', 'lime juice', 'water', 'anchovy fillets', 'celery',
'black pepper', 'cilantro', 'chili powder', 'salt', 'mayonnaise', 'garlic cloves',
'kosher salt', 'white onion', 'turmeric', 'rosemary', 'vinegar', 'tomatoes',
'sea salt', 'soy sauce', 'lemon juice', 'onions', 'thyme', 'garlic', 'avocado',
'fresh corn tortillas', 'tomato paste'}
EXAMPLE_INTERSECTION = {'fresh red chili', 'sugar', 'nutritional yeast', 'fresh ginger', 'red chili powder', 'garlic',
'olive oil', 'mashed potatoes', 'garam masala', 'clove powder', 'cumin powder', 'onion',
'chickpea flour', 'water', 'turmeric powder', 'hing', 'black pepper', 'cinnamon powder',
'cilantro', 'salt', 'oil', 'cardamom powder', 'turmeric', 'garlic paste', 'mustard seeds',
'vinegar', 'mangoes', 'nigella seeds', 'serrano chili', 'flour', 'soy sauce', 'coriander seeds',
'coriander powder', 'lemon juice', 'mango powder', 'curry leaves'}
example_dishes = [
{'salt', 'breadcrumbs', 'water', 'flour', 'celeriac', 'chickpea flour', 'soy sauce', 'parsley',
'sunflower oil', 'lemon', 'black pepper'},
{'cornstarch', 'salt', 'vegetable oil', 'sugar', 'vegetable stock', 'water', 'tofu', 'soy sauce',
'lemon zest', 'lemon juice', 'black pepper', 'ginger', 'garlic'},
{'salt', 'mixed herbs', 'silken tofu', 'smoked tofu', 'nutritional yeast', 'turmeric', 'soy sauce',
'garlic', 'lemon juice', 'olive oil', 'black pepper', 'spaghetti'},
{'salt', 'mushrooms', 'sugar', 'barley malt', 'nutritional yeast', 'fresh basil', 'olive oil',
'honey', 'yeast', 'red onion', 'bell pepper', 'cashews', 'oregano', 'rosemary', 'garlic powder',
'tomatoes', 'water', 'flour', 'red pepper flakes', 'garlic'},
{'mango powder', 'oil', 'salt', 'cardamom powder', 'fresh red chili', 'sugar', 'fresh ginger',
'turmeric', 'red chili powder', 'curry leaves', 'garlic paste', 'mustard seeds', 'vinegar',
'mashed potatoes', 'garam masala', 'mangoes', 'nigella seeds', 'clove powder', 'serrano chili',
'cumin powder', 'onion', 'water', 'chickpea flour', 'coriander seeds', 'turmeric powder', 'hing',
'coriander powder', 'cinnamon powder', 'cilantro', 'garlic'},
{'mango powder', 'oil', 'salt', 'cardamom powder', 'fresh red chili', 'sugar', 'fresh ginger',
'turmeric', 'red chili powder', 'curry leaves', 'garlic paste', 'mustard seeds', 'vinegar',
'mashed potatoes', 'garam masala', 'mangoes', 'nigella seeds', 'clove powder', 'serrano chili',
'cumin powder', 'onion', 'water', 'chickpea flour', 'coriander seeds', 'turmeric powder', 'hing',
'coriander powder', 'cinnamon powder', 'cilantro', 'garlic'}
]
| mit | 4e50b483db4d314cf5a2e58c571cefdd | 86.033019 | 121 | 0.582895 | 2.846937 | false | false | true | false |
hendrix/hendrix | hendrix/contrib/concurrency/resources.py | 3 | 2381 | import json
import uuid
from twisted.internet import threads
from twisted.internet.protocol import Protocol
from hendrix.facilities.resources import NamedResource
from .messaging import hxdispatcher
def send_django_signal(transport, data):
from .signals import message_signal
message_signal.send(None, dispatcher=transport, data=data)
class MessageHandlerProtocol(Protocol):
"""
A basic protocol for socket messaging
using a hendrix messaging dispatcher to handle
addressing messages to active sockets from
different contexts
"""
dispatcher = hxdispatcher
guid = None
def dataReceived(self, data):
"""
Takes "data" which we assume is json encoded
If data has a subject_id attribute, we pass that to the dispatcher
as the subject_id so it will get carried through into any
return communications and be identifiable to the client
falls back to just passing the message along...
"""
try:
address = self.guid
data = json.loads(data)
threads.deferToThread(send_signal, self.dispatcher, data)
if 'hx_subscribe' in data:
return self.dispatcher.subscribe(self.transport, data)
if 'address' in data:
address = data['address']
else:
address = self.guid
self.dispatcher.send(address, data)
except Exception as e:
raise
self.dispatcher.send(
self.guid,
{'message': data, 'error': str(e)}
)
def connectionMade(self):
"""
establish the address of this new connection and add it to the list of
sockets managed by the dispatcher
reply to the transport with a "setup_connection" notice
containing the recipient's address for use by the client as a return
address for future communications
"""
self.transport.uid = str(uuid.uuid1())
self.guid = self.dispatcher.add(self.transport)
self.dispatcher.send(self.guid, {'setup_connection': self.guid})
def connectionLost(self, something):
"clean up the no longer useful socket in the dispatcher"
self.dispatcher.remove(self.transport)
MessageResource = NamedResource('messages')
| mit | ed99fe148edfe2983a36666876315fd2 | 29.525641 | 78 | 0.634607 | 4.790744 | false | false | false | false |
hendrix/hendrix | test/test_request_behavior.py | 1 | 2050 | import io
import os
import pytest_twisted
import requests
from twisted.internet import threads
from hendrix.deploy.base import HendrixDeploy
from hendrix.facilities.resources import MediaResource
from .resources import application
@pytest_twisted.inlineCallbacks
def test_max_upload_bytes():
statics_path = 'path_on_disk/to/files'
os.makedirs(statics_path, exist_ok=True)
options = {
'wsgi': application,
'max_upload_bytes': 200,
'http_port': 9876,
'resources': [MediaResource(statics_path, namespace=b'statics')]
}
deployer = HendrixDeploy(options=options)
deployer.addServices()
deployer.start()
def reject_large_uploads():
# uploading 50 bytes is fine.
byte_count = 50
ok_data = io.BytesIO(os.urandom(byte_count))
response = requests.post(
"http://localhost:9876/",
files={'data': ok_data}
)
assert 200 == response.status_code
# upload more than our max bytes and we fail with a 413
byte_count = 201
too_big = io.BytesIO(os.urandom(byte_count))
response = requests.post(
"http://localhost:9876/",
files={'data': too_big}
)
assert 413 == response.status_code
assert response.reason == "Request Entity Too Large"
def test_static_files():
js_file = 'showmethisfile.js'
filepath = os.path.join(statics_path, js_file)
open(filepath, 'w').write('//console.log("Hello World");')
response = requests.get(
f"http://localhost:9876/statics/{js_file}",
)
assert response.status_code == 200
assert '//console.log("Hello World");' in response.text
os.remove(filepath)
os.removedirs(statics_path)
response = requests.get(
f"http://localhost:9876/statics/{js_file}",
)
assert response.status_code == 404
yield threads.deferToThread(reject_large_uploads)
yield threads.deferToThread(test_static_files)
| mit | 4c3f742ef029cf24c5cfb29dca2d1d94 | 28.285714 | 72 | 0.623902 | 3.831776 | false | true | false | false |
pastas/pastas | pastas/solver.py | 1 | 19679 | """This module contains the different solvers that are available for Pastas.
All solvers inherit from the BaseSolver class, which contains general method
for selecting the correct time series to misfit and options to weight the
residuals or noise series.
To solve a model the following syntax can be used:
>>> ml.solve(solver=ps.LeastSquares)
"""
from logging import getLogger
import numpy as np
from pandas import DataFrame
from scipy.linalg import svd
from scipy.optimize import least_squares
logger = getLogger(__name__)
class BaseSolver:
    # Identifier used when referring to this solver type; concrete
    # solvers override this with their own name.
    _name = "BaseSolver"
    # The class docstring is assigned explicitly so subclasses can reuse
    # or extend it programmatically.
    __doc__ = """All solver instances inherit from the BaseSolver class.

    Attributes
    ----------
    model: pastas.Model instance
    pcov: pandas.DataFrame
        Pandas DataFrame with the covariances of the optimized parameters.
    pcor: pandas.DataFrame
        Pandas DataFrame with the correlation between the optimized parameters.
    nfev: int
        Number of times the model is called during optimization.
    result: object
        The object returned by the minimization method that is used. It depends
        on the solver what is actually returned.
    """
def __init__(self, ml, pcov=None, nfev=None, obj_func=None, **kwargs):
self.ml = ml
self.pcov = pcov # Covariances of the parameters
if pcov is None:
self.pcor = None # Correlation between parameters
else:
self.pcor = self._get_correlations(pcov)
self.nfev = nfev # number of function evaluations
self.obj_func = obj_func
self.result = None # Object returned by the optimization method
def misfit(self, p, noise, weights=None, callback=None,
returnseparate=False):
"""This method is called by all solvers to obtain a series that are
minimized in the optimization process. It handles the application of
the weights, a noisemodel and other optimization options.
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
noise: Boolean
weights: pandas.Series, optional
pandas Series by which the residual or noise series are
multiplied. Typically values between 0 and 1.
callback: ufunc, optional
function that is called after each iteration. the parameters are
provided to the func. E.g. "callback(parameters)"
returnseparate: bool, optional
return residuals, noise, noiseweights
Returns
-------
rv:
residuals series (if noise=False) or noise series (if noise=True)
"""
# Get the residuals or the noise
if noise:
rv = self.ml.noise(p) * \
self.ml.noise_weights(p)
else:
rv = self.ml.residuals(p)
# Determine if weights need to be applied
if weights is not None:
weights = weights.reindex(rv.index)
weights.fillna(1.0, inplace=True)
rv = rv.multiply(weights)
if callback:
callback(p)
if returnseparate:
return self.ml.residuals(p).values, \
self.ml.noise(p).values, \
self.ml.noise_weights(p).values
return rv.values
def prediction_interval(self, n=1000, alpha=0.05, max_iter=10, **kwargs):
"""Method to calculate the prediction interval for the simulation.
Returns
-------
data : Pandas.DataFrame
DataFrame of length number of observations and two columns labeled
0.025 and 0.975 (numerical values) containing the 2.5% and 97.5%
prediction interval (for alpha=0.05)
Notes
-----
Add residuals assuming a Normal distribution with standard deviation
equal to the standard deviation of the residuals.
"""
sigr = self.ml.residuals().std()
data = self._get_realizations(func=self.ml.simulate, n=n, name=None,
max_iter=max_iter, **kwargs)
data = data + sigr * np.random.randn(data.shape[0], data.shape[1])
q = [alpha / 2, 1 - alpha / 2]
rv = data.quantile(q, axis=1).transpose()
return rv
def ci_simulation(self, n=1000, alpha=0.05, max_iter=10, **kwargs):
"""Method to calculate the confidence interval for the simulation.
Returns
-------
data : Pandas.DataFrame
DataFrame of length number of observations and two columns labeled
0.025 and 0.975 (numerical values) containing the 2.5% and 97.5%
interval (for alpha=0.05)
Notes
-----
The confidence interval shows the uncertainty in the simulation due
to parameter uncertainty. In other words, there is a 95% probability
that the true best-fit line for the observed data lies within the
95% confidence interval.
"""
return self._get_confidence_interval(func=self.ml.simulate, n=n,
alpha=alpha, max_iter=max_iter,
**kwargs)
def ci_block_response(self, name, n=1000, alpha=0.05, max_iter=10,
**kwargs):
"""Method to calculate the confidence interval for the block response.
Returns
-------
data : Pandas.DataFrame
DataFrame of length number of observations and two columns labeled
0.025 and 0.975 (numerical values) containing the 2.5% and 97.5%
interval (for alpha=0.05)
Notes
-----
The confidence interval shows the uncertainty in the simulation due
to parameter uncertainty. In other words, there is a 95% probability
that the true best-fit line for the observed data lies within the
95% confidence interval.
"""
dt = self.ml.get_block_response(name=name).index.values
return self._get_confidence_interval(func=self.ml.get_block_response,
n=n, alpha=alpha, name=name,
max_iter=max_iter, dt=dt,
**kwargs)
def ci_step_response(self, name, n=1000, alpha=0.05, max_iter=10,
**kwargs):
"""Method to calculate the confidence interval for the step response.
Returns
-------
data : Pandas.DataFrame
DataFrame of length number of observations and two columns labeled
0.025 and 0.975 (numerical values) containing the 2.5% and 97.5%
interval (for alpha=0.05)
Notes
-----
The confidence interval shows the uncertainty in the simulation due
to parameter uncertainty. In other words, there is a 95% probability
that the true best-fit line for the observed data lies within the
95% confidence interval.
"""
dt = self.ml.get_block_response(name=name).index.values
return self._get_confidence_interval(func=self.ml.get_step_response,
n=n, alpha=alpha, name=name,
max_iter=max_iter, dt=dt,
**kwargs)
def ci_contribution(self, name, n=1000, alpha=0.05, max_iter=10, **kwargs):
"""Method to calculate the confidence interval for the contribution.
Returns
-------
data : Pandas.DataFrame
DataFrame of length number of observations and two columns labeled
0.025 and 0.975 (numerical values) containing the 2.5% and 97.5%
interval (for alpha=0.05)
Notes
-----
The confidence interval shows the uncertainty in the simulation due
to parameter uncertainty. In other words, there is a 95% probability
that the true best-fit line for the observed data lies within the
95% confidence interval.
"""
return self._get_confidence_interval(func=self.ml.get_contribution,
n=n, alpha=alpha, name=name,
max_iter=max_iter,
**kwargs)
def get_parameter_sample(self, name=None, n=None, max_iter=10):
"""Method to obtain a parameter sets for monte carlo analyses.
Parameters
----------
n: int, optional
Number of random samples drawn from the bivariate normal
distribution.
name: str, optional
Name of the stressmodel or model component to obtain the
parameters for.
max_iter : int, optional
maximum number of iterations for truncated multivariate
sampling, default is 10. Increase this value if number of
accepted parameter samples is lower than n.
Returns
-------
numpy.ndarray
Numpy array with N parameter samples.
"""
p = self.ml.get_parameters(name=name)
pcov = self._get_covariance_matrix(name=name)
if name is None:
parameters = self.ml.parameters
else:
parameters = self.ml.parameters.loc[
self.ml.parameters.name == name]
pmin = parameters.pmin.fillna(-np.inf).values
pmax = parameters.pmax.fillna(np.inf).values
if n is None:
# only use parameters that are varied.
n = int(10 ** parameters.vary.sum())
samples = np.zeros((0, p.size))
# Start truncated multivariate sampling
it = 0
while samples.shape[0] < n:
s = np.random.multivariate_normal(p, pcov, size=(n,),
check_valid="ignore")
accept = s[(np.min(s - pmin, axis=1) >= 0) &
(np.max(s - pmax, axis=1) <= 0)]
samples = np.concatenate((samples, accept), axis=0)
# Make sure there's no endless while loop
if it > max_iter:
break
else:
it += 1
if samples.shape[0] < n:
logger.warning("Parameter sample size is smaller than n: "
f"{samples.shape[0]}/{n}. Increase 'max_iter'.")
return samples[:n, :]
def _get_realizations(self, func, n=None, name=None, max_iter=10,
**kwargs):
"""Internal method to obtain n number of parameter realizations."""
if name:
kwargs["name"] = name
parameter_sample = self.get_parameter_sample(n=n, name=name,
max_iter=max_iter)
data = {}
for i, p in enumerate(parameter_sample):
data[i] = func(p=p, **kwargs)
return DataFrame.from_dict(data, orient="columns")
def _get_confidence_interval(self, func, n=None, name=None, alpha=0.05,
max_iter=10, **kwargs):
"""Internal method to obtain a confidence interval."""
q = [alpha / 2, 1 - alpha / 2]
data = self._get_realizations(func=func, n=n, name=name,
max_iter=max_iter, **kwargs)
return data.quantile(q=q, axis=1).transpose()
def _get_covariance_matrix(self, name=None):
"""Internal method to obtain the covariance matrix from the model.
Parameters
----------
name: str, optional
Name of the stressmodel or model component to obtain the
parameters for.
Returns
-------
pcov: pandas.DataFrame
Pandas DataFrame with the covariances for the parameters.
"""
if name:
index = self.ml.parameters.loc[
self.ml.parameters.loc[:, "name"] == name].index
else:
index = self.ml.parameters.index
pcov = self.pcov.reindex(index=index, columns=index).fillna(0)
return pcov
@staticmethod
def _get_correlations(pcov):
"""Internal method to obtain the parameter correlations from the
covariance matrix.
Parameters
----------
pcov: pandas.DataFrame
n x n Pandas DataFrame with the covariances.
Returns
-------
pcor: pandas.DataFrame
n x n Pandas DataFrame with the correlations.
"""
index = pcov.index
pcov = pcov.to_numpy()
v = np.sqrt(np.diag(pcov))
with np.errstate(divide='ignore', invalid='ignore'):
corr = pcov / np.outer(v, v)
corr[pcov == 0] = 0
pcor = DataFrame(data=corr, index=index, columns=index)
return pcor
def to_dict(self):
data = {
"name": self._name,
"pcov": self.pcov,
"nfev": self.nfev,
"obj_func": self.obj_func
}
return data
class LeastSquares(BaseSolver):
    _name = "LeastSquares"

    def __init__(self, ml, pcov=None, nfev=None, **kwargs):
        """Solver based on Scipy's least_squares method [scipy_ref]_.

        Notes
        -----
        This class is the default solve method called by the pastas Model solve
        method. All kwargs provided to the Model.solve() method are forwarded
        to the solver. From there, they are forwarded to Scipy least_squares
        solver.

        Examples
        --------
        >>> ml.solve(solver=ps.LeastSquares)

        References
        ----------
        .. [scipy_ref] https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html

        """
        BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)

    def solve(self, noise=True, weights=None, callback=None, **kwargs):
        """Solve the model parameters with scipy.optimize.least_squares.

        Parameters
        ----------
        noise: bool, optional
            Minimize the noise series instead of the residuals.
        weights: pandas.Series, optional
            Series used to weight the residual or noise series.
        callback: callable, optional
            Called with the parameter vector after each evaluation.

        Returns
        -------
        success: bool
            Whether the optimizer reports convergence.
        optimal: numpy.ndarray
            Full parameter vector (fixed parameters keep their initial value).
        stderr: numpy.ndarray
            Standard errors for the varied parameters, NaN for fixed ones.

        """
        self.vary = self.ml.parameters.vary.values.astype(bool)
        self.initial = self.ml.parameters.initial.values.copy()
        parameters = self.ml.parameters.loc[self.vary]

        # Set the boundaries; a missing (NaN) bound means unbounded.
        bounds = (np.where(parameters.pmin.isnull(), -np.inf, parameters.pmin),
                  np.where(parameters.pmax.isnull(), np.inf, parameters.pmax))

        self.result = least_squares(self.objfunction, bounds=bounds,
                                    x0=parameters.initial.values,
                                    args=(noise, weights, callback), **kwargs)

        self.pcov = DataFrame(self._get_covariances(self.result.jac,
                                                    self.result.cost),
                              index=parameters.index, columns=parameters.index)
        self.pcor = self._get_correlations(self.pcov)
        self.nfev = self.result.nfev
        self.obj_func = self.result.cost

        # Prepare return values. Bug fix: copy the initial array so the
        # optimal vector does not alias (and silently overwrite) the stored
        # initial parameter values.
        success = self.result.success
        optimal = self.initial.copy()
        optimal[self.vary] = self.result.x
        stderr = np.full(len(optimal), np.nan)
        stderr[self.vary] = np.sqrt(np.diag(self.pcov))

        return success, optimal, stderr

    def objfunction(self, p, noise, weights, callback):
        """Expand the varied parameters into a full vector and return the
        (weighted) misfit series for scipy.

        """
        # Bug fix: work on a copy so repeated objective evaluations do not
        # mutate self.initial through the shared numpy array.
        par = self.initial.copy()
        par[self.vary] = p
        return self.misfit(p=par, noise=noise, weights=weights,
                           callback=callback)

    def _get_covariances(self, jacobian, cost, absolute_sigma=False):
        """Internal method to get the covariance matrix from the jacobian.

        Parameters
        ----------
        jacobian: numpy.ndarray
        cost: float
        absolute_sigma: bool
            Default is False

        Returns
        -------
        pcov: numpy.array
            numpy array with the covariance matrix.

        Notes
        -----
        This method is copied from Scipy, please refer to:
        https://github.com/scipy/scipy/blob/v1.0.0/scipy/optimize/optimize.py

        """
        cost = 2 * cost  # res.cost is half sum of squares!
        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(jacobian, full_matrices=False)
        threshold = np.finfo(float).eps * max(jacobian.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s ** 2, VT)

        # NOTE(review): n_param counts *all* model parameters, including
        # fixed ones, while the jacobian only covers the varied parameters --
        # confirm this is the intended degrees-of-freedom correction.
        n_param = self.ml.parameters.index.size

        warn_cov = False
        if pcov is None:
            # indeterminate covariance
            pcov = np.zeros((n_param, n_param), dtype=float)
            pcov.fill(np.inf)
            warn_cov = True
        elif not absolute_sigma:
            if self.ml.oseries.series.index.size > n_param:
                # Scale by the residual variance estimate.
                s_sq = cost / (self.ml.oseries.series.index.size - n_param)
                pcov = pcov * s_sq
            else:
                pcov.fill(np.inf)
                warn_cov = True

        if warn_cov:
            logger.warning(
                'Covariance of the parameters could not be estimated')

        return pcov
class LmfitSolve(BaseSolver):
    _name = "LmfitSolve"

    def __init__(self, ml, pcov=None, nfev=None, **kwargs):
        """Solving the model using the LmFit solver [LM]_.

        This is basically a wrapper around the scipy solvers, adding some
        cool functionality for boundary conditions.

        References
        ----------
        .. [LM] https://github.com/lmfit/lmfit-py/

        """
        try:
            global lmfit
            import lmfit as lmfit  # Import Lmfit here, so it is no dependency
        except ImportError:
            msg = "lmfit not installed. Please install lmfit first."
            raise ImportError(msg)
        BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)

    def solve(self, noise=True, weights=None, callback=None, method="leastsq",
              **kwargs):
        """Solve the model parameters with an lmfit minimizer.

        Returns a (success, optimal, stderr) tuple like the other solvers.

        """
        # Deal with the parameters: translate the model's parameter table
        # into lmfit Parameters (NaN bounds/values become None = unbounded).
        parameters = lmfit.Parameters()
        p = self.ml.parameters.loc[:, ['initial', 'pmin', 'pmax', 'vary']]
        for k in p.index:
            pp = np.where(p.loc[k].isnull(), None, p.loc[k])
            parameters.add(k, value=pp[0], min=pp[1], max=pp[2], vary=pp[3])

        # Create the Minimizer object and minimize
        self.mini = lmfit.Minimizer(userfcn=self.objfunction, calc_covar=True,
                                    fcn_args=(noise, weights, callback),
                                    params=parameters, **kwargs)
        self.result = self.mini.minimize(method=method)

        # Set all parameter attributes; some lmfit methods do not provide a
        # covariance estimate, in which case pcov stays None.
        pcov = None
        if hasattr(self.result, "covar"):
            if self.result.covar is not None:
                pcov = self.result.covar
        names = self.result.var_names
        self.pcov = DataFrame(pcov, index=names, columns=names, dtype=float)
        self.pcor = self._get_correlations(self.pcov)

        # Set all optimization attributes
        self.nfev = self.result.nfev
        self.obj_func = self.result.chisqr

        if hasattr(self.result, "success"):
            success = self.result.success
        else:
            success = True
        optimal = np.array([p.value for p in self.result.params.values()])
        stderr = np.array([p.stderr for p in self.result.params.values()])

        # NOTE(review): when is_weighted=False, lmfit appends an extra
        # parameter (presumably __lnsigma); the slice drops that last entry
        # from the returned arrays -- confirm against the lmfit docs.
        idx = None
        if "is_weighted" in kwargs:
            if not kwargs["is_weighted"]:
                idx = -1
        return success, optimal[:idx], stderr[:idx]

    def objfunction(self, parameters, noise, weights, callback):
        """Adapter: unpack lmfit Parameters into a plain vector and return
        the (weighted) misfit series.

        """
        p = np.array([p.value for p in parameters.values()])
        return self.misfit(p=p, noise=noise, weights=weights,
                           callback=callback)
| mit | 170b489f7cb6148a86e4221ba81a4e9b | 35.714552 | 109 | 0.567 | 4.294849 | false | false | false | false |
pastas/pastas | pastas/stats/metrics.py | 2 | 17960 | """The following methods may be used to describe the fit between the model
simulation and the observations.
Examples
========
These methods may be used as follows:
>>> ps.stats.rmse(sim, obs)
or
>>> ml.stats.rmse()
"""
from logging import getLogger
from numpy import abs, average, log, nan, sqrt
from pastas.stats.core import _get_weights, mean, std, var
__all__ = ["rmse", "sse", "mae", "nse", "evp", "rsq", "bic", "aic",
"pearsonr", "kge_2012"]
logger = getLogger(__name__)
# Absolute Error Metrics
def mae(obs=None, sim=None, res=None, missing="drop", weighted=False,
        max_gap=30):
    """Compute the (weighted) Mean Absolute Error (MAE).

    Parameters
    ----------
    obs, sim: pandas.Series, optional
        Observed and simulated series; used to compute the residuals when
        res is not given.
    res: pandas.Series, optional
        Residual series. When provided, obs and sim are ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    weighted: bool, optional
        Weight each residual by the normalized time step to account for
        irregular time series.
    max_gap: int, optional
        Maximum gap (days) used when computing the weights; larger gaps are
        capped at this value.

    Returns
    -------
    float
        The (weighted) mean absolute error, or NaN when the time indices
        of sim and obs do not overlap.

    Notes
    -----
    .. math:: \\text{MAE} = \\sum_{i=1}^{N} w_i |x_i - y_i|

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    errors = abs(res.to_numpy())
    weights = _get_weights(res, weighted=weighted, max_gap=max_gap)
    return (weights * errors).sum()
def rmse(obs=None, sim=None, res=None, missing="drop", weighted=False,
         max_gap=30):
    """Compute the (weighted) Root Mean Squared Error (RMSE).

    Parameters
    ----------
    obs, sim: pandas.Series, optional
        Observed and simulated series; used to compute the residuals when
        res is not given.
    res: pandas.Series, optional
        Residual series. When provided, obs and sim are ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    weighted: bool, optional
        Weight each residual by the normalized time step to account for
        irregular time series.
    max_gap: int, optional
        Maximum gap (days) used when computing the weights.

    Returns
    -------
    float
        The (weighted) root mean squared error, or NaN when the time
        indices of sim and obs do not overlap.

    Notes
    -----
    .. math:: \\text{RMSE} = \\sqrt{\\sum_{i=1}^{N} w_i n_i^2}

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    squared = res.to_numpy() ** 2
    weights = _get_weights(res, weighted=weighted, max_gap=max_gap)
    return sqrt((weights * squared).sum())
def sse(obs=None, sim=None, res=None, missing="drop"):
    """Compute the Sum of the Squared Errors (SSE).

    Parameters
    ----------
    obs, sim: pandas.Series, optional
        Observed and simulated series; used to compute the residuals when
        res is not given.
    res: pandas.Series, optional
        Residual series. When provided, obs and sim are ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.

    Returns
    -------
    float
        Sum of the squared residuals, or NaN when the time indices of sim
        and obs do not overlap.

    Notes
    -----
    .. math:: \\text{SSE} = \\sum(r^2)

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    residuals = res.to_numpy()
    return (residuals ** 2).sum()
# Percentage Error Metrics
def pearsonr(obs, sim, missing="drop", weighted=False, max_gap=30):
    """Compute the (weighted) Pearson correlation (r).

    Parameters
    ----------
    obs: pandas.Series
        Series with the observed values.
    sim: pandas.Series
        Series with the simulated values.
    missing: str, optional
        Rule for missing values in the observed series; only "drop" is
        supported.
    weighted: bool, optional
        Weight the values by the normalized time step to account for
        irregular time series.
    max_gap: int, optional
        Maximum gap (days) used when computing the weights.

    Returns
    -------
    float
        The (weighted) Pearson correlation coefficient, or NaN when the
        time indices of sim and obs do not overlap.

    Notes
    -----
    .. math:: r = \\frac{\\sum_{i=1}^{N}w_i (x_i - \\bar{x})(y_i - \\bar{y})}
        {\\sqrt{\\sum_{i=1}^{N} w_i(x_i-\\bar{x})^2 \\sum_{i=1}^{N}
        w_i(y_i-\\bar{y})^2}}

    """
    if missing == "drop":
        obs = obs.dropna()

    # Weights are derived from the observed series before aligning sim.
    weights = _get_weights(obs, weighted=weighted, max_gap=max_gap)
    sim = sim.reindex(obs.index).dropna().to_numpy()

    if sim.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    # Center both series on their (weighted) means.
    sim = sim - average(sim, weights=weights)
    obs = obs.to_numpy() - average(obs.to_numpy(), weights=weights)

    numerator = (weights * sim * obs).sum()
    denominator = sqrt((weights * sim ** 2).sum() * (weights * obs ** 2).sum())
    return numerator / denominator
def evp(obs, sim=None, res=None, missing="drop", weighted=False, max_gap=30):
    """Compute the (weighted) Explained Variance Percentage (EVP).

    Parameters
    ----------
    obs: pandas.Series
        Series with the observed values.
    sim: pandas.Series, optional
        Series with the simulated values; used when res is not given.
    res: pandas.Series, optional
        Residual series. When provided, sim is ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    weighted: bool, optional
        Compute the variances using the time step between observations as
        weights.
    max_gap: int, optional
        Maximum gap (days) used when computing the weights.

    Returns
    -------
    float
        Explained variance percentage between 0 and 100, or NaN when the
        time indices of sim and obs do not overlap.

    Notes
    -----
    Commonly used goodness-of-fit metric for groundwater level models as
    computed in [asmuth_2012]_:

    .. math:: \\text{EVP} = \\frac{\\sigma_h^2 - \\sigma_r^2}{\\sigma_h^2}
        * 100

    where :math:`\\sigma_h^2` is the variance of the observations and
    :math:`\\sigma_r^2` is the variance of the residuals.

    References
    ----------
    .. [asmuth_2012] von Asmuth, J., K. Maas, M. Knotters, M. Bierkens,
       M. Bakker, T.N. Olsthoorn, D.G. Cirkel, I. Leunk, F. Schaars, and D.C.
       von Asmuth. 2012. Software for hydrogeologic time series analysis,
       interfacing data with physical insight. Environmental Modelling &
       Software 38: 178-130.

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    if obs.var() == 0.0:
        # A constant observed series leaves no variance to explain.
        return 100.

    explained = 1 - (var(res, weighted=weighted, max_gap=max_gap) /
                     var(obs, weighted=weighted, max_gap=max_gap))
    return max(0.0, explained) * 100
def nse(obs, sim=None, res=None, missing="drop", weighted=False, max_gap=30):
    """Compute the (weighted) Nash-Sutcliffe Efficiency (NSE).

    Parameters
    ----------
    obs: pandas.Series
        Series with the observed values.
    sim: pandas.Series, optional
        Series with the simulated values; used when res is not given.
    res: pandas.Series, optional
        Residual series. When provided, sim is ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    weighted: bool, optional
        Compute the terms using the time step between observations as
        weights.
    max_gap: int, optional
        Maximum gap (days) used when computing the weights.

    Returns
    -------
    float
        The Nash-Sutcliffe efficiency, or NaN when the time indices of sim
        and obs do not overlap.

    Notes
    -----
    NSE computed according to [nash_1970]_:

    .. math:: \\text{NSE} = 1 - \\frac{\\sum(h_s-h_o)^2}{\\sum(h_o-\\mu_{h,o})}

    References
    ----------
    .. [nash_1970] Nash, J. E., & Sutcliffe, J. V. (1970). River flow
       forecasting through conceptual models part I-A discussion of
       principles. Journal of hydrology, 10(3), 282-230.

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    weights = _get_weights(res, weighted=weighted, max_gap=max_gap)
    obs_arr = obs.to_numpy()
    mu = average(obs_arr, weights=weights)

    numerator = (weights * res.to_numpy() ** 2).sum()
    denominator = (weights * (obs_arr - mu) ** 2).sum()
    return 1 - numerator / denominator
def rsq(obs, sim=None, res=None, missing="drop", weighted=False, max_gap=30,
        nparam=None):
    """Compute R-squared, possibly adjusted for the number of free parameters.

    Parameters
    ----------
    obs: pandas.Series
        Series with the observed values.
    sim: pandas.Series, optional
        Series with the simulated values; used when res is not given.
    res: pandas.Series, optional
        Residual series. When provided, sim is ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    weighted: bool, optional
        Compute the terms using the time step between observations as
        weights.
    max_gap: int, optional
        Maximum gap (days) used when computing the weights.
    nparam: int, optional
        Number of calibrated parameters; when given, the adjusted
        R-squared is returned.

    Returns
    -------
    float
        The (adjusted) R-squared, or NaN when the time indices of sim and
        obs do not overlap.

    Notes
    -----
    .. math:: \\rho_{adj} = 1- \\frac{n-1}{n-n_{param}}*\\frac{rss}{tss}

    where n is the number of observations, :math:`n_{param}` the number of
    free parameters, rss the sum of the squared residuals, and tss the
    total sum of squared residuals.

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    weights = _get_weights(res, weighted=weighted, max_gap=max_gap)
    obs_arr = obs.to_numpy()
    mu = average(obs_arr, weights=weights)

    rss = (weights * res.to_numpy() ** 2.0).sum()
    tss = (weights * (obs_arr - mu) ** 2.0).sum()

    if nparam:
        # Adjust for the number of calibrated parameters.
        return 1.0 - (obs.size - 1.0) / (obs.size - nparam) * rss / tss
    return 1.0 - rss / tss
def bic(obs=None, sim=None, res=None, missing="drop", nparam=1):
    """Compute the Bayesian Information Criterium (BIC).

    Parameters
    ----------
    obs, sim: pandas.Series, optional
        Observed and simulated series; used to compute the residuals when
        res is not given.
    res: pandas.Series, optional
        Residual series. When provided, obs and sim are ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    nparam: int, optional
        Number of calibrated parameters.

    Returns
    -------
    float
        The BIC value, or NaN when the time indices of sim and obs do not
        overlap.

    Notes
    -----
    The Bayesian Information Criterium (BIC) [akaike_1979]_ is computed as:

    .. math:: \\text{BIC} = -2 log(L) + n_{param} * log(N)

    References
    ----------
    .. [akaike_1979] Akaike, H. (1979). A Bayesian extension of the minimum
       AIC procedure of autoregressive model fitting. Biometrika, 66(2),
       237-242.

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    n = res.index.size
    return n * log((res.to_numpy() ** 2.0).sum() / n) + nparam * log(n)
def aic(obs=None, sim=None, res=None, missing="drop", nparam=1):
    """Compute the Akaike Information Criterium (AIC).

    Parameters
    ----------
    obs, sim: pandas.Series, optional
        Observed and simulated series; used to compute the residuals when
        res is not given.
    res: pandas.Series, optional
        Residual series. When provided, obs and sim are ignored.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    nparam: int, optional
        Number of calibrated parameters.

    Returns
    -------
    float
        The AIC value, or NaN when the time indices of sim and obs do not
        overlap.

    Notes
    -----
    The Akaike Information Criterium (AIC) [akaike_1974]_ is computed as:

    .. math:: \\text{AIC} = -2 log(L) + 2 nparam

    where L is the likelihood function for the model.

    References
    ----------
    .. [akaike_1974] Akaike, H. (1974). A new look at the statistical model
       identification. IEEE transactions on automatic control, 19(6), 716-723.

    """
    if res is None:
        res = sim - obs
    if missing == "drop":
        res = res.dropna()

    if res.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    n = res.index.size
    return n * log((res.to_numpy() ** 2.0).sum() / n) + 2.0 * nparam
# Forecast Error Metrics
def kge_2012(obs, sim, missing="drop", weighted=False, max_gap=30):
    """Compute the (weighted) Kling-Gupta Efficiency (KGE).

    Parameters
    ----------
    obs: pandas.Series
        Series with the observed values.
    sim: pandas.Series
        Series with the simulated values.
    missing: str, optional
        Rule for missing values; only "drop" is supported.
    weighted: bool, optional
        Weight the values by the normalized time step to account for
        irregular time series.
    max_gap: int, optional
        Maximum gap (days) used when computing the weights.

    Returns
    -------
    float
        The Kling-Gupta efficiency, or NaN when the time indices of sim
        and obs do not overlap.

    Notes
    -----
    The (weighted) Kling-Gupta Efficiency [kling_2012]_ is computed as:

    .. math:: \\text{KGE} = 1 - \\sqrt{(r-1)^2 + (\\beta-1)^2 + (\\gamma-1)^2}

    where :math:`\\beta = \\bar{x} / \\bar{y}` and :math:`\\gamma =
    \\frac{\\bar{\\sigma}_x / \\bar{x}}{\\bar{\\sigma}_y / \\bar{y}}`. If
    weighted equals True, the weighted mean, variance and pearson
    correlation are used.

    References
    ----------
    .. [kling_2012] Kling, H., Fuchs, M., and Paulin, M. (2012). Runoff
       conditions in the upper Danube basin under an ensemble of climate
       change scenarios. Journal of Hydrology, 424-425:264 - 277.

    """
    if missing == "drop":
        obs = obs.dropna()
    sim = sim.reindex(obs.index).dropna()

    if sim.index.size == 0:
        # No overlapping observations: the metric is undefined.
        logger.warning("Time indices of the sim and obs don't match.")
        return nan

    # Correlation, bias ratio and variability ratio terms.
    r = pearsonr(obs=obs, sim=sim, weighted=weighted, max_gap=max_gap)
    mu_sim = mean(sim, weighted=weighted, max_gap=max_gap)
    mu_obs = mean(obs, weighted=weighted, max_gap=max_gap)

    beta = mu_sim / mu_obs
    gamma = (std(sim, weighted=weighted, max_gap=max_gap) / mu_sim) / \
        (std(obs, weighted=weighted, max_gap=max_gap) / mu_obs)

    return 1 - sqrt((r - 1) ** 2 + (beta - 1) ** 2 + (gamma - 1) ** 2)
| mit | c554a2b78b235c9fb6e2914e776d0b25 | 31.829982 | 79 | 0.619835 | 3.732696 | false | false | false | false |
pastas/pastas | tests/test_recharge.py | 1 | 3575 | from pandas import read_csv, Series
from numpy import sin, arange, isclose
import pastas as ps
# Load series before
# Load the fixture series once at import time. Rain and evaporation are
# scaled by 1e3 (presumably m -> mm; confirm against the data files) and
# trimmed to 2005 onward.
rain = read_csv("tests/data/rain.csv", index_col=0,
                parse_dates=True).squeeze("columns").loc["2005":] * 1e3
evap = read_csv("tests/data/evap.csv", index_col=0,
                parse_dates=True).squeeze("columns").loc["2005":] * 1e3
obs = read_csv("tests/data/obs.csv", index_col=0,
               parse_dates=True).squeeze("columns")
# Synthetic smoothly-varying "temperature" signal on the evaporation index,
# used by the snow-model tests below.
temp = Series(index=evap.index, data=sin(arange(evap.size) / 365 * 6),
              dtype=float)
def test_create_rechargemodel():
    """Create a default RechargeModel; also used as a fixture by other tests."""
    rm = ps.RechargeModel(prec=rain, evap=evap)
    return rm
def test_default_model():
    """Build a Model with the default recharge stress model attached."""
    ml = ps.Model(obs, name="rch_model")
    rm = test_create_rechargemodel()
    ml.add_stressmodel(rm)
    return ml
def test_model_solve():
    """Smoke test: the default recharge model solves without raising."""
    ml = test_default_model()
    ml.solve()
    return
def test_model_copy():
    """Smoke test: the default recharge model can be copied."""
    ml = test_default_model()
    ml.copy()
    return
def test_berendrecht():
    """Smoke test: solve a model using the Berendrecht recharge concept."""
    ml = ps.Model(obs, name="rch_model")
    rm = ps.RechargeModel(prec=rain, evap=evap, recharge=ps.rch.Berendrecht())
    ml.add_stressmodel(rm)
    ml.solve()
    return
def test_linear():
    """Smoke test: solve a model using the Linear recharge concept."""
    ml = ps.Model(obs, name="rch_model")
    rm = ps.RechargeModel(prec=rain, evap=evap, recharge=ps.rch.Linear())
    ml.add_stressmodel(rm)
    ml.solve()
    return
def test_flexmodel():
    """Smoke test: solve a model using the FlexModel recharge concept."""
    ml = ps.Model(obs, name="rch_model")
    rm = ps.RechargeModel(prec=rain, evap=evap, recharge=ps.rch.FlexModel())
    ml.add_stressmodel(rm)
    ml.solve()
    return
def test_flexmodel_no_interception():
    """Smoke test: FlexModel with the interception reservoir disabled."""
    ml = ps.Model(obs, name="rch_model")
    rm = ps.RechargeModel(prec=rain, evap=evap,
                          recharge=ps.rch.FlexModel(interception=False))
    ml.add_stressmodel(rm)
    ml.solve()
    return
def test_flexmodel_gw_uptake():
    """Smoke test: FlexModel with groundwater uptake enabled."""
    ml = ps.Model(obs, name="rch_model")
    rm = ps.RechargeModel(prec=rain, evap=evap,
                          recharge=ps.rch.FlexModel(gw_uptake=True))
    ml.add_stressmodel(rm)
    ml.solve()
    return
def test_flexmodel_snow():
    """Smoke test: FlexModel with the snow module, which needs temperature."""
    ml = ps.Model(obs, name="rch_model")
    rm = ps.RechargeModel(prec=rain, evap=evap, temp=temp,
                          recharge=ps.rch.FlexModel(snow=True))
    ml.add_stressmodel(rm)
    ml.solve()
    return
def test_flexmodel_water_balance_rootzone():
    """The FlexModel root-zone fluxes must close the water balance."""
    rch = ps.rch.FlexModel()
    e = evap.to_numpy()
    p = rain.to_numpy()
    sr, r, ea, q, pe = rch.get_root_zone_balance(p, e)
    # Storage change over the period plus the summed fluxes should cancel.
    error = (sr[0] - sr[-1] + (r + ea + q + pe)[:-1].sum())
    assert isclose(error, 0)
def test_flexmodel_water_balance_snow():
rch = ps.rch.FlexModel()
p = rain.to_numpy()
t = temp.to_numpy()
ss, snow, m = rch.get_snow_balance(p, t)
error = (ss[0] - ss[-1] + (snow + m)[:-1].sum())
assert isclose(error, 0)
def test_flexmodel_water_balance_interception():
rch = ps.rch.FlexModel()
e = evap.to_numpy()
p = rain.to_numpy()
si, ei, pi = rch.get_interception_balance(p, e)
error = (si[0] - si[-1] + (pi + ei)[:-1].sum())
assert isclose(error, 0)
def test_peterson():
    """The Peterson recharge model solves without error."""
    model = ps.Model(obs, name="rch_model")
    stress = ps.RechargeModel(prec=rain, evap=evap,
                              recharge=ps.rch.Peterson())
    model.add_stressmodel(stress)
    model.solve()
# don't test water balance because of forward Euler
# def test_peterson_water_balance():
# rch = ps.rch.Peterson()
# e = evap.to_numpy()
# p = rain.to_numpy()
# r, sm, ea, pe = rch.get_recharge(p, e)
# error = (sm[0] - sm[-1] + (r + ea + pe)[:-1].sum())
# assert isclose(error, 0) | mit | 3b5ff27a10f6abe28df2338189b44f85 | 25.887218 | 78 | 0.607832 | 2.773468 | false | true | false | false |
pipermerriam/flex | flex/validation/parameter.py | 1 | 5834 | # Standard libraries
import re
from flex.datastructures import ValidationDict
from flex.utils import is_non_string_iterable
from flex.exceptions import (
ValidationError,
MultipleParametersFound,
NoParameterFound,
)
from flex.error_messages import MESSAGES
from flex.context_managers import ErrorDict
from flex.validation.reference import (
LazyReferenceValidator,
)
from flex.validation.common import (
noop,
generate_type_validator,
generate_format_validator,
generate_multiple_of_validator,
generate_minimum_validator,
generate_maximum_validator,
generate_min_length_validator,
generate_max_length_validator,
generate_min_items_validator,
generate_max_items_validator,
generate_unique_items_validator,
generate_pattern_validator,
generate_enum_validator,
generate_object_validator,
generate_value_processor,
)
from flex.validation.schema import (
construct_schema_validators,
generate_items_validator,
)
from flex.parameters import find_parameter
from flex.paths import NORMALIZE_SLASH_REGEX, path_to_regex
from flex.constants import EMPTY
def validate_required(value, **kwargs):
    """Reject the ``EMPTY`` sentinel used for missing required values."""
    if value is not EMPTY:
        return
    raise ValidationError(MESSAGES['required']['required'])


def generate_required_validator(required, **kwargs):
    """Return the required-check when *required* is truthy, else a no-op."""
    return validate_required if required else noop
def type_cast_parameters(parameter_values, parameter_definitions, context):
    """Coerce each raw value using its matching parameter definition.

    Values whose name has no (single) matching definition are silently
    dropped from the result.
    """
    typed = {}
    for name, raw_value in parameter_values.items():
        try:
            definition = find_parameter(parameter_definitions, name=name)
        except (KeyError, MultipleParametersFound, NoParameterFound):
            continue
        process = generate_value_processor(context=context, **definition)
        typed[name] = process(raw_value)
    return typed
def get_path_parameter_values(target_path, api_path, path_parameters, context):
    """Extract and type-cast the path parameter values of *target_path*.

    NOTE(review): this assumes *target_path* matches the regex built from
    *api_path*; a non-matching path would raise AttributeError because
    ``.match`` returns ``None`` -- confirm callers guarantee a match.
    """
    raw_values = path_to_regex(
        api_path,
        path_parameters,
    ).match(target_path).groupdict()
    return type_cast_parameters(raw_values, path_parameters, context=context)
def validate_path_parameters(target_path, api_path, path_parameters, context):
    """Validate the path parameters found in *target_path*.

    The API path is prefixed with the spec's ``basePath`` (collapsing any
    duplicate slashes) before the parameter values are extracted.
    """
    prefixed_path = context.get('basePath', '') + api_path
    normalized_path = re.sub(NORMALIZE_SLASH_REGEX, '/', prefixed_path)
    values = get_path_parameter_values(
        target_path, normalized_path, path_parameters, context,
    )
    validate_parameters(values, path_parameters, context=context)
def validate_query_parameters(raw_query_data, query_parameters, context):
    """Unwrap, type-cast, and validate raw query-string data.

    Single-element iterables (e.g. ``?a=1`` parsed as ``['1']``) are
    collapsed to their sole element before casting.
    """
    unwrapped = {
        key: value[0]
        if is_non_string_iterable(value) and len(value) == 1
        else value
        for key, value in raw_query_data.items()
    }
    typed = type_cast_parameters(unwrapped, query_parameters, context)
    validate_parameters(typed, query_parameters, context)
def validate_parameters(parameter_values, parameters, context):
    """Run every parameter validator, collecting failures per parameter.

    ``ErrorDict`` raises a single aggregated ValidationError on context
    exit if any validator recorded an error.
    """
    validators = construct_multi_parameter_validators(parameters, context=context)
    with ErrorDict() as errors:
        for key, validator in validators.items():
            try:
                # A missing parameter is validated as the EMPTY sentinel so
                # that the `required` validator can reject it.
                validator(parameter_values.get(key, EMPTY))
            except ValidationError as err:
                errors[key].add_error(err.detail)
def construct_parameter_validators(parameter, context):
    """Build a ValidationDict of validator callables for one parameter
    definition.

    A ``$ref`` is handled lazily; known keywords are dispatched through
    ``validator_mapping``; a ``schema`` contributes its validators without
    overriding keys already present.
    """
    validators = ValidationDict()
    if '$ref' in parameter:
        validators.add_validator(
            '$ref', ParameterReferenceValidator(parameter['$ref'], context),
        )
    for field in parameter:
        factory = validator_mapping.get(field)
        if factory is not None:
            validators.add_validator(
                field, factory(context=context, **parameter),
            )
    if 'schema' in parameter:
        schema_validators = construct_schema_validators(
            parameter['schema'], context=context,
        )
        for field, validator in schema_validators.items():
            validators.setdefault(field, validator)
    return validators
# Maps Swagger parameter-definition keywords to the factory function that
# builds the corresponding validator callable.
validator_mapping = {
    'type': generate_type_validator,
    'format': generate_format_validator,
    'required': generate_required_validator,
    'multipleOf': generate_multiple_of_validator,
    'minimum': generate_minimum_validator,
    'maximum': generate_maximum_validator,
    'minLength': generate_min_length_validator,
    'maxLength': generate_max_length_validator,
    'minItems': generate_min_items_validator,
    'maxItems': generate_max_items_validator,
    'uniqueItems': generate_unique_items_validator,
    'enum': generate_enum_validator,
    'pattern': generate_pattern_validator,
    'items': generate_items_validator,
}
def construct_multi_parameter_validators(parameters, context):
    """Build one object-validator per uniquely-named parameter.

    Parameter names must be unique within *parameters*; a duplicate name
    raises ValueError.
    """
    validators = ValidationDict()
    for parameter in parameters:
        name = parameter['name']
        if name in validators:
            raise ValueError("Duplicate parameter name {0}".format(name))
        field_validators = construct_parameter_validators(
            parameter, context=context,
        )
        validators.add_validator(
            name,
            generate_object_validator(field_validators=field_validators),
        )
    return validators
class ParameterReferenceValidator(LazyReferenceValidator):
    """Resolves a parameter ``$ref`` lazily and validates its target with
    the parameter validators built by ``construct_parameter_validators``."""
    validators_constructor = construct_parameter_validators
| mit | c60e27d48dcd755668356bb6a191cd21 | 33.116959 | 93 | 0.698149 | 4.311899 | false | false | false | false |
pipermerriam/flex | flex/loading/schema/paths/path_item/operation/responses/__init__.py | 1 | 1191 | import functools
from flex.datastructures import (
ValidationDict,
ValidationList,
)
from flex.constants import (
OBJECT,
)
from flex.validation.common import (
generate_object_validator,
apply_validator_to_object,
)
from flex.validation.utils import (
generate_any_validator,
)
from flex.loading.common.reference import (
reference_object_validator,
)
from .single import (
single_response_validator,
)
# A Swagger "Responses" object maps status codes (or 'default') to either a
# Response object or a Reference object.
responses_schema = {
    'type': OBJECT,
}
field_validators = ValidationDict()
# The optional 'default' key may hold either kind of object.
field_validators.add_property_validator(
    'default',
    generate_any_validator(
        referenceObject=reference_object_validator,
        responseObject=single_response_validator,
    ),
)
non_field_validators = ValidationList()
# Every other value (one per status code) must likewise be a reference or a
# response object.
non_field_validators.add_validator(
    functools.partial(
        apply_validator_to_object,
        validator=generate_any_validator(
            referenceObject=reference_object_validator,
            responseObject=single_response_validator,
        ),
    )
)
responses_validator = generate_object_validator(
    schema=responses_schema,
    field_validators=field_validators,
    non_field_validators=non_field_validators,
)
| mit | 95fc39f759683331b86fb31fda38bcbe | 19.894737 | 55 | 0.712007 | 3.97 | false | false | true | false |
pipermerriam/flex | flex/loading/common/single_header/__init__.py | 1 | 2366 | from flex.datastructures import (
ValidationDict,
)
from flex.error_messages import (
MESSAGES,
)
from flex.constants import (
OBJECT,
ARRAY,
EMPTY,
)
from flex.exceptions import ValidationError
from flex.utils import (
pluralize,
)
from flex.decorators import (
pull_keys_from_obj,
suffix_reserved_words,
)
from flex.loading.common import (
field_validators as common_field_validators,
non_field_validators as common_non_field_validators,
type_validators as common_type_validators,
)
from flex.loading.common.default import (
validate_default_is_of_one_of_declared_types,
)
from flex.validation.common import (
generate_object_validator,
)
from flex.loading.common.format import (
format_validator,
)
from .description import (
description_validator,
)
from .type import (
type_validator,
)
from .collection_format import (
collection_format_validator,
)
# Schema for a single Swagger Header object: only 'type' is mandatory.
single_header_schema = {
    'type': OBJECT,
    'required': [
        'type',
    ]
}
single_header_field_validators = ValidationDict()
single_header_field_validators.update(common_field_validators)
single_header_field_validators.add_property_validator('description', description_validator)
single_header_field_validators.add_property_validator('type', type_validator)
single_header_field_validators.add_property_validator('format', format_validator)
single_header_field_validators.add_property_validator(
    'collectionFormat', collection_format_validator,
)
# NOTE(review): "arraw" is a typo for "array"; the name is module-level
# (potentially imported elsewhere) so it is deliberately left unchanged.
@pull_keys_from_obj('type', 'items')
@suffix_reserved_words
def validate_items_required_if_type_arraw(type_, items, **kwargs):
    # Swagger requires `items` whenever the header's type (or one of its
    # types) is `array`.
    types = pluralize(type_)
    if ARRAY in types and items is EMPTY:
        raise ValidationError(MESSAGES['required']['required'])
single_header_non_field_validators = ValidationDict()
single_header_non_field_validators.update(common_non_field_validators)
single_header_non_field_validators.update(common_type_validators)
single_header_non_field_validators.add_validator(
    'default', validate_default_is_of_one_of_declared_types,
)
single_header_non_field_validators.add_validator(
    'items', validate_items_required_if_type_arraw,
)
single_header_validator = generate_object_validator(
    schema=single_header_schema,
    field_validators=single_header_field_validators,
    non_field_validators=single_header_non_field_validators,
)
| mit | d842d234ace38d515d74b7c48d975b97 | 27.166667 | 91 | 0.751057 | 3.595745 | false | false | false | false |
pipermerriam/flex | tests/loading/schema/paths/operation/responses/test_response_validation.py | 1 | 2139 | import pytest
from flex.error_messages import MESSAGES
from flex.exceptions import (
ValidationError,
)
from flex.loading.schema.paths.path_item.operation.responses import (
responses_validator,
)
def test_description_is_required(msg_assertions):
with pytest.raises(ValidationError) as err:
responses_validator({
200: {},
})
msg_assertions.assert_message_in_errors(
MESSAGES['required']['required'],
err.value.detail,
'200.required.description',
)
def test_response_as_reference_missing_description(msg_assertions):
responses = {
200: {
'$ref': '#/responses/SomeResponse'
},
}
context = {
'responses': {
'SomeResponse': {},
},
}
try:
responses_validator(responses, context=context)
except ValidationError as err:
errors = err.detail
else:
errors = {}
msg_assertions.assert_path_not_in_errors('200', errors)
def test_with_description(msg_assertions):
try:
responses_validator({
200: {'description': 'A Description'},
})
except ValidationError as err:
errors = err.detail
else:
errors = {}
msg_assertions.assert_path_not_in_errors('200', errors)
def test_with_description_in_reference(msg_assertions):
responses = {
200: {'$ref': '#/responses/SomeResponse'},
}
context = {
'responses': {
'SomeResponse': {'description': 'A Description'},
},
}
try:
responses_validator(responses, context=context)
except ValidationError as err:
errors = err.detail
else:
errors = {}
msg_assertions.assert_path_not_in_errors('200', errors)
def test_with_missing_reference(msg_assertions):
responses = {
200: {'$ref': '#/responses/UnknownReference'},
}
with pytest.raises(ValidationError) as err:
responses_validator(responses, context={})
msg_assertions.assert_message_in_errors(
MESSAGES['reference']['undefined'],
err.value.detail,
'200.$ref',
)
| mit | 95eab451e80c622940bf32d771a4857e | 23.033708 | 69 | 0.603086 | 4.051136 | false | true | false | false |
kornai/4lang | src/fourlang/longman_parser.py | 3 | 3441 | #!/usr/bin/env python
# Module for reading Longman XML and producing JSON output
from collections import defaultdict
import json
import re
import sys
from xml_parser import XMLParser
assert json # silence pyflakes
class LongmanParser(XMLParser):
    """Parse Longman dictionary XML into plain Python dictionaries.

    NOTE: this module is Python 2 only (print statements, ``iterkeys``,
    and reliance on ``map`` returning a list).
    """
    @staticmethod
    def add_suffixes(text):
        """Fold <SUFFIX> markup back into the surrounding word."""
        return re.sub(" <SUFFIX> (.*?) </SUFFIX>", "\\1", text)
    @staticmethod
    def remove_extra_whitespace(text):
        """Collapse all runs of whitespace to single spaces; None passes through."""
        if text is None:
            return None
        return " ".join(text.split()).strip()
    @staticmethod
    def clean_definition(definition):
        """Strip markup tags/sections from a raw definition string."""
        if definition is None:
            return definition
        # These tags are unwrapped (content kept), ...
        for tag in ("TEXT", "NonDV", "REFHWD", "FULLFORM", "PRON",
                    "PronCodes", "ABBR"):
            definition = LongmanParser.remove_tags(tag, definition)
        # ... while these sections are dropped entirely.
        for tag in ("REFSENSENUM", "REFHOMNUM", "GLOSS"):
            definition = LongmanParser.remove_sections(tag, definition)
        definition = LongmanParser.remove_extra_whitespace(definition)
        definition = LongmanParser.add_suffixes(definition)
        return definition
    @staticmethod
    def parse_sense(text):
        """Extract one sense: its full form, cleaned definition and MWE."""
        definition = LongmanParser.clean_definition(
            LongmanParser.get_section("DEF", text))
        full_form = LongmanParser.get_section("FULLFORM", text)
        mwe = LongmanParser.get_section("LEXUNIT", text)
        return {"full_form": full_form, "definition": definition, 'mwe': mwe}
    @staticmethod
    def get_headword(entry_text):
        """Return the first group of "HWD" in entry_text"""
        return LongmanParser.remove_extra_whitespace(
            LongmanParser.get_section("HWD", entry_text))
    @staticmethod
    def get_pos(entry_text):
        """Return the entry's normalized part-of-speech string."""
        return LongmanParser.remove_extra_whitespace(
            LongmanParser.get_section("POS", entry_text))
    @staticmethod
    def parse_entry(entry_text):
        """Parse one dictionary entry into a dict of headword and senses."""
        entry = {
            "hw": LongmanParser.get_headword(entry_text),
            # NOTE: relies on Python 2 map() returning a list -- the senses
            # are iterated again below to attach the POS.
            "senses": map(
                LongmanParser.parse_sense,
                LongmanParser.iter_sections("Sense", entry_text)),
        }
        pos = LongmanParser.get_pos(entry_text)
        for sense in entry['senses']:
            sense['pos'] = pos
        hom_num = LongmanParser.get_section('HOMNUM', entry_text)
        if hom_num is not None:
            entry['hom_num'] = hom_num.strip()
        return entry
    @staticmethod
    def parse_xml(xml_text):
        """Give items of generator of "Entry" strings in xml_text to
        'parse_entry' method one by one."""
        for raw_entry in LongmanParser.iter_sections("Entry", xml_text):
            yield LongmanParser.parse_entry(raw_entry)
    @staticmethod
    def print_defs(longman_obj):
        """Print tab-separated headword/definition pairs as UTF-8."""
        for entry in longman_obj:
            for sense in entry['senses']:
                print u"{0}\t{1}".format(
                    entry['hw'], sense['definition']).encode("utf-8")
    @staticmethod
    def print_sorted_defs(longman_obj):
        """Like print_defs, but grouped and sorted by headword."""
        index = defaultdict(list)
        for e in longman_obj:
            index[e['hw']].append(e)
        for hw in sorted(index.iterkeys()):
            for entry in index[hw]:
                for sense in entry['senses']:
                    print u"{0}\t{1}".format(
                        hw, sense['definition']).encode("utf-8")
if __name__ == "__main__":
    LongmanParser.print_sorted_defs(LongmanParser.parse_file(sys.argv[1]))
| mit | a88b9bb642ad5f8ef98bce0523efd84f | 31.462264 | 77 | 0.598663 | 3.941581 | false | false | false | false |
joke2k/django-environ | docs/conf.py | 1 | 5973 | # This file is part of the django-environ.
#
# Copyright (c) 2021-2022, Serghei Iakovlev <egrep@protonmail.ch>
# Copyright (c) 2013-2021, Daniele Faraglia <daniele.faraglia@gmail.com>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.
#
# -- Utils ---------------------------------------------------------
#
import codecs
import os
import sys
import re
from datetime import date
PROJECT_DIR = os.path.abspath('..')
sys.path.insert(0, PROJECT_DIR)
def read_file(filepath):
    """Return the full contents of *filepath* decoded as UTF-8."""
    with codecs.open(filepath, 'rb', 'utf-8') as handle:
        contents = handle.read()
    return contents
def find_version(meta_file):
    """Extract the ``__version__`` string from *meta_file*.

    *meta_file* is resolved relative to ``PROJECT_DIR``.  Raises
    RuntimeError when no assignment can be found.
    """
    contents = read_file(os.path.join(PROJECT_DIR, meta_file))
    match = re.search(
        r"^__version__\s+=\s+['\"]([^'\"]*)['\"]",
        contents,
        re.M
    )
    if not match:
        raise RuntimeError(
            "Unable to find __version__ string in package meta file")
    return match.group(1)
#
# -- Project information -----------------------------------------------------
#
# General information about the project.
project = "django-environ"
copyright = f'2013-{date.today().year}, Daniele Faraglia and other contributors'
author = u"Daniele Faraglia \\and Serghei Iakovlev"
#
# -- General configuration ---------------------------------------------------
#
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"notfound.extension",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# Allow non-local URIs so we can have images in CHANGELOG etc.
suppress_warnings = [
"image.nonlocal_uri",
]
# The master toctree document.
master_doc = "index"
# The version info
# The short X.Y version.
release = find_version(os.path.join("environ", "__init__.py"))
version = release.rsplit(u".", 1)[0]
# The full version, including alpha/beta/rc tags.
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
#
# -- Options for autodoc ---------------------------------------------------
#
# This value selects if automatically documented members are sorted alphabetical
# (value 'alphabetical'), by member type (value 'groupwise') or by source order
# (value 'bysource'). The default is alphabetical.
#
# Note that for source order, the module must be a Python module with the
# source code available.
autodoc_member_order = 'bysource'
#
# -- Options for linkcheck ---------------------------------------------------
#
linkcheck_ignore = [
# We run into GitHub's rate limits.
r"https://github.com/.*/(issues|pull)/\d+",
# Do not check links to compare tags.
r"https://github.com/joke2k/django-environ/compare/.*",
]
#
# -- Options for nitpick -----------------------------------------------------
#
# In nitpick mode (-n), still ignore any of the following "broken" references
# to non-types.
nitpick_ignore = [
]
#
# -- Options for extlinks ----------------------------------------------------
#
extlinks = {
"pypi": ("https://pypi.org/project/%s/", ""),
}
#
# -- Options for intersphinx -------------------------------------------------
#
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
}
#
# -- Options for TODOs -------------------------------------------------------
#
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# html_favicon = None
html_theme = "furo"
html_title = project
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_openserver = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "django-environ-doc"
#
# -- Options for manual page output ------------------------------------------
#
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", project, "django-environ Documentation", [author], 1)
]
#
# -- Options for Texinfo output ----------------------------------------------
#
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
project,
"django-environ Documentation",
author,
project,
"Configure Django made easy.",
"Miscellaneous",
)
]
| mit | 34620ee0ec444d9a9e608dc5c31e1ecf | 26.027149 | 80 | 0.612757 | 3.751884 | false | false | false | false |
joke2k/django-environ | environ/fileaware_mapping.py | 1 | 3113 | # This file is part of the django-environ.
#
# Copyright (c) 2021-2022, Serghei Iakovlev <egrep@protonmail.ch>
# Copyright (c) 2013-2021, Daniele Faraglia <daniele.faraglia@gmail.com>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.
"""Docker-style file variable support module."""
import os
from collections.abc import MutableMapping
class FileAwareMapping(MutableMapping):
    """
    A mapping that wraps os.environ, first checking for the existence of a key
    appended with ``_FILE`` whenever reading a value. If a matching file key is
    found then the value is instead read from the file system at this location.
    By default, values read from the file system are cached so future lookups
    do not hit the disk again.
    A ``_FILE`` key has higher precedence than a value set directly in the
    environment, and an exception is raised if the file can not be found.
    """
    def __init__(self, env=None, cache=True):
        """
        Initialize the mapping.
        :param env:
            where to read environment variables from (defaults to
            ``os.environ``)
        :param cache:
            cache environment variables read from the file system (defaults to
            ``True``)
        """
        self.env = os.environ if env is None else env
        self.cache = cache
        self.files_cache = {}
    def __getitem__(self, key):
        # Cached file-backed values win outright.
        if self.cache and key in self.files_cache:
            return self.files_cache[key]
        path = self.env.get(key + "_FILE")
        if not path:
            # No file indirection: fall back to the plain env value.
            return self.env[key]
        with open(path) as handle:
            contents = handle.read()
        if self.cache:
            self.files_cache[key] = contents
        return contents
    def __iter__(self):
        """
        Iterate all keys, also always including the shortened key if ``_FILE``
        keys are found.
        """
        for key in self.env:
            yield key
            if not key.endswith("_FILE"):
                continue
            short_key = key[:-5]
            if short_key and short_key not in self.env:
                yield short_key
    def __len__(self):
        """
        Return the number of keys, also always counting shortened keys for
        any ``_FILE`` key found.
        """
        return sum(1 for _ in self)
    def __setitem__(self, key, value):
        self.env[key] = value
        # Writing FOO_FILE invalidates any cached value for FOO.
        if self.cache and key.endswith("_FILE"):
            short_key = key[:-5]
            if short_key:
                self.files_cache.pop(short_key, None)
    def __delitem__(self, key):
        file_key = key + "_FILE"
        if file_key in self.env:
            # Deleting FOO removes FOO_FILE (and FOO itself, if set).
            del self[file_key]
            if key in self.env:
                del self.env[key]
            return
        if self.cache and key.endswith("_FILE"):
            short_key = key[:-5]
            if short_key:
                self.files_cache.pop(short_key, None)
        del self.env[key]
| mit | 27cb905a2a7ebfa7e25792b7b965e86c | 32.836957 | 79 | 0.58079 | 4.048114 | false | false | false | false |
hynek/doc2dash | src/doc2dash/parsers/patcher.py | 1 | 2074 | from __future__ import annotations
import logging
import urllib
from collections import defaultdict
from pathlib import Path
from typing import Generator
from rich.progress import Progress
from ..output import console
from .types import EntryType, Parser, ParserEntry
log = logging.getLogger(__name__)
def patch_anchors(
    parser: Parser, docs: Path, show_progressbar: bool
) -> Generator[None, ParserEntry, None]:
    """
    Consume ``ParseEntry``s then patch docs for TOCs by calling
    *parser*'s ``find_entry_and_add_ref``.

    This is a coroutine: the caller ``send()``s entries into it and
    ``close()``s it when done; patching happens on close.
    """
    # Entries grouped per file name: {fname: [(name, type, anchor), ...]}.
    files = defaultdict(list)
    num = 0
    try:
        while True:
            pentry = yield
            try:
                # Only entries with exactly one '#' (file + anchor) can be
                # patched; split() raising ValueError skips the rest.
                fname, anchor = pentry.path.split("#")
                files[urllib.parse.unquote(fname)].append(
                    (pentry.name, pentry.type, anchor)
                )
                num += 1
            except ValueError:
                # pydoctor has no anchors for e.g. classes
                pass
    except GeneratorExit:
        # Caller closed the coroutine: all entries collected, patch now.
        pass
    with Progress(console=console, disable=not show_progressbar) as pbar:
        _patch_files(parser, docs, files, num, pbar)
def _patch_files(
    parser: Parser,
    docs: Path,
    files: dict[str, list[tuple[str, EntryType, str]]],
    num: int,
    pbar: Progress,
) -> None:
    """Insert Dash TOC anchors for *num* entries grouped per file.

    Entries whose anchor cannot be located are counted and reported in a
    single warning at the end.
    """
    entry_task = pbar.add_task("Patching for TOCs...", total=num)
    num_failed = 0
    for fname, entries in files.items():
        with parser.make_patcher_for_file(docs / fname) as patch:
            # NOTE: `type` shadows the builtin here; kept for byte-level
            # compatibility with the tuple layout built in patch_anchors.
            for (name, type, anchor) in entries:
                if not patch(
                    name, type, anchor, f"//apple_ref/cpp/{type.value}/{name}"
                ):
                    log.debug(
                        "Can't find anchor '%s' (%s) in '%s'.",
                        anchor,
                        type,
                        fname,
                    )
                    num_failed += 1
            pbar.update(entry_task, advance=1)
    if num_failed:
        log.warning("Failed to add anchors for %s TOC entries.", num_failed)
| mit | faaa2cedfa0b27210f6ca067d245abf5 | 27.410959 | 78 | 0.547734 | 4.139721 | false | false | false | false |
hynek/doc2dash | tests/parsers/intersphinx/test_intersphinx.py | 1 | 9429 | from pathlib import Path
import pytest
from bs4 import BeautifulSoup
from doc2dash.parsers.intersphinx import (
InterSphinxParser,
_find_entry_and_add_ref,
)
from doc2dash.parsers.types import EntryType, ParserEntry
HERE = Path(__file__).parent
class TestInterSphinxParser:
    """Behavior of InterSphinxParser and its customization hooks."""
    def test_parses(self, sphinx_built):
        """
        Parsing of the example objects.inv in the current directory does not
        fail.
        """
        p = InterSphinxParser(source=sphinx_built)
        assert [] != list(p.parse())
    def test_inv_to_entries(self, sphinx_built):
        """
        Inventory items are correctly converted.
        """
        p = InterSphinxParser(source=sphinx_built)
        result = list(
            p._inv_to_entries(
                {
                    "py:method": {"some_method": ("some_module.py", "-")},
                    "std:option": {
                        "--destination": (
                            "index.html#cmdoption--destination",
                            "-",
                        )
                    },
                    "std:constant": {
                        "SomeConstant": (
                            "some_other_module.py",
                            "-",
                        )
                    },
                }
            )
        )
        # Order of emitted entries is irrelevant; compare as a set.
        assert {
            ParserEntry(
                name="some_method",
                type=EntryType.METHOD,
                path="some_module.py",
            ),
            ParserEntry(
                name="--destination",
                type=EntryType.OPTION,
                path="index.html#cmdoption--destination",
            ),
            ParserEntry(
                name="SomeConstant",
                type=EntryType.CONSTANT,
                path="some_other_module.py",
            ),
        } == set(result)
    def test_convert_type_override(self, sphinx_built):
        """
        `convert_type` can be overridden.
        We check that we can hide some key of choice.
        """
        class MyInterSphinxParser(InterSphinxParser):
            def convert_type(self, inv_type):
                if inv_type == "py:method":
                    # hide method entries
                    return
                return super().convert_type(inv_type)
        p = MyInterSphinxParser(source=sphinx_built)
        result = list(
            p._inv_to_entries(
                {
                    "py:method": {"some_method": ("some_module.py", "-")},
                    "std:constant": {
                        "SomeConstant": (
                            "some_other_module.py",
                            "-",
                        )
                    },
                }
            )
        )
        # The py:method entry must have been filtered out.
        assert [
            ParserEntry(
                name="SomeConstant",
                type=EntryType.CONSTANT,
                path="some_other_module.py",
            )
        ] == result
    def test_create_entry_override(self, sphinx_built):
        """
        `create_entry` has the expected interface and can be overridden.
        We check that the name format can be adjusted.
        """
        class MyInterSphinxParser(InterSphinxParser):
            def create_entry(self, dash_type, key, inv_entry):
                path_str = inv_entry[0]
                return ParserEntry(
                    name=f"!{key}!", type=dash_type, path=path_str
                )
        p = MyInterSphinxParser(source=sphinx_built)
        result = list(
            p._inv_to_entries(
                {"py:method": {"some_method": ("some_module.py", "-")}}
            )
        )
        assert [
            ParserEntry(
                name="!some_method!",
                type=EntryType.METHOD,
                path="some_module.py",
            )
        ] == result
    def test_create_entry_none(self, sphinx_built):
        """
        `create_entry` can return None.
        """
        class MyInterSphinxParser(InterSphinxParser):
            def create_entry(self, dash_type, key, inv_entry):
                if dash_type == EntryType.OPTION:
                    return
                return super().create_entry(dash_type, key, inv_entry)
        p = MyInterSphinxParser(source=sphinx_built)
        result = list(
            p._inv_to_entries(
                {
                    "py:method": {"some_method": ("some_module.py", "-")},
                    "std:option": {
                        "--destination": (
                            "doc2dash",
                            "2.0",
                            "index.html#document-usage#cmdoption--"
                            "destination",
                            "-",
                        )
                    },
                }
            )
        )
        # The OPTION entry was dropped by returning None.
        assert [
            ParserEntry(
                name="some_method",
                type=EntryType.METHOD,
                path="some_module.py",
            )
        ] == result
@pytest.fixture(name="soup")
def _soup():
    # Parsed Sphinx-style example page used by the patch tests below.
    return BeautifulSoup(
        (Path(HERE) / "function_example.html").read_text(encoding="utf-8"),
        "html.parser",
    )
@pytest.fixture(name="pydoctor_soup")
def _pydoctor_soup():
    # Parsed pydoctor-style example page (different anchor markup).
    return BeautifulSoup(
        (Path(HERE) / "pydoctor_example.html").read_text(encoding="utf-8"),
        "html.parser",
    )
class TestFindAndPatchEntry:
    """_find_entry_and_add_ref locates anchors and inserts dashAnchor tags."""
    def test_patch_method(self, soup):
        """
        Patching a method adds a TOC entry.
        """
        assert True is _find_entry_and_add_ref(
            soup,
            name="pyramid.config.Configurator.add_route",
            type=EntryType.METHOD,
            anchor="pyramid.config.Configurator.add_route",
            ref="//apple_ref/cpp/Method/pyramid.config.Configurator.add_route",
        )
        toc_link = soup(
            "a",
            attrs={
                "name": "//apple_ref/cpp/Method/pyramid.config.Configurator."
                "add_route"
            },
        )
        assert toc_link
    def test_patch_modules(self):
        """
        Patching a module adds the TOC entry into the next <h1>.
        """
        soup = BeautifulSoup("<h1>Some Module</h1>", "html.parser")
        assert True is _find_entry_and_add_ref(
            soup,
            name="some_module",
            type=EntryType.PACKAGE,
            anchor="module-some_module",
            ref="//apple_ref/cpp/Module/pyramid.security",
        )
        assert '<a class="dashAnchor" name="//apple_ref' in str(soup)
    def test_patch_fail(self, soup):
        """
        Return `False` if anchor can't be found
        """
        assert False is _find_entry_and_add_ref(
            soup,
            name="foo",
            type="Nothing",
            anchor="does-not-exist",
            ref="does-not-matter",
        )
    def test_patch_term(self, soup):
        """
        :term: and glossaries are found.
        """
        ref = "//apple_ref/cpp/Word/Whatever"
        assert True is _find_entry_and_add_ref(
            soup,
            name="Whatever",
            type=EntryType.WORD,
            anchor="term-dict-classes",
            ref=ref,
        )
        # The anchor is inserted immediately before the <dt>.
        assert (
            f'<a class="dashAnchor" name="{ref}"></a>'
            '<dt id="term-dict-classes">' in str(soup)
        )
    def test_patch_section(self, soup):
        """
        Sections are found.
        """
        ref = "//apple_ref/cpp/Section/Chains"
        assert True is _find_entry_and_add_ref(
            soup,
            name="Chains",
            type=EntryType.SECTION,
            anchor="chains",
            ref=ref,
        )
        assert (
            f'<a class="dashAnchor" name="{ref}"></a>'
            '<section id="chains">' in str(soup)
        )
    def test_pydoctor_class(self, pydoctor_soup):
        """
        Pydoctor classes are found.
        """
        ref = "//apple_ref/cpp/Word/twisted._threads._convenience.Quit.isSet"
        patched = _find_entry_and_add_ref(
            pydoctor_soup,
            name="twisted._threads._convenience.Quit.isSet",
            type=EntryType.WORD,
            anchor="isSet",
            ref=ref,
        )
        assert patched
        assert (
            f'<a class="dashAnchor" name="{ref}"></a>'
            '<a name="twisted._threads._convenience.Quit.isSet">'
            in str(pydoctor_soup)
        )
class TestIntersphinxDetect:
    """InterSphinxParser.detect handles missing and corrupt inventories."""
    def test_does_not_exist(self, tmp_path):
        """
        Empty paths without an objects.inv return None.
        """
        assert None is InterSphinxParser.detect(tmp_path)
    @pytest.mark.parametrize(
        "obj",
        [
            "",
            "# Sphinx inventory version 2",
            "# Sphinx inventory version 2\n",
            "# Sphinx inventory version 2\nFoo",
            "# Sphinx inventory version 2\nProject",
            "# Sphinx inventory version 2\nProject:",
            "# Sphinx inventory version 2\nProject: ",
        ],
    )
    def test_corrupt(self, tmp_path, caplog, obj):
        """
        A truncated/corrupt objects.inv is rejected with a log message and
        detect returns None.
        """
        (tmp_path / "objects.inv").write_text(obj)
        assert None is InterSphinxParser.detect(tmp_path)
        assert (
            f"intersphinx: object.inv at {tmp_path} exists, but is corrupt."
            == caplog.records[0].message
        )
| mit | a437b6170a7548fa1b7009cec6b03328 | 28.19195 | 79 | 0.471206 | 4.347165 | false | true | false | false |
robotpy/pyfrc | pyfrc/tests/basic.py | 1 | 2531 | """
The primary purpose of these tests is to run through your code
and make sure that it doesn't crash. If you actually want to test
your code, you need to write your own custom tests to tease out
the edge cases.
To use these, add the following to a python file in your tests directory::
from pyfrc.tests import *
"""
import pytest
import typing
if typing.TYPE_CHECKING:
from pyfrc.test_support.controller import TestController
def test_autonomous(control: "TestController"):
    """Runs autonomous mode by itself: disabled, enabled, disabled."""
    # (duration in seconds, robot enabled?) -- autonomous the whole time.
    phases = ((0.5, False), (15, True), (0.5, False))

    with control.run_robot():
        for duration, enabled in phases:
            control.step_timing(seconds=duration, autonomous=True, enabled=enabled)
@pytest.mark.filterwarnings("ignore")
def test_disabled(control: "TestController", robot):
    """Runs disabled mode by itself"""
    # NOTE(review): ``robot`` is unused in the body; presumably the fixture is
    # requested for its construction side effects -- confirm before removing.
    with control.run_robot():
        # Run disabled + autonomous for a short period
        control.step_timing(seconds=5, autonomous=True, enabled=False)
        # Run disabled + !autonomous for a short period
        control.step_timing(seconds=5, autonomous=False, enabled=False)
@pytest.mark.filterwarnings("ignore")
def test_operator_control(control: "TestController"):
    """Runs operator control mode by itself: disabled, enabled, disabled."""
    # (duration in seconds, robot enabled?) -- teleop (not autonomous) throughout.
    phases = ((0.5, False), (15, True), (0.5, False))

    with control.run_robot():
        for duration, enabled in phases:
            control.step_timing(seconds=duration, autonomous=False, enabled=enabled)
@pytest.mark.filterwarnings("ignore")
def test_practice(control: "TestController"):
    """Runs through the entire span of a practice match"""
    # (duration s, autonomous?, enabled?) in match order:
    # pre-match disabled, autonomous period, between-period disabled, teleop.
    phases = (
        (0.5, True, False),
        (15, True, True),
        (0.5, False, False),
        (120, False, True),
    )

    with control.run_robot():
        for seconds, autonomous, enabled in phases:
            control.step_timing(seconds=seconds, autonomous=autonomous, enabled=enabled)
robotpy/pyfrc | pyfrc/mains/cli_sim.py | 1 | 2510 | import os
from os.path import abspath, dirname
import argparse
import inspect
import logging
import pathlib
from pkg_resources import iter_entry_points
try:
from importlib.metadata import metadata
except ImportError:
from importlib_metadata import metadata
logger = logging.getLogger("pyfrc.sim")
class PyFrcSim:
    """
    Runs the robot using WPILib's GUI HAL Simulator
    """

    def __init__(self, parser: argparse.ArgumentParser):
        """Register the simulator's command line options.

        Adds ``--nogui`` plus one ``--<name>`` flag per installed
        ``robotpysimext`` entry point.  ``halsim_gui`` is skipped here
        because the GUI is controlled through ``--nogui`` instead.
        """
        parser.add_argument(
            "--nogui",
            default=False,
            action="store_true",
            help="Don't use the WPIlib simulation gui",
        )

        # extension name -> loaded extension module
        self.simexts = {}

        for entry_point in iter_entry_points(group="robotpysimext", name=None):
            if entry_point.module_name == "halsim_gui":
                continue
            try:
                sim_ext_module = entry_point.load()
            except ImportError:
                # a broken extension shouldn't break the whole simulator
                print(f"WARNING: Error detected in {entry_point}")
                continue

            self.simexts[entry_point.name] = sim_ext_module

            parser.add_argument(
                f"--{entry_point.name}",
                default=False,
                action="store_true",
                help=metadata(entry_point.dist.project_name)["summary"],
            )

    def run(self, options, robot_class, **static_options):
        """Load the requested HAL extensions, then run the robot.

        :param options: parsed argparse namespace (``nogui`` plus one boolean
            attribute per registered sim extension)
        :param robot_class: the user's robot class
        :returns: result of ``robot_class.main``, or ``False`` if physics
            initialization failed
        """
        if not options.nogui:
            try:
                import halsim_gui
            except ImportError:
                print("robotpy-halsim-gui is not installed!")
                exit(1)
            else:
                halsim_gui.loadExtension()

        # Some extensions (gui) changes the current directory
        cwd = os.getcwd()

        for name, module in self.simexts.items():
            if getattr(options, name.replace("-", "_"), False):
                try:
                    module.loadExtension()
                except BaseException:
                    # was a bare `except:` (flake8 E722); BaseException is the
                    # exact equivalent -- report the failure, then re-raise
                    print(f"Error loading {name}!")
                    raise

        os.chdir(cwd)

        # initialize physics, attach to the user robot class
        from ..physics.core import PhysicsInterface, PhysicsInitException

        robot_file = pathlib.Path(inspect.getfile(robot_class)).absolute()

        try:
            _, robot_class = PhysicsInterface._create_and_attach(
                robot_class, robot_file.parent
            )

            # run the robot
            return robot_class.main(robot_class)
        except PhysicsInitException:
            return False
| mit | 3ee22c37c009bfbb60eccbf8240a8ca4 | 27.522727 | 79 | 0.556574 | 4.490161 | false | false | false | false |
alisaifee/flask-limiter | tests/test_configuration.py | 1 | 4054 | import math
import time
import pytest
from flask import Flask
from limits.errors import ConfigurationError
from limits.storage import MemcachedStorage
from limits.strategies import MovingWindowRateLimiter
from flask_limiter import HeaderNames
from flask_limiter.constants import ConfigVars
from flask_limiter.extension import Limiter
from flask_limiter.util import get_remote_address
def test_invalid_strategy():
    """An unknown rate-limiting strategy name must raise ConfigurationError."""
    flask_app = Flask(__name__)
    flask_app.config.setdefault(ConfigVars.STRATEGY, "fubar")

    with pytest.raises(ConfigurationError):
        Limiter(flask_app, key_func=get_remote_address)
def test_invalid_storage_string():
    """An unrecognized storage URI scheme must raise ConfigurationError."""
    flask_app = Flask(__name__)
    flask_app.config.setdefault(ConfigVars.STORAGE_URI, "fubar://localhost:1234")

    with pytest.raises(ConfigurationError):
        Limiter(flask_app, key_func=get_remote_address)
def test_constructor_arguments_over_config(redis_connection):
    """Values passed to the Limiter constructor win over app.config entries.

    app.config asks for the elastic-expiry strategy (and later a redis
    storage), but the constructor selects the moving-window strategy and a
    memcached storage -- those are what the extension must end up using.
    """
    app = Flask(__name__)
    app.config.setdefault(ConfigVars.STRATEGY, "fixed-window-elastic-expiry")
    limiter = Limiter(strategy="moving-window", key_func=get_remote_address)
    limiter.init_app(app)
    app.config.setdefault(ConfigVars.STORAGE_URI, "redis://localhost:46379")
    app.config.setdefault(ConfigVars.APPLICATION_LIMITS, "1/minute")
    # exact-type check on purpose (a subclass would not count);
    # `is` is the idiomatic type comparison (flake8 E721), == was used before
    assert type(limiter._limiter) is MovingWindowRateLimiter
    limiter = Limiter(
        storage_uri="memcached://localhost:31211", key_func=get_remote_address
    )
    limiter.init_app(app)
    assert type(limiter._storage) is MemcachedStorage
def test_header_names_config():
    """Custom rate-limit header names can be configured via app.config."""
    app = Flask(__name__)
    app.config.setdefault(ConfigVars.HEADER_LIMIT, "XX-Limit")
    app.config.setdefault(ConfigVars.HEADER_REMAINING, "XX-Remaining")
    app.config.setdefault(ConfigVars.HEADER_RESET, "XX-Reset")
    limiter = Limiter(
        key_func=get_remote_address, headers_enabled=True, default_limits=["1/second"]
    )
    limiter.init_app(app)

    @app.route("/")
    def root():
        return "42"

    with app.test_client() as client:
        resp = client.get("/")
        # 1/second default limit: the first request consumes the full quota.
        assert resp.headers["XX-Limit"] == "1"
        assert resp.headers["XX-Remaining"] == "0"
        # NOTE(review): assumes the request completes within the same
        # wall-clock second; a very slow run could be off by one.
        assert resp.headers["XX-Reset"] == str(math.ceil(time.time() + 1))
def test_header_names_constructor():
    """Custom header names can also be given via header_name_mapping."""
    app = Flask(__name__)
    limiter = Limiter(
        key_func=get_remote_address,
        headers_enabled=True,
        default_limits=["1/second"],
        header_name_mapping={
            HeaderNames.LIMIT: "XX-Limit",
            HeaderNames.REMAINING: "XX-Remaining",
            HeaderNames.RESET: "XX-Reset",
        },
    )
    limiter.init_app(app)

    @app.route("/")
    def root():
        return "42"

    with app.test_client() as client:
        resp = client.get("/")
        # 1/second default limit: the first request consumes the full quota.
        assert resp.headers["XX-Limit"] == "1"
        assert resp.headers["XX-Remaining"] == "0"
        # NOTE(review): assumes the request completes within the same second.
        assert resp.headers["XX-Reset"] == str(math.ceil(time.time() + 1))
def test_invalid_config_with_disabled():
    """With ENABLED=False, a bogus storage URI must not break the app.

    Neither the default limit (1/hour) nor the decorated limit (2/hour) is
    enforced while the extension is disabled, so every request succeeds.
    """
    app = Flask(__name__)
    app.config.setdefault(ConfigVars.ENABLED, False)
    app.config.setdefault(ConfigVars.STORAGE_URI, "fubar://")
    limiter = Limiter(app, key_func=get_remote_address, default_limits=["1/hour"])

    @app.route("/")
    def root():
        return "root"

    @app.route("/explicit")
    @limiter.limit("2/hour")
    def explicit():
        return "explicit"

    with app.test_client() as client:
        assert client.get("/").status_code == 200
        assert client.get("/").status_code == 200
        # Three hits on a 2/hour route -- would be 429 if limiting were active.
        assert client.get("/explicit").status_code == 200
        assert client.get("/explicit").status_code == 200
        assert client.get("/explicit").status_code == 200
def test_uninitialized_limiter():
    """A limiter that was never bound via init_app does not enforce limits."""
    app = Flask(__name__)
    limiter = Limiter(key_func=get_remote_address, default_limits=["1/hour"])

    @app.route("/")
    @limiter.limit("2/hour")
    def root():
        return "root"

    with app.test_client() as client:
        # Three hits on a 2/hour route all succeed: the limiter is inert.
        assert client.get("/").status_code == 200
        assert client.get("/").status_code == 200
        assert client.get("/").status_code == 200
| mit | d38146b8ea4e21b2f8920af0e3ddf4f5 | 30.92126 | 86 | 0.653675 | 3.622878 | false | true | false | false |
robotpy/pyfrc | docs/conf.py | 1 | 5080 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Imports
#
import sys
import os
from os.path import abspath, dirname
# Project must be built+installed to generate docs
import pyfrc
import pyfrc.config
pyfrc.config.config_obj["pyfrc"] = dict(game_specific_messages=[])
# -- RTD configuration ------------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# This is used for linking and such so we link to the thing we're building
rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest")
if rtd_version not in ["stable", "latest"]:
rtd_version = "stable"
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinx_autodoc_typehints",
]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyfrc"
copyright = "2014-2020, RobotPy development team"
autoclass_content = "both"
intersphinx_mapping = {
"robotpy": ("https://robotpy.readthedocs.io/en/%s/" % rtd_version, None),
"wpilib": (
"https://robotpy.readthedocs.io/projects/wpilib/en/%s/" % rtd_version,
None,
),
}
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from pyfrc.version import version
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output ----------------------------------------------
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
# Output file base name for HTML help builder.
htmlhelp_basename = "pyfrcdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "pyfrc.tex", "pyfrc Documentation", "Dustin Spicuzza", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "pyfrc", "pyfrc Documentation", ["Dustin Spicuzza"], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"pyfrc",
"pyfrc Documentation",
"Dustin Spicuzza",
"pyfrc",
"One line description of project.",
"Miscellaneous",
)
]
# -- Custom Document processing ----------------------------------------------
from robotpy_sphinx.sidebar import generate_sidebar
generate_sidebar(
globals(),
"pyfrc",
"https://raw.githubusercontent.com/robotpy/docs-sidebar/master/sidebar.toml",
)
import sphinx.addnodes
import docutils.nodes
def process_child(node):
    """Strip the intermediate module name from ``wpilib.`` cross references.

    Sphinx renders qualified names like ``wpilib.module.Class``; this
    rewrites the doctree in place so they display as ``wpilib.Class``.
    Recurses through the whole subtree rooted at *node*.
    """

    # Edit descriptions to be nicer
    if isinstance(node, sphinx.addnodes.desc_addname):
        if len(node.children) == 1:
            text = node.children[0].astext()
            # desc_addname text looks like "wpilib.module." -- drop "module."
            if text.startswith("wpilib.") and text.endswith("."):
                # remove the last element
                text = ".".join(text.split(".")[:-2]) + "."
                node.children[0] = docutils.nodes.Text(text)

    # Edit literals to be nicer
    elif isinstance(node, docutils.nodes.literal):
        # guard: a literal may have no children; the original indexed
        # node.children[0] blindly and could raise IndexError here
        if node.children:
            text = node.children[0].astext()
            # Remove the imported module name ("wpilib.mod.X" -> "wpilib.X")
            if text.startswith("wpilib."):
                stext = text.split(".")
                text = ".".join(stext[:-2] + [stext[-1]])
                node.children[0] = docutils.nodes.Text(text)

    for child in node.children:
        process_child(child)
def doctree_read(app, doctree):
    """Sphinx ``doctree-read`` handler: post-process every top-level node."""
    # `app` is part of the event signature but is not needed here.
    for top_level_node in doctree.children:
        process_child(top_level_node)
def setup(app):
    """Sphinx extension entry point: hook our doctree post-processing."""
    app.connect("doctree-read", doctree_read)
| mit | 6d5540d39a7f3c2fe7b0f961635345f0 | 27.700565 | 98 | 0.628937 | 3.799551 | false | false | false | false |
explosion/thinc | thinc/layers/hard_swish.py | 1 | 2037 | from typing import Tuple, Optional, Callable, cast
from ..config import registry
from ..model import Model
from .chain import chain
from .layernorm import LayerNorm
from .dropout import Dropout
from ..types import Floats1d, Floats2d
from ..util import partial, get_width
from ..initializers import he_normal_init, zero_init
@registry.layers("HardSwish.v1")
def HardSwish(
nO: Optional[int] = None,
nI: Optional[int] = None,
*,
init_W: Callable = he_normal_init,
init_b: Callable = zero_init,
dropout: Optional[float] = None,
normalize: bool = False,
) -> Model[Floats2d, Floats2d]:
model: Model[Floats2d, Floats2d] = Model(
"hardswish",
forward,
init=partial(init, init_W, init_b),
dims={"nO": nO, "nI": nI},
params={"W": None, "b": None},
)
if normalize:
model = chain(model, LayerNorm(nI=nO))
if dropout is not None:
model = chain(model, cast(Model[Floats2d, Floats2d], Dropout(dropout)))
return model
def forward(
    model: Model[Floats2d, Floats2d], X: Floats2d, is_train: bool
) -> Tuple[Floats2d, Callable]:
    """Apply the affine transform then hard-swish; return (Y, backprop)."""
    W = cast(Floats2d, model.get_param("W"))
    b = cast(Floats1d, model.get_param("b"))
    # Keep the pre-activation values: the backward pass needs them.
    Y_preact = model.ops.affine(X, W, b)
    Y = model.ops.hard_swish(Y_preact)

    def backprop(dY: Floats2d) -> Floats2d:
        # Chain rule through the activation, accumulate parameter gradients,
        # then return the gradient with respect to the input.
        dY = model.ops.backprop_hard_swish(dY, Y_preact, inplace=False)
        model.inc_grad("b", dY.sum(axis=0))
        model.inc_grad("W", model.ops.gemm(dY, X, trans1=True))
        return model.ops.gemm(dY, W)

    return Y, backprop
def init(
    init_W: Callable,
    init_b: Callable,
    model: Model[Floats2d, Floats2d],
    X: Optional[Floats2d] = None,
    Y: Optional[Floats2d] = None,
) -> None:
    """Infer missing dims from sample data, then initialize W and b."""
    if X is not None:
        model.set_dim("nI", get_width(X))
    if Y is not None:
        model.set_dim("nO", get_width(Y))
    # W is (nO, nI); b is (nO,).
    model.set_param("W", init_W(model.ops, (model.get_dim("nO"), model.get_dim("nI"))))
    model.set_param("b", init_b(model.ops, (model.get_dim("nO"),)))
| mit | 29524bd5cf82c59caff558ddcea23502 | 29.863636 | 87 | 0.628866 | 2.864979 | false | false | false | false |
explosion/thinc | thinc/tests/model/test_model.py | 1 | 19914 | from collections import Counter
import pytest
import threading
import time
from thinc.api import Adam, CupyOps, Dropout, Linear, Model, Relu
from thinc.api import Shim, Softmax, chain, change_attr_values
from thinc.api import concatenate, set_dropout_rate
from thinc.api import use_ops, with_debug, wrap_model_recursive
from thinc.compat import has_cupy_gpu
import numpy
from ..util import make_tempdir
@pytest.fixture
def model_with_no_args():
return Linear()
def create_model(name):
    """Build a minimal pass-through Model with the given name (test helper)."""

    def _identity_forward(X):
        return X, lambda dY: dY

    return Model(name, _identity_forward)
def test_model_defaults_to_cpu(model_with_no_args):
assert not isinstance(model_with_no_args.ops, CupyOps)
def test_models_get_different_ids(model_with_no_args):
model1 = Linear()
model2 = Linear()
assert model1.id != model2.id
def test_model_init():
class MyShim(Shim):
name = "testshim"
model_a = create_model("a")
model = Model(
"test",
lambda X: (X, lambda dY: dY),
dims={"nI": 10, "nO": None},
params={"W": numpy.zeros((10,)), "b": None},
refs={"a": model_a, "b": None},
attrs={"foo": "bar"},
shims=[MyShim(None)],
layers=[model_a, model_a],
)
assert model.has_param("W")
assert model.get_param("W").shape == (10,)
assert model.has_param("b") is None
with pytest.raises(KeyError):
model.get_param("b")
with pytest.raises(KeyError):
model.get_param("X")
model.set_param("X", numpy.zeros((10,)))
assert model.has_param("X")
assert model.get_param("X").shape == (10,)
with model.use_params({(model.id, "X"): numpy.ones((10,))}):
assert numpy.array_equal(model.get_param("X"), numpy.ones((10,)))
assert numpy.array_equal(model.get_param("X"), numpy.zeros((10,)))
assert not model.has_grad("W")
assert not model.has_grad("xyz")
with pytest.raises(KeyError):
model.get_grad("b")
model.set_param("W", model.ops.alloc1f(10))
model.set_grad("W", model.ops.alloc1f(10))
with pytest.raises(ValueError):
model.inc_grad("W", numpy.zeros((5, 0)))
assert model.has_dim("nI")
assert model.get_dim("nI") == 10
with pytest.raises(KeyError):
model.get_dim("xyz")
with pytest.raises(ValueError):
model.get_dim("nO")
assert model.has_ref("a")
assert model.get_ref("a").name == "a"
assert not model.has_ref("xyz")
with pytest.raises(KeyError):
model.get_ref("xyz")
assert model.has_ref("b") is None
with pytest.raises(ValueError):
model.get_ref("b")
model.set_ref("c", model_a)
assert model.has_ref("c")
assert model.get_ref("c").name == "a"
with pytest.raises(ValueError):
model.set_ref("c", create_model("c"))
assert "foo" in model.attrs
assert "bar" not in model.attrs
assert model.attrs["foo"] == "bar"
with pytest.raises(KeyError):
model.attrs["bar"]
model.attrs["bar"] = "baz"
model_copy = model.copy()
assert model_copy.name == "test"
def test_model_set_dim():
class MyShim(Shim):
name = "testshim"
model_a = create_model("a")
model = Model(
"test",
lambda X: (X, lambda dY: dY),
dims={"nI": 5, "nO": None},
params={"W": None, "b": None},
refs={"a": model_a, "b": None},
attrs={"foo": "bar"},
shims=[MyShim(None)],
layers=[model_a, model_a],
)
with pytest.raises(ValueError):
model.set_dim("nI", 10)
# force can be used before any parameters are set
model.set_dim("nI", 10, force=True)
model.set_param("W", model.ops.alloc1f(10))
model.set_grad("W", model.ops.alloc1f(10))
assert model.has_dim("nI")
assert model.get_dim("nI") == 10
with pytest.raises(KeyError):
model.set_dim("xyz", 20)
with pytest.raises(ValueError):
model.set_dim("nI", 20)
# force can't be used after any parameter is set
with pytest.raises(ValueError):
model.set_dim("nI", 20, force=True)
def test_param_names():
model = create_model("tmp")
assert model.param_names == tuple()
model.set_param("param1", None)
assert model.param_names == ("param1",)
model.set_param("param2", None)
assert model.param_names == ("param1", "param2")
def test_grad_names():
model = create_model("tmp")
assert model.grad_names == tuple()
model.set_param("param1", model.ops.alloc2f(4, 4))
model.set_grad("param1", model.ops.alloc2f(4, 4) + 1)
assert model.grad_names == ("param1",)
def test_dim_names():
model = Linear(5, 3)
assert model.dim_names == ("nO", "nI")
def test_model_set_reference():
parent = create_model("parent")
child = create_model("child")
grandchild = create_model("child")
parent.layers.append(child)
assert parent.ref_names == tuple()
parent.set_ref("kid", child)
assert parent.ref_names == ("kid",)
assert parent.get_ref("kid") is child
child.layers.append(grandchild)
with pytest.raises(KeyError):
parent.get_ref("grandkid")
parent.set_ref("grandkid", grandchild)
assert parent.get_ref("grandkid") is grandchild
parent.remove_node(grandchild)
assert grandchild not in child.layers
assert not parent.has_ref("grandkind")
def test_maybe_methods():
model = Linear(5)
assert model.maybe_get_dim("nI") is None
model.set_dim("nI", 4)
assert model.maybe_get_dim("nI") == 4
assert model.maybe_get_ref("boo") is None
assert model.maybe_get_param("W") is None
model.initialize()
assert model.maybe_get_param("W") is not None
def test_model_can_save_to_disk(model_with_no_args):
with make_tempdir() as path:
model_with_no_args.to_disk(path / "thinc_model")
def test_model_can_load_from_disk(model_with_no_args):
with make_tempdir() as path:
model_with_no_args.to_disk(path / "thinc_model")
m2 = model_with_no_args.from_disk(path / "thinc_model")
assert model_with_no_args.to_bytes() == m2.to_bytes()
def test_model_can_roundtrip_with_path_subclass(model_with_no_args, pathy_fixture):
path = pathy_fixture / "thinc_model"
model_with_no_args.to_disk(path)
m2 = model_with_no_args.from_disk(path)
assert model_with_no_args.to_bytes() == m2.to_bytes()
def test_change_attr_values(model_with_no_args):
model = model_with_no_args
model.name = "target"
model.attrs["has_var"] = False
change_attr_values(model, {"target": {"has_var": True, "error": True}})
assert model.attrs["has_var"] is True
assert "error" not in model.attrs
def test_set_dropout():
model = Dropout()
assert model.attrs["dropout_rate"] == 0.0
set_dropout_rate(model, 0.2)
assert model.attrs["dropout_rate"] == 0.2
def test_set_dropout_2(model_with_no_args):
model = model_with_no_args
model.name = "dropout"
model.attrs["dropout_rate"] = 0.0
set_dropout_rate(model, 0.2)
assert model.attrs["dropout_rate"] == 0.2
def test_bind_plus():
with Model.define_operators({"+": lambda a, b: (a.name, b.name)}):
m = create_model(name="a") + create_model(name="b")
assert m == ("a", "b")
def test_plus_chain():
with Model.define_operators({"+": lambda a, b: a}):
m = (
create_model(name="a")
+ create_model(name="b")
+ create_model(name="c")
+ create_model(name="d")
)
assert m.name == "a"
def test_overload_operators_in_subthread():
    """Test we can create a model in a child thread with overloaded operators."""
    # One worker registers "+", the other "*"; the sleeping worker pauses
    # after Model.define_operators.  Without thread-safe operator state, the
    # sleeper would find its operator definitions removed and error out.
    # Run both orderings so either worker gets to be the sleeper.
    for plus_sleep, star_sleep in ((0, 1), (1, 0)):
        plus_worker = threading.Thread(target=_overload_plus, args=("+", plus_sleep))
        star_worker = threading.Thread(target=_overload_plus, args=("*", star_sleep))
        star_worker.start()
        plus_worker.start()
        plus_worker.join()
        star_worker.join()
def _overload_plus(operator, sleep):
    """Worker for the threading test: register *operator*, optionally sleep,
    apply it, and check the result plus the operator-table cleanup."""
    m1 = create_model(name="a")
    m2 = create_model(name="b")
    with Model.define_operators({operator: lambda a, b: a.name + b.name}):
        # Sleeping inside the context interleaves with the other thread.
        time.sleep(sleep)
        if operator == "+":
            value = m1 + m2
        else:
            value = m1 * m2
    assert value == "ab"
    # Leaving the context must restore an empty operator table.
    assert Model._context_operators.get() == {}
def test_nested_operator_contexts():
m1 = create_model(name="a")
m2 = create_model(name="b")
assert Model._context_operators.get() == {}
with Model.define_operators({"+": lambda a, b: a.name + b.name}):
value = m1 + m2
with pytest.raises(TypeError):
value = m1 * m2
with Model.define_operators({"*": lambda a, b: a.name + b.name}):
with pytest.raises(TypeError):
value = m1 + m2
value = m1 * m2
with Model.define_operators({"-": lambda a, b: a.name + b.name}):
with pytest.raises(TypeError):
value = m1 + m2
value = m1 - m2
with pytest.raises(TypeError):
value = m1 + m2
value = m1 * m2
value = m1 + m2
with pytest.raises(TypeError):
value = m1 * m2
assert value == "ab"
assert Model._context_operators.get() == {}
@pytest.mark.parametrize("op", "+ - * @ / // % ** << >> & ^ |".split())
def test_all_operators(op):
m1 = Linear()
m2 = Linear()
with Model.define_operators({op: lambda a, b: a.name + b.name}):
if op == "+":
value = m1 + m2
else:
with pytest.raises(TypeError):
value = m1 + m2
if op == "-":
value = m1 - m2
else:
with pytest.raises(TypeError):
value = m1 - m2
if op == "*":
value = m1 * m2
else:
with pytest.raises(TypeError):
value = m1 * m2
if op == "@":
value = m1.__matmul__(m2) # Be kind to Python 2...
else:
with pytest.raises(TypeError):
value = m1.__matmul__(m2)
if op == "/":
value = m1 / m2
else:
with pytest.raises(TypeError):
value = m1 / m2
if op == "//":
value = m1 // m2
else:
with pytest.raises(TypeError):
value = m1 // m2
if op == "^":
value = m1 ^ m2
else:
with pytest.raises(TypeError):
value = m1 ^ m2
if op == "%":
value = m1 % m2
else:
with pytest.raises(TypeError):
value = m1 % m2
if op == "**":
value = m1**m2
else:
with pytest.raises(TypeError):
value = m1**m2
if op == "<<":
value = m1 << m2
else:
with pytest.raises(TypeError):
value = m1 << m2
if op == ">>":
value = m1 >> m2
else:
with pytest.raises(TypeError):
value = m1 >> m2
if op == "&":
value = m1 & m2
else:
with pytest.raises(TypeError):
value = m1 & m2
if op == "^":
value = m1 ^ m2
else:
with pytest.raises(TypeError):
value = m1 ^ m2
if op == "|":
value = m1 | m2
else:
with pytest.raises(TypeError):
value = m1 | m2 # noqa: F841
assert Model._context_operators.get() == {}
def test_unique_id_multithreading():
    """Create a bunch of threads and assert they all get unique IDs"""
    list_of_ids = []

    def get_model_id(id_list, index):
        # list.append from many threads is safe in CPython (GIL), so no lock.
        id_list.append(create_model(name=f"worker{index}").id)

    counter = 0
    while len(list_of_ids) < 1000:
        # Spawn batches of 50 threads until at least 1000 ids are collected.
        workers = []
        for i in range(50):
            w = threading.Thread(target=get_model_id, args=(list_of_ids, counter))
            workers.append(w)
            counter += 1

        for w in workers:
            w.start()
        for w in workers:
            w.join()

    # Any duplicate id would make the set smaller than the list.
    assert len(list_of_ids) == len(list(set(list_of_ids)))
@pytest.mark.skipif(not has_cupy_gpu, reason="needs CuPy GPU")
def test_model_gpu():
pytest.importorskip("ml_datasets")
import ml_datasets
with use_ops("cupy"):
n_hidden = 32
dropout = 0.2
(train_X, train_Y), (dev_X, dev_Y) = ml_datasets.mnist()
model = chain(
Relu(nO=n_hidden, dropout=dropout),
Relu(nO=n_hidden, dropout=dropout),
Softmax(),
)
# make sure the data is on the right device
train_X = model.ops.asarray(train_X)
train_Y = model.ops.asarray(train_Y)
dev_X = model.ops.asarray(dev_X)
dev_Y = model.ops.asarray(dev_Y)
model.initialize(X=train_X[:5], Y=train_Y[:5])
optimizer = Adam(0.001)
batch_size = 128
for i in range(2):
batches = model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True)
for X, Y in batches:
Yh, backprop = model.begin_update(X)
backprop(Yh - Y)
model.finish_update(optimizer)
# Evaluate and print progress
correct = 0
total = 0
for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):
Yh = model.predict(X)
correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()
total += Yh.shape[0]
def test_replace_node():
relu1 = Relu(5)
relu2 = Relu(5)
relu_chain = chain(relu1, relu2)
relu1_debug = with_debug(relu1)
debug = Model(
"test",
lambda X: (X, lambda dY: dY),
layers=[relu1, relu2, relu1, relu_chain],
refs={"relu1": relu1, "relu2": relu2, "relu3": relu1},
)
debug.replace_node(relu1, relu1_debug)
assert debug.layers[0] == relu1_debug
assert debug.layers[1] == relu2
assert debug.layers[2] == relu1_debug
assert debug.get_ref("relu1") == relu1_debug
assert debug.get_ref("relu2") == relu2
assert debug.get_ref("relu3") == relu1_debug
# Check that nodes are replaced recursively
assert debug.layers[3] == relu_chain
assert debug.layers[3].layers[0] == relu1_debug
assert debug.layers[3].layers[1] == relu2
def test_replace_node_with_indirect_node_ref():
# a
# / \
# x b[y=y]
# | |
# y x
# |
# y
def dummy_model(name, layers):
return Model(name, lambda model, X, is_train: ..., layers=layers)
y = dummy_model("y", [])
x = dummy_model("x", [y])
y_debug = with_debug(y)
b = dummy_model("b", [x])
b.set_ref("y", y)
a = chain(x, b)
a.name = "a"
a.replace_node(y, y_debug)
assert a.layers[0].layers[0] == y_debug
assert a.layers[1].layers[0].layers[0] == y_debug
assert a.layers[1].get_ref("y") == y_debug
def test_with_debug():
pytest.importorskip("ml_datasets")
import ml_datasets
(train_X, train_Y), (dev_X, dev_Y) = ml_datasets.mnist()
counts = Counter()
def on_init(*_):
counts["init"] += 1
def on_forward(*_):
counts["forward"] += 1
def on_backprop(*_):
counts["backprop"] += 1
relu = Relu()
relu2 = with_debug(
Relu(), on_init=on_init, on_forward=on_forward, on_backprop=on_backprop
)
chained = chain(relu, relu2, relu2)
chained.initialize(X=train_X[:5], Y=train_Y[:5])
_, backprop = chained(X=train_X[:5], is_train=False)
# Not real loss gradients, but we don't care for testing.
backprop(train_Y[:5])
# Four times forward, because initialization also applies forward for
# validation.
assert counts == {"init": 2, "forward": 4, "backprop": 2}
def test_recursive_wrap():
def dummy_model(name, layers):
return Model(name, lambda model, X, is_train: ..., layers=layers)
# Check:
#
# * Recursion: chain -> relu
# * Multiple sublayers: chain -> [relu, relu]
relu = Relu(5)
chained = chain(relu, relu)
chained_debug = wrap_model_recursive(
chained, lambda model: dummy_model(f"dummy({model.name})", [model])
)
assert chained_debug.name == "dummy(relu>>relu)"
assert chained_debug.layers[0] is chained
assert chained_debug.layers[0].layers[0].name == "dummy(relu)"
assert chained_debug.layers[0].layers[0].layers[0] is relu
assert chained_debug.layers[0].layers[1].name == "dummy(relu)"
assert chained_debug.layers[0].layers[1].layers[0] is relu
def test_recursive_double_wrap():
def dummy_model(name, layers):
return Model(name, lambda model, X, is_train: ..., layers=layers)
relu = Relu(5)
chained = chain(relu, relu)
concat = concatenate(chained, chained, relu)
concat_wrapped = wrap_model_recursive(
concat, lambda model: dummy_model(f"dummy({model.name})", [model])
)
n_debug = 0
for model in concat_wrapped.walk():
if model.name.startswith("dummy"):
n_debug += 1
# There should be 3 unique dummy wrappers:
# * Around concatenate.
# * Around chain.
# * Around relu.
assert n_debug == 3
assert concat_wrapped.layers[0].layers[0].layers[0].layers[0].name == "dummy(relu)"
assert concat_wrapped.layers[0].layers[0].layers[0].layers[1].name == "dummy(relu)"
assert concat_wrapped.layers[0].layers[1].layers[0].layers[0].name == "dummy(relu)"
assert concat_wrapped.layers[0].layers[1].layers[0].layers[1].name == "dummy(relu)"
assert concat_wrapped.layers[0].layers[2].name == "dummy(relu)"
def test_wrap_non_child_references():
relu = Relu(5)
relu2 = Relu(5)
chained = chain(relu, relu)
chained2 = chain(relu2, chained)
chained2.set_ref("relu", relu)
# Fails in case non-child references cannot be set.
wrap_model_recursive(chained2, with_debug)
def test_walk_dfs():
relu = Relu(5)
relu2 = Relu(5)
inner_chain = chain(relu, relu2)
chained = chain(inner_chain, inner_chain)
assert list(chained.walk(order="dfs_pre")) == [chained, inner_chain, relu, relu2]
assert list(chained.walk(order="dfs_post")) == [
relu,
relu2,
inner_chain,
chained,
]
def test_walk_bfs_post_order_fails():
relu = Relu(5)
with pytest.raises(ValueError, match="Invalid order"):
relu.walk(order="dfs_post_order")
def test_model_copy_with_loop():
class MyShim(Shim):
name = "testshim"
def to_bytes(self):
return test_replace_node_with_indirect_node_ref
def from_bytes(self, bytes):
pass
model_a = create_model("a")
working_shim = MyShim(None)
layer = Model(
"test",
lambda X: (X, lambda dY: dY),
dims={"nI": 5, "nO": 5},
params={"W": numpy.zeros((10,)), "b": None},
refs={"a": model_a, "b": None},
attrs={"foo": "bar"},
shims=[working_shim],
layers=[model_a, model_a],
)
layer2 = Model(
"test2",
lambda X: (X, lambda dY: dY),
dims={"nI": 5, "nO": 5},
params={"W": numpy.zeros((10,)), "b": None},
refs={"a": model_a, "b": None},
attrs={"foo": "bar"},
shims=[working_shim],
layers=[model_a, model_a],
)
relu = Relu(5)
model = chain(layer, relu, layer, layer2)
model2 = model.copy()
model.from_dict(model2.to_dict())
assert model2.name == "test>>relu>>test>>test2"
assert model2.layers[0] == model2.layers[2]
assert id(model2.layers[0].shims[0]) == id(model2.layers[3].shims[0])
| mit | 21722dcaf25664745a799c6a05abeb01 | 29.449541 | 87 | 0.570804 | 3.275868 | false | true | false | false |
rapptz/discord.py | examples/custom_context.py | 3 | 1951 | # This example requires the 'message_content' privileged intent to function.
import random
import discord
from discord.ext import commands
class MyContext(commands.Context):
    async def tick(self, value):
        """React to the invoking message with a pass/fail emoji.

        A truthy *value* adds a green check mark, a falsy one a red cross.
        Reaction failures (e.g. missing permissions) are deliberately ignored.
        """
        emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
        try:
            # this will react to the command author's message
            await self.message.add_reaction(emoji)
        except discord.HTTPException:
            # sometimes errors occur during this, for example
            # maybe you don't have permission to do that
            # we don't mind, so we can just ignore them
            pass
class MyBot(commands.Bot):
    """Bot subclass that hands out MyContext instead of the default Context."""

    async def get_context(self, message, *, cls=MyContext):
        # when you override this method, you pass your new Context
        # subclass to the super() method, which tells the bot to
        # use the new MyContext class
        return await super().get_context(message, cls=cls)
# Message content is a privileged intent; it must also be enabled for the
# application in the Discord developer portal.
intents = discord.Intents.default()
intents.message_content = True
bot = MyBot(command_prefix='!', intents=intents)
@bot.command()
async def guess(ctx, number: int):
    """Guess a random number from 1 to 6."""
    # Roll a die: uniformly pick an integer in the inclusive range 1..6.
    rolled = random.randint(1, 6)
    # MyContext.tick reacts with a check mark on a correct guess and a
    # cross mark otherwise.
    correct = number == rolled
    await ctx.tick(correct)
# IMPORTANT: never hard-code your token in source. A leaked token lets
# anyone act as your bot. Load it from an environment variable or a file
# that is excluded from version control, and never commit it to GitHub.
token = "your token here"
bot.run(token)
| mit | e827119e2432caa886da6d182b279a15 | 32.067797 | 76 | 0.670938 | 3.965447 | false | false | false | false |
rapptz/discord.py | discord/role.py | 1 | 17038 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
from .asset import Asset
from .permissions import Permissions
from .colour import Colour
from .mixins import Hashable
from .utils import snowflake_time, _bytes_to_base64_data, _get_as_snowflake, MISSING
__all__ = (
'RoleTags',
'Role',
)
if TYPE_CHECKING:
import datetime
from .types.role import (
Role as RolePayload,
RoleTags as RoleTagPayload,
)
from .types.guild import RolePositionUpdate
from .guild import Guild
from .member import Member
from .state import ConnectionState
class RoleTags:
    """Represents tags on a role.

    A role tag is a piece of extra information attached to a managed role
    that gives it context for the reason the role is managed.

    While this can be accessed, a useful interface is also provided in the
    :class:`Role` and :class:`Guild` classes as well.

    .. versionadded:: 1.6

    Attributes
    ------------
    bot_id: Optional[:class:`int`]
        The bot's user ID that manages this role.
    integration_id: Optional[:class:`int`]
        The integration ID that manages the role.
    """
    __slots__ = (
        'bot_id',
        'integration_id',
        '_premium_subscriber',
    )
    def __init__(self, data: RoleTagPayload):
        self.bot_id: Optional[int] = _get_as_snowflake(data, 'bot_id')
        self.integration_id: Optional[int] = _get_as_snowflake(data, 'integration_id')
        # NOTE: The API returns "null" for this if it's valid, which corresponds to None.
        # This is different from other fields where "null" means "not there".
        # So in this case, a value of None is the same as True.
        # Which means we would need a different sentinel: MISSING marks the
        # key as absent (i.e. "not the premium subscriber role").
        self._premium_subscriber: Optional[Any] = data.get('premium_subscriber', MISSING)
    def is_bot_managed(self) -> bool:
        """:class:`bool`: Whether the role is associated with a bot."""
        return self.bot_id is not None
    def is_premium_subscriber(self) -> bool:
        """:class:`bool`: Whether the role is the premium subscriber, AKA "boost", role for the guild."""
        # See __init__: an explicit null (None) is how the API spells "true".
        return self._premium_subscriber is None
    def is_integration(self) -> bool:
        """:class:`bool`: Whether the role is managed by an integration."""
        return self.integration_id is not None
    def __repr__(self) -> str:
        return (
            f'<RoleTags bot_id={self.bot_id} integration_id={self.integration_id} '
            f'premium_subscriber={self.is_premium_subscriber()}>'
        )
class Role(Hashable):
    """Represents a Discord role in a :class:`Guild`.

    .. container:: operations

        .. describe:: x == y

            Checks if two roles are equal.

        .. describe:: x != y

            Checks if two roles are not equal.

        .. describe:: x > y

            Checks if a role is higher than another in the hierarchy.

        .. describe:: x < y

            Checks if a role is lower than another in the hierarchy.

        .. describe:: x >= y

            Checks if a role is higher or equal to another in the hierarchy.

        .. describe:: x <= y

            Checks if a role is lower or equal to another in the hierarchy.

        .. describe:: hash(x)

            Return the role's hash.

        .. describe:: str(x)

            Returns the role's name.

    Attributes
    ----------
    id: :class:`int`
        The ID for the role.
    name: :class:`str`
        The name of the role.
    guild: :class:`Guild`
        The guild the role belongs to.
    hoist: :class:`bool`
        Indicates if the role will be displayed separately from other members.
    position: :class:`int`
        The position of the role. This number is usually positive. The bottom
        role has a position of 0.

        .. warning::

            Multiple roles can have the same position number. As a consequence
            of this, comparing via role position is prone to subtle bugs if
            checking for role hierarchy. The recommended and correct way to
            compare for roles in the hierarchy is using the comparison
            operators on the role objects themselves.

    unicode_emoji: Optional[:class:`str`]
        The role's unicode emoji, if available.

        .. note::

            If :attr:`icon` is not ``None``, it is displayed as role icon
            instead of the unicode emoji under this attribute.

            If you want the icon that a role has displayed, consider using :attr:`display_icon`.

        .. versionadded:: 2.0

    managed: :class:`bool`
        Indicates if the role is managed by the guild through some form of
        integrations such as Twitch.
    mentionable: :class:`bool`
        Indicates if the role can be mentioned by users.
    tags: Optional[:class:`RoleTags`]
        The role tags associated with this role.
    """
    # Guilds can hold hundreds of roles; __slots__ keeps per-instance
    # memory small and attribute access fast.
    __slots__ = (
        'id',
        'name',
        '_permissions',
        '_colour',
        'position',
        '_icon',
        'unicode_emoji',
        'managed',
        'mentionable',
        'hoist',
        'guild',
        'tags',
        '_state',
    )
    def __init__(self, *, guild: Guild, state: ConnectionState, data: RolePayload):
        self.guild: Guild = guild
        self._state: ConnectionState = state
        self.id: int = int(data['id'])
        # All mutable fields are filled in by _update so gateway ROLE_UPDATE
        # events can reuse the same code path.
        self._update(data)
    def __str__(self) -> str:
        return self.name
    def __repr__(self) -> str:
        return f'<Role id={self.id} name={self.name!r}>'
    def __lt__(self, other: object) -> bool:
        # `self` is also guarded because __le__/__gt__ below call
        # Role.__lt__ with swapped arguments, so `self` here may be the
        # arbitrary right-hand operand.
        if not isinstance(other, Role) or not isinstance(self, Role):
            return NotImplemented
        if self.guild != other.guild:
            raise RuntimeError('cannot compare roles from two different guilds.')
        # the @everyone role is always the lowest role in hierarchy
        # (its ID equals the guild ID).
        guild_id = self.guild.id
        if self.id == guild_id:
            # everyone_role < everyone_role -> False
            return other.id != guild_id
        if self.position < other.position:
            return True
        if self.position == other.position:
            # Ties on position are broken by ID: the larger (newer) ID
            # compares as lower in the hierarchy.
            return int(self.id) > int(other.id)
        return False
    def __le__(self, other: Any) -> bool:
        # self <= other  <=>  not (other < self)
        r = Role.__lt__(other, self)
        if r is NotImplemented:
            return NotImplemented
        return not r
    def __gt__(self, other: Any) -> bool:
        # self > other  <=>  other < self
        return Role.__lt__(other, self)
    def __ge__(self, other: object) -> bool:
        # self >= other  <=>  not (self < other)
        r = Role.__lt__(self, other)
        if r is NotImplemented:
            return NotImplemented
        return not r
    def _update(self, data: RolePayload):
        """Populate/refresh the mutable role fields from a raw API payload."""
        self.name: str = data['name']
        self._permissions: int = int(data.get('permissions', 0))
        self.position: int = data.get('position', 0)
        self._colour: int = data.get('color', 0)
        self.hoist: bool = data.get('hoist', False)
        self._icon: Optional[str] = data.get('icon')
        self.unicode_emoji: Optional[str] = data.get('unicode_emoji')
        self.managed: bool = data.get('managed', False)
        self.mentionable: bool = data.get('mentionable', False)
        self.tags: Optional[RoleTags]
        # 'tags' is only present on payloads for roles that actually have tags.
        try:
            self.tags = RoleTags(data['tags'])
        except KeyError:
            self.tags = None
def is_default(self) -> bool:
""":class:`bool`: Checks if the role is the default role."""
return self.guild.id == self.id
def is_bot_managed(self) -> bool:
""":class:`bool`: Whether the role is associated with a bot.
.. versionadded:: 1.6
"""
return self.tags is not None and self.tags.is_bot_managed()
def is_premium_subscriber(self) -> bool:
""":class:`bool`: Whether the role is the premium subscriber, AKA "boost", role for the guild.
.. versionadded:: 1.6
"""
return self.tags is not None and self.tags.is_premium_subscriber()
def is_integration(self) -> bool:
""":class:`bool`: Whether the role is managed by an integration.
.. versionadded:: 1.6
"""
return self.tags is not None and self.tags.is_integration()
def is_assignable(self) -> bool:
""":class:`bool`: Whether the role is able to be assigned or removed by the bot.
.. versionadded:: 2.0
"""
me = self.guild.me
return not self.is_default() and not self.managed and (me.top_role > self or me.id == self.guild.owner_id)
    @property
    def permissions(self) -> Permissions:
        """:class:`Permissions`: Returns the role's permissions."""
        return Permissions(self._permissions)
    @property
    def colour(self) -> Colour:
        """:class:`Colour`: Returns the role colour. An alias exists under ``color``."""
        return Colour(self._colour)
    @property
    def color(self) -> Colour:
        """:class:`Colour`: Returns the role color. An alias exists under ``colour``."""
        return self.colour
    @property
    def icon(self) -> Optional[Asset]:
        """Optional[:class:`.Asset`]: Returns the role's icon asset, if available.

        .. note::
            If this is ``None``, the role might instead have unicode emoji as its icon
            if :attr:`unicode_emoji` is not ``None``.

            If you want the icon that a role has displayed, consider using :attr:`display_icon`.

        .. versionadded:: 2.0
        """
        if self._icon is None:
            return None
        return Asset._from_icon(self._state, self.id, self._icon, path='role')
    @property
    def display_icon(self) -> Optional[Union[Asset, str]]:
        """Optional[Union[:class:`.Asset`, :class:`str`]]: Returns the role's display icon, if available.

        .. versionadded:: 2.0
        """
        # A custom icon asset takes precedence over the unicode emoji.
        return self.icon or self.unicode_emoji
    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: Returns the role's creation time in UTC."""
        return snowflake_time(self.id)
    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention a role."""
        return f'<@&{self.id}>'
    @property
    def members(self) -> List[Member]:
        """List[:class:`Member`]: Returns all the members with this role."""
        all_members = list(self.guild._members.values())
        if self.is_default():
            # @everyone implicitly includes every member of the guild.
            return all_members
        role_id = self.id
        return [member for member in all_members if member._roles.has(role_id)]
    async def _move(self, position: int, reason: Optional[str]) -> None:
        # Internal helper for edit(position=...): bulk-reorders the guild's
        # roles so that this role lands at `position`.
        if position <= 0:
            raise ValueError("Cannot move role to position 0 or below")
        if self.is_default():
            raise ValueError("Cannot move default role")
        if self.position == position:
            return  # Save discord the extra request.
        http = self._state.http
        # Every role whose position lies between the old and new spots shifts
        # by one; collect those (excluding @everyone at index 0 and ourselves).
        change_range = range(min(self.position, position), max(self.position, position) + 1)
        roles = [r.id for r in self.guild.roles[1:] if r.position in change_range and r.id != self.id]
        # Insert our ID at the end of the shifted run that corresponds to the
        # direction of the move, then pair IDs with their new positions.
        if self.position > position:
            roles.insert(0, self.id)
        else:
            roles.append(self.id)
        payload: List[RolePositionUpdate] = [{"id": z[0], "position": z[1]} for z in zip(roles, change_range)]
        await http.move_role_position(self.guild.id, payload, reason=reason)
    async def edit(
        self,
        *,
        name: str = MISSING,
        permissions: Permissions = MISSING,
        colour: Union[Colour, int] = MISSING,
        color: Union[Colour, int] = MISSING,
        hoist: bool = MISSING,
        display_icon: Optional[Union[bytes, str]] = MISSING,
        mentionable: bool = MISSING,
        position: int = MISSING,
        reason: Optional[str] = MISSING,
    ) -> Optional[Role]:
        """|coro|

        Edits the role.

        You must have the :attr:`~Permissions.manage_roles` permission to
        use this.

        All fields are optional.

        .. versionchanged:: 1.4
            Can now pass ``int`` to ``colour`` keyword-only parameter.

        .. versionchanged:: 2.0
            Edits are no longer in-place, the newly edited role is returned instead.

        .. versionadded:: 2.0
            The ``display_icon`` keyword-only parameter was added.

        .. versionchanged:: 2.0
            This function will now raise :exc:`ValueError` instead of
            ``InvalidArgument``.

        Parameters
        -----------
        name: :class:`str`
            The new role name to change to.
        permissions: :class:`Permissions`
            The new permissions to change to.
        colour: Union[:class:`Colour`, :class:`int`]
            The new colour to change to. (aliased to color as well)
        hoist: :class:`bool`
            Indicates if the role should be shown separately in the member list.
        display_icon: Optional[Union[:class:`bytes`, :class:`str`]]
            A :term:`py:bytes-like object` representing the icon
            or :class:`str` representing unicode emoji that should be used as a role icon.
            Could be ``None`` to denote removal of the icon.
            Only PNG/JPEG is supported.
            This is only available to guilds that contain ``ROLE_ICONS`` in :attr:`Guild.features`.
        mentionable: :class:`bool`
            Indicates if the role should be mentionable by others.
        position: :class:`int`
            The new role's position. This must be below your top role's
            position or it will fail.
        reason: Optional[:class:`str`]
            The reason for editing this role. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You do not have permissions to change the role.
        HTTPException
            Editing the role failed.
        ValueError
            An invalid position was given or the default
            role was asked to be moved.

        Returns
        --------
        :class:`Role`
            The newly edited role.
        """
        # Position changes go through a separate bulk-reorder endpoint.
        if position is not MISSING:
            await self._move(position, reason=reason)
        payload: Dict[str, Any] = {}
        # `color` is just an alias for `colour`; the alias wins if both given.
        if color is not MISSING:
            colour = color
        if colour is not MISSING:
            if isinstance(colour, int):
                payload['color'] = colour
            else:
                payload['color'] = colour.value
        if name is not MISSING:
            payload['name'] = name
        if permissions is not MISSING:
            payload['permissions'] = permissions.value
        if hoist is not MISSING:
            payload['hoist'] = hoist
        if display_icon is not MISSING:
            # Clear both icon fields first; exactly one of them is set below
            # (bytes -> custom icon, str/None -> unicode emoji or removal).
            payload['icon'] = None
            payload['unicode_emoji'] = None
            if isinstance(display_icon, bytes):
                payload['icon'] = _bytes_to_base64_data(display_icon)
            else:
                payload['unicode_emoji'] = display_icon
        if mentionable is not MISSING:
            payload['mentionable'] = mentionable
        data = await self._state.http.edit_role(self.guild.id, self.id, reason=reason, **payload)
        return Role(guild=self.guild, data=data, state=self._state)
    async def delete(self, *, reason: Optional[str] = None) -> None:
        """|coro|

        Deletes the role.

        You must have the :attr:`~Permissions.manage_roles` permission to
        use this.

        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for deleting this role. Shows up on the audit log.

        Raises
        --------
        Forbidden
            You do not have permissions to delete the role.
        HTTPException
            Deleting the role failed.
        """
        await self._state.http.delete_role(self.guild.id, self.id, reason=reason)
| mit | 79e5a8e9da6487f2eb98d787f6c30f22 | 32.342466 | 114 | 0.599366 | 4.19139 | false | false | false | false |
rapptz/discord.py | discord/audit_logs.py | 2 | 33755 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generator, List, Optional, Tuple, Type, TypeVar, Union
from . import enums, flags, utils
from .asset import Asset
from .colour import Colour
from .invite import Invite
from .mixins import Hashable
from .object import Object
from .permissions import PermissionOverwrite, Permissions
from .automod import AutoModTrigger, AutoModRuleAction, AutoModPresets, AutoModRule
from .role import Role
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .member import Member
from .scheduled_event import ScheduledEvent
from .stage_instance import StageInstance
from .sticker import GuildSticker
from .threads import Thread
from .integrations import PartialIntegration
from .channel import ForumChannel, StageChannel, ForumTag
__all__ = (
'AuditLogDiff',
'AuditLogChanges',
'AuditLogEntry',
)
if TYPE_CHECKING:
import datetime
from . import abc
from .guild import Guild
from .state import ConnectionState
from .types.audit_log import (
AuditLogChange as AuditLogChangePayload,
AuditLogEntry as AuditLogEntryPayload,
)
from .types.channel import (
PermissionOverwrite as PermissionOverwritePayload,
ForumTag as ForumTagPayload,
DefaultReaction as DefaultReactionPayload,
)
from .types.invite import Invite as InvitePayload
from .types.role import Role as RolePayload
from .types.snowflake import Snowflake
from .types.command import ApplicationCommandPermissions
from .types.automod import AutoModerationTriggerMetadata, AutoModerationAction
from .user import User
from .app_commands import AppCommand
TargetType = Union[
Guild,
abc.GuildChannel,
Member,
User,
Role,
Invite,
Emoji,
StageInstance,
GuildSticker,
Thread,
Object,
PartialIntegration,
AutoModRule,
None,
]
def _transform_timestamp(entry: AuditLogEntry, data: Optional[str]) -> Optional[datetime.datetime]:
    # ISO8601 timestamp string -> datetime (None passes through parse_time).
    return utils.parse_time(data)
def _transform_color(entry: AuditLogEntry, data: int) -> Colour:
    # Raw integer colour value -> Colour wrapper.
    return Colour(data)
def _transform_snowflake(entry: AuditLogEntry, data: Snowflake) -> int:
return int(data)
def _transform_channel(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Union[abc.GuildChannel, Object]]:
    if data is None:
        return None
    # Fall back to a bare Object when the channel is no longer cached.
    return entry.guild.get_channel(int(data)) or Object(id=data)
def _transform_channels_or_threads(
    entry: AuditLogEntry, data: List[Snowflake]
) -> List[Union[abc.GuildChannel, Thread, Object]]:
    # Resolve each ID against the guild cache, degrading to Object on miss.
    return [entry.guild.get_channel_or_thread(int(data)) or Object(id=data) for data in data]
def _transform_member_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Union[Member, User, None]:
    if data is None:
        return None
    return entry._get_member(int(data))
def _transform_guild_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Guild]:
    if data is None:
        return None
    return entry._state._get_guild(int(data))
def _transform_roles(entry: AuditLogEntry, data: List[Snowflake]) -> List[Union[Role, Object]]:
    # Resolve role IDs against the guild cache; uncached roles become typed Objects.
    return [entry.guild.get_role(int(role_id)) or Object(role_id, type=Role) for role_id in data]
def _transform_applied_forum_tags(entry: AuditLogEntry, data: List[Snowflake]) -> List[Union[ForumTag, Object]]:
    thread = entry.target
    # Tags can only be resolved through the parent forum channel's tag list.
    if isinstance(thread, Thread) and isinstance(thread.parent, ForumChannel):
        return [thread.parent.get_tag(tag_id) or Object(id=tag_id, type=ForumTag) for tag_id in map(int, data)]
    return [Object(id=tag_id, type=ForumTag) for tag_id in data]
def _transform_overloaded_flags(entry: AuditLogEntry, data: int) -> Union[int, flags.ChannelFlags]:
    # The `flags` key is definitely overloaded. Right now it's for channels and threads but
    # I am aware of `member.flags` and `user.flags` existing. However, this does not impact audit logs
    # at the moment but better safe than sorry.
    channel_audit_log_types = (
        enums.AuditLogAction.channel_create,
        enums.AuditLogAction.channel_update,
        enums.AuditLogAction.channel_delete,
        enums.AuditLogAction.thread_create,
        enums.AuditLogAction.thread_update,
        enums.AuditLogAction.thread_delete,
    )
    if entry.action in channel_audit_log_types:
        return flags.ChannelFlags._from_value(data)
    # Unknown context: hand back the raw integer untouched.
    return data
def _transform_forum_tags(entry: AuditLogEntry, data: List[ForumTagPayload]) -> List[ForumTag]:
    return [ForumTag.from_data(state=entry._state, data=d) for d in data]
def _transform_default_reaction(entry: AuditLogEntry, data: Optional[DefaultReactionPayload]) -> Optional[PartialEmoji]:
    # Annotation widened to Optional: the None guard below shows the payload
    # can be absent.
    if data is None:
        return None
    emoji_name = data.get('emoji_name') or ''
    emoji_id = utils._get_as_snowflake(data, 'emoji_id') or None  # Coerce 0 -> None
    return PartialEmoji.with_state(state=entry._state, name=emoji_name, id=emoji_id)
def _transform_overwrites(
    entry: AuditLogEntry, data: List[PermissionOverwritePayload]
) -> List[Tuple[Object, PermissionOverwrite]]:
    overwrites = []
    for elem in data:
        allow = Permissions(int(elem['allow']))
        deny = Permissions(int(elem['deny']))
        ow = PermissionOverwrite.from_pair(allow, deny)
        # Overwrite target type: '0' is a role, '1' is a member.
        ow_type = elem['type']
        ow_id = int(elem['id'])
        target = None
        if ow_type == '0':
            target = entry.guild.get_role(ow_id)
        elif ow_type == '1':
            target = entry._get_member(ow_id)
        if target is None:
            # Not cached: fall back to a typed Object so callers can still
            # distinguish role targets from member targets.
            target = Object(id=ow_id, type=Role if ow_type == '0' else Member)
        overwrites.append((target, ow))
    return overwrites
def _transform_icon(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
    if data is None:
        return None
    # The 'icon_hash' key is shared between guild updates and role updates;
    # dispatch on the action to build the right asset URL.
    if entry.action is enums.AuditLogAction.guild_update:
        return Asset._from_guild_icon(entry._state, entry.guild.id, data)
    else:
        return Asset._from_icon(entry._state, entry._target_id, data, path='role')  # type: ignore # target_id won't be None in this case
def _transform_avatar(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
    if data is None:
        return None
    return Asset._from_avatar(entry._state, entry._target_id, data)  # type: ignore # target_id won't be None in this case
def _transform_cover_image(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
    if data is None:
        return None
    return Asset._from_scheduled_event_cover_image(entry._state, entry._target_id, data)  # type: ignore # target_id won't be None in this case
def _guild_hash_transformer(path: str) -> Callable[[AuditLogEntry, Optional[str]], Optional[Asset]]:
    # Factory: builds a transformer turning a guild image hash into an Asset
    # under the given CDN path (e.g. 'splashes', 'banners').
    def _transform(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
        if data is None:
            return None
        return Asset._from_guild_image(entry._state, entry.guild.id, data, path=path)
    return _transform
def _transform_automod_trigger_metadata(
    entry: AuditLogEntry, data: AutoModerationTriggerMetadata
) -> Optional[AutoModTrigger]:
    if isinstance(entry.target, AuditLogEntry) else ...
E = TypeVar('E', bound=enums.Enum)
def _enum_transformer(enum: Type[E]) -> Callable[[AuditLogEntry, int], E]:
    # Factory: builds a transformer converting a raw int into `enum`.
    def _transform(entry: AuditLogEntry, data: int) -> E:
        return enums.try_enum(enum, data)
    return _transform
F = TypeVar('F', bound=flags.BaseFlags)
def _flag_transformer(cls: Type[F]) -> Callable[[AuditLogEntry, Union[int, str]], F]:
    # Factory: builds a transformer converting a raw int/str bitset into `cls`.
    def _transform(entry: AuditLogEntry, data: Union[int, str]) -> F:
        return cls._from_value(int(data))
    return _transform
def _transform_type(entry: AuditLogEntry, data: int) -> Union[enums.ChannelType, enums.StickerType]:
    # The raw 'type' key is overloaded: sticker_* actions carry a StickerType,
    # everything else a ChannelType.
    if entry.action.name.startswith('sticker_'):
        return enums.try_enum(enums.StickerType, data)
    else:
        return enums.try_enum(enums.ChannelType, data)
class AuditLogDiff:
    """Attribute bag holding one side (before/after) of an audit-log change.

    Attributes are assigned dynamically by :class:`AuditLogChanges`; the
    container supports ``len()``, iteration over ``(name, value)`` pairs,
    and a debug-friendly repr.
    """
    def __len__(self) -> int:
        return len(self.__dict__)
    def __iter__(self) -> Generator[Tuple[str, Any], None, None]:
        yield from self.__dict__.items()
    def __repr__(self) -> str:
        rendered = ' '.join(f'{name}={value!r}' for name, value in self.__dict__.items())
        return f'<AuditLogDiff {rendered}>'
    if TYPE_CHECKING:
        # Tell type checkers that arbitrary attribute access is expected.
        def __getattr__(self, item: str) -> Any:
            ...
        def __setattr__(self, key: str, value: Any) -> Any:
            ...
Transformer = Callable[["AuditLogEntry", Any], Any]
class AuditLogChanges:
    """Parses the raw ``changes`` array of an audit log entry into a
    ``before``/``after`` pair of :class:`AuditLogDiff` objects, applying the
    per-key TRANSFORMERS table to convert raw API values into library types.
    """
    # fmt: off
    # Maps raw change key -> (renamed attribute or None, transformer or None).
    TRANSFORMERS: ClassVar[Dict[str, Tuple[Optional[str], Optional[Transformer]]]] = {
        'verification_level':                    (None, _enum_transformer(enums.VerificationLevel)),
        'explicit_content_filter':               (None, _enum_transformer(enums.ContentFilter)),
        'allow':                                 (None, _flag_transformer(Permissions)),
        'deny':                                  (None, _flag_transformer(Permissions)),
        'permissions':                           (None, _flag_transformer(Permissions)),
        'id':                                    (None, _transform_snowflake),
        'color':                                 ('colour', _transform_color),
        'owner_id':                              ('owner', _transform_member_id),
        'inviter_id':                            ('inviter', _transform_member_id),
        'channel_id':                            ('channel', _transform_channel),
        'afk_channel_id':                        ('afk_channel', _transform_channel),
        'system_channel_id':                     ('system_channel', _transform_channel),
        'system_channel_flags':                  (None, _flag_transformer(flags.SystemChannelFlags)),
        'widget_channel_id':                     ('widget_channel', _transform_channel),
        'rules_channel_id':                      ('rules_channel', _transform_channel),
        'public_updates_channel_id':             ('public_updates_channel', _transform_channel),
        'permission_overwrites':                 ('overwrites', _transform_overwrites),
        'splash_hash':                           ('splash', _guild_hash_transformer('splashes')),
        'banner_hash':                           ('banner', _guild_hash_transformer('banners')),
        'discovery_splash_hash':                 ('discovery_splash', _guild_hash_transformer('discovery-splashes')),
        'icon_hash':                             ('icon', _transform_icon),
        'avatar_hash':                           ('avatar', _transform_avatar),
        'rate_limit_per_user':                   ('slowmode_delay', None),
        'default_thread_rate_limit_per_user':    ('default_thread_slowmode_delay', None),
        'guild_id':                              ('guild', _transform_guild_id),
        'tags':                                  ('emoji', None),
        'default_message_notifications':         ('default_notifications', _enum_transformer(enums.NotificationLevel)),
        'video_quality_mode':                    (None, _enum_transformer(enums.VideoQualityMode)),
        'privacy_level':                         (None, _enum_transformer(enums.PrivacyLevel)),
        'format_type':                           (None, _enum_transformer(enums.StickerFormatType)),
        'type':                                  (None, _transform_type),
        'communication_disabled_until':          ('timed_out_until', _transform_timestamp),
        'expire_behavior':                       (None, _enum_transformer(enums.ExpireBehaviour)),
        'mfa_level':                             (None, _enum_transformer(enums.MFALevel)),
        'status':                                (None, _enum_transformer(enums.EventStatus)),
        'entity_type':                           (None, _enum_transformer(enums.EntityType)),
        'preferred_locale':                      (None, _enum_transformer(enums.Locale)),
        'image_hash':                            ('cover_image', _transform_cover_image),
        'trigger_type':                          (None, _enum_transformer(enums.AutoModRuleTriggerType)),
        'event_type':                            (None, _enum_transformer(enums.AutoModRuleEventType)),
        'trigger_metadata':                      ('trigger', _transform_automod_trigger_metadata),
        'actions':                               (None, _transform_automod_actions),
        'exempt_channels':                       (None, _transform_channels_or_threads),
        'exempt_roles':                          (None, _transform_roles),
        'applied_tags':                          (None, _transform_applied_forum_tags),
        'available_tags':                        (None, _transform_forum_tags),
        'flags':                                 (None, _transform_overloaded_flags),
        'default_reaction_emoji':                (None, _transform_default_reaction),
    }
    # fmt: on
    def __init__(self, entry: AuditLogEntry, data: List[AuditLogChangePayload]):
        self.before: AuditLogDiff = AuditLogDiff()
        self.after: AuditLogDiff = AuditLogDiff()
        # special case entire process since each
        # element in data is a different target
        # key is the target id
        if entry.action is enums.AuditLogAction.app_command_permission_update:
            self.before.app_command_permissions = []
            self.after.app_command_permissions = []
            for elem in data:
                self._handle_app_command_permissions(
                    self.before,
                    entry,
                    elem.get('old_value'),  # type: ignore # value will be an ApplicationCommandPermissions if present
                )
                self._handle_app_command_permissions(
                    self.after,
                    entry,
                    elem.get('new_value'),  # type: ignore # value will be an ApplicationCommandPermissions if present
                )
            return
        for elem in data:
            attr = elem['key']
            # special cases for role add/remove
            if attr == '$add':
                self._handle_role(self.before, self.after, entry, elem['new_value'])  # type: ignore # new_value is a list of roles in this case
                continue
            elif attr == '$remove':
                self._handle_role(self.after, self.before, entry, elem['new_value'])  # type: ignore # new_value is a list of roles in this case
                continue
            # Look up the rename + transformer for this key; unknown keys are
            # stored untransformed under their raw name.
            try:
                key, transformer = self.TRANSFORMERS[attr]
            except (ValueError, KeyError):
                transformer = None
            else:
                if key:
                    attr = key
            transformer: Optional[Transformer]
            # A missing old/new value means "not present", recorded as None.
            try:
                before = elem['old_value']
            except KeyError:
                before = None
            else:
                if transformer:
                    before = transformer(entry, before)
            setattr(self.before, attr, before)
            try:
                after = elem['new_value']
            except KeyError:
                after = None
            else:
                if transformer:
                    after = transformer(entry, after)
            setattr(self.after, attr, after)
        # add an alias
        if hasattr(self.after, 'colour'):
            self.after.color = self.after.colour
            self.before.color = self.before.colour
        if hasattr(self.after, 'expire_behavior'):
            self.after.expire_behaviour = self.after.expire_behavior
            self.before.expire_behaviour = self.before.expire_behavior
    def __repr__(self) -> str:
        return f'<AuditLogChanges before={self.before!r} after={self.after!r}>'
    def _handle_role(self, first: AuditLogDiff, second: AuditLogDiff, entry: AuditLogEntry, elem: List[RolePayload]) -> None:
        # Role add/remove changes list the affected roles on one side only;
        # the other side just gets an empty list.
        if not hasattr(first, 'roles'):
            setattr(first, 'roles', [])
        data = []
        g: Guild = entry.guild
        for e in elem:
            role_id = int(e['id'])
            role = g.get_role(role_id)
            if role is None:
                # Uncached role: fake it with an Object carrying the name
                # from the payload so the diff is still readable.
                role = Object(id=role_id, type=Role)
                role.name = e['name']  # type: ignore # Object doesn't usually have name
            data.append(role)
        setattr(second, 'roles', data)
    def _handle_app_command_permissions(
        self,
        diff: AuditLogDiff,
        entry: AuditLogEntry,
        data: Optional[ApplicationCommandPermissions],
    ):
        if data is None:
            return
        # avoid circular import
        from discord.app_commands import AppCommandPermissions
        state = entry._state
        guild = entry.guild
        diff.app_command_permissions.append(AppCommandPermissions(data=data, guild=guild, state=state))
class _AuditLogProxy:
def __init__(self, **kwargs: Any) -> None:
for k, v in kwargs.items():
setattr(self, k, v)
# The subclasses below only declare the attributes each audit-log action's
# `extra` payload is expected to carry; instances are still built generically
# by _AuditLogProxy.__init__ from keyword arguments.
class _AuditLogProxyMemberPrune(_AuditLogProxy):
    delete_member_days: int
    members_removed: int
class _AuditLogProxyMemberMoveOrMessageDelete(_AuditLogProxy):
    channel: Union[abc.GuildChannel, Thread]
    count: int
class _AuditLogProxyMemberDisconnect(_AuditLogProxy):
    count: int
class _AuditLogProxyPinAction(_AuditLogProxy):
    channel: Union[abc.GuildChannel, Thread]
    message_id: int
class _AuditLogProxyStageInstanceAction(_AuditLogProxy):
    channel: abc.GuildChannel
class _AuditLogProxyMessageBulkDelete(_AuditLogProxy):
    count: int
class _AuditLogProxyAutoModAction(_AuditLogProxy):
    automod_rule_name: str
    automod_rule_trigger_type: str
    channel: Union[abc.GuildChannel, Thread]
class AuditLogEntry(Hashable):
r"""Represents an Audit Log entry.
You retrieve these via :meth:`Guild.audit_logs`.
.. container:: operations
.. describe:: x == y
Checks if two entries are equal.
.. describe:: x != y
Checks if two entries are not equal.
.. describe:: hash(x)
Returns the entry's hash.
.. versionchanged:: 1.7
Audit log entries are now comparable and hashable.
Attributes
-----------
action: :class:`AuditLogAction`
The action that was done.
user: :class:`abc.User`
The user who initiated this action. Usually a :class:`Member`\, unless gone
then it's a :class:`User`.
id: :class:`int`
The entry ID.
target: Any
The target that got changed. The exact type of this depends on
the action being done.
reason: Optional[:class:`str`]
The reason this action was done.
extra: Any
Extra information that this entry has that might be useful.
For most actions, this is ``None``. However in some cases it
contains extra information. See :class:`AuditLogAction` for
which actions have this field filled out.
"""
def __init__(
self,
*,
users: Dict[int, User],
integrations: Dict[int, PartialIntegration],
app_commands: Dict[int, AppCommand],
automod_rules: Dict[int, AutoModRule],
data: AuditLogEntryPayload,
guild: Guild,
):
self._state: ConnectionState = guild._state
self.guild: Guild = guild
self._users: Dict[int, User] = users
self._integrations: Dict[int, PartialIntegration] = integrations
self._app_commands: Dict[int, AppCommand] = app_commands
self._automod_rules: Dict[int, AutoModRule] = automod_rules
self._from_data(data)
def _from_data(self, data: AuditLogEntryPayload) -> None:
self.action: enums.AuditLogAction = enums.try_enum(enums.AuditLogAction, data['action_type'])
self.id: int = int(data['id'])
# this key is technically not usually present
self.reason: Optional[str] = data.get('reason')
extra = data.get('options')
# fmt: off
self.extra: Union[
_AuditLogProxyMemberPrune,
_AuditLogProxyMemberMoveOrMessageDelete,
_AuditLogProxyMemberDisconnect,
_AuditLogProxyPinAction,
_AuditLogProxyStageInstanceAction,
_AuditLogProxyMessageBulkDelete,
_AuditLogProxyAutoModAction,
Member, User, None, PartialIntegration,
Role, Object
] = None
# fmt: on
if isinstance(self.action, enums.AuditLogAction) and extra:
if self.action is enums.AuditLogAction.member_prune:
# member prune has two keys with useful information
self.extra = _AuditLogProxyMemberPrune(
delete_member_days=int(extra['delete_member_days']),
members_removed=int(extra['members_removed']),
)
elif self.action is enums.AuditLogAction.member_move or self.action is enums.AuditLogAction.message_delete:
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyMemberMoveOrMessageDelete(
count=int(extra['count']),
channel=self.guild.get_channel_or_thread(channel_id) or Object(id=channel_id),
)
elif self.action is enums.AuditLogAction.member_disconnect:
# The member disconnect action has a dict with some information
self.extra = _AuditLogProxyMemberDisconnect(count=int(extra['count']))
elif self.action is enums.AuditLogAction.message_bulk_delete:
# The bulk message delete action has the number of messages deleted
self.extra = _AuditLogProxyMessageBulkDelete(count=int(extra['count']))
elif self.action.name.endswith('pin'):
# the pin actions have a dict with some information
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyPinAction(
channel=self.guild.get_channel_or_thread(channel_id) or Object(id=channel_id),
message_id=int(extra['message_id']),
)
elif (
self.action is enums.AuditLogAction.automod_block_message
or self.action is enums.AuditLogAction.automod_flag_message
or self.action is enums.AuditLogAction.automod_timeout_member
):
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyAutoModAction(
automod_rule_name=extra['auto_moderation_rule_name'],
automod_rule_trigger_type=enums.try_enum(
enums.AutoModRuleTriggerType, extra['auto_moderation_rule_trigger_type']
),
channel=self.guild.get_channel_or_thread(channel_id) or Object(id=channel_id),
)
elif self.action.name.startswith('overwrite_'):
# the overwrite_ actions have a dict with some information
instance_id = int(extra['id'])
the_type = extra.get('type')
if the_type == '1':
self.extra = self._get_member(instance_id)
elif the_type == '0':
role = self.guild.get_role(instance_id)
if role is None:
role = Object(id=instance_id, type=Role)
role.name = extra.get('role_name') # type: ignore # Object doesn't usually have name
self.extra = role
elif self.action.name.startswith('stage_instance'):
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyStageInstanceAction(
channel=self.guild.get_channel(channel_id) or Object(id=channel_id, type=StageChannel)
)
elif self.action.name.startswith('app_command'):
app_id = int(extra['application_id'])
self.extra = self._get_integration_by_app_id(app_id) or Object(app_id, type=PartialIntegration)
# this key is not present when the above is present, typically.
# It's a list of { new_value: a, old_value: b, key: c }
# where new_value and old_value are not guaranteed to be there depending
# on the action type, so let's just fetch it for now and only turn it
# into meaningful data when requested
self._changes = data.get('changes', [])
user_id = utils._get_as_snowflake(data, 'user_id')
self.user: Optional[Union[User, Member]] = self._get_member(user_id)
self._target_id = utils._get_as_snowflake(data, 'target_id')
def _get_member(self, user_id: Optional[int]) -> Union[Member, User, None]:
if user_id is None:
return None
return self.guild.get_member(user_id) or self._users.get(user_id)
def _get_integration(self, integration_id: Optional[int]) -> Optional[PartialIntegration]:
if integration_id is None:
return None
return self._integrations.get(integration_id)
def _get_integration_by_app_id(self, application_id: Optional[int]) -> Optional[PartialIntegration]:
if application_id is None:
return None
# get PartialIntegration by application id
return utils.get(self._integrations.values(), application_id=application_id)
def _get_app_command(self, app_command_id: Optional[int]) -> Optional[AppCommand]:
if app_command_id is None:
return None
return self._app_commands.get(app_command_id)
def __repr__(self) -> str:
return f'<AuditLogEntry id={self.id} action={self.action} user={self.user!r}>'
@utils.cached_property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the entry's creation time in UTC."""
return utils.snowflake_time(self.id)
@utils.cached_property
def target(self) -> TargetType:
if self.action.target_type is None:
return None
try:
converter = getattr(self, '_convert_target_' + self.action.target_type)
except AttributeError:
if self._target_id is None:
return None
return Object(id=self._target_id)
else:
return converter(self._target_id)
@utils.cached_property
def category(self) -> Optional[enums.AuditLogActionCategory]:
"""Optional[:class:`AuditLogActionCategory`]: The category of the action, if applicable."""
return self.action.category
@utils.cached_property
def changes(self) -> AuditLogChanges:
""":class:`AuditLogChanges`: The list of changes this entry has."""
obj = AuditLogChanges(self, self._changes)
del self._changes
return obj
@utils.cached_property
def before(self) -> AuditLogDiff:
""":class:`AuditLogDiff`: The target's prior state."""
return self.changes.before
@utils.cached_property
def after(self) -> AuditLogDiff:
""":class:`AuditLogDiff`: The target's subsequent state."""
return self.changes.after
def _convert_target_guild(self, target_id: int) -> Guild:
return self.guild
def _convert_target_channel(self, target_id: int) -> Union[abc.GuildChannel, Object]:
return self.guild.get_channel(target_id) or Object(id=target_id)
def _convert_target_user(self, target_id: int) -> Union[Member, User, None]:
return self._get_member(target_id)
def _convert_target_role(self, target_id: int) -> Union[Role, Object]:
return self.guild.get_role(target_id) or Object(id=target_id, type=Role)
def _convert_target_invite(self, target_id: None) -> Invite:
# invites have target_id set to null
# so figure out which change has the full invite data
changeset = self.before if self.action is enums.AuditLogAction.invite_delete else self.after
fake_payload: InvitePayload = {
'max_age': changeset.max_age,
'max_uses': changeset.max_uses,
'code': changeset.code,
'temporary': changeset.temporary,
'uses': changeset.uses,
'channel': None, # type: ignore # the channel is passed to the Invite constructor directly
}
obj = Invite(state=self._state, data=fake_payload, guild=self.guild, channel=changeset.channel)
try:
obj.inviter = changeset.inviter
except AttributeError:
pass
return obj
def _convert_target_emoji(self, target_id: int) -> Union[Emoji, Object]:
return self._state.get_emoji(target_id) or Object(id=target_id, type=Emoji)
def _convert_target_message(self, target_id: int) -> Union[Member, User, None]:
return self._get_member(target_id)
def _convert_target_stage_instance(self, target_id: int) -> Union[StageInstance, Object]:
return self.guild.get_stage_instance(target_id) or Object(id=target_id, type=StageInstance)
def _convert_target_sticker(self, target_id: int) -> Union[GuildSticker, Object]:
return self._state.get_sticker(target_id) or Object(id=target_id, type=GuildSticker)
def _convert_target_thread(self, target_id: int) -> Union[Thread, Object]:
return self.guild.get_thread(target_id) or Object(id=target_id, type=Thread)
def _convert_target_guild_scheduled_event(self, target_id: int) -> Union[ScheduledEvent, Object]:
return self.guild.get_scheduled_event(target_id) or Object(id=target_id, type=ScheduledEvent)
def _convert_target_integration(self, target_id: int) -> Union[PartialIntegration, Object]:
return self._get_integration(target_id) or Object(target_id, type=PartialIntegration)
def _convert_target_app_command(self, target_id: int) -> Union[AppCommand, Object]:
target = self._get_app_command(target_id)
if not target:
# circular import
from .app_commands import AppCommand
target = Object(target_id, type=AppCommand)
return target
def _convert_target_integration_or_app_command(self, target_id: int) -> Union[PartialIntegration, AppCommand, Object]:
target = self._get_integration_by_app_id(target_id) or self._get_app_command(target_id)
if not target:
try:
# get application id from extras
# if it matches target id, type should be integration
target_app = self.extra
# extra should be an Object or PartialIntegration
app_id = target_app.application_id if isinstance(target_app, PartialIntegration) else target_app.id # type: ignore
type = PartialIntegration if target_id == app_id else AppCommand
except AttributeError:
return Object(target_id)
else:
return Object(target_id, type=type)
return target
def _convert_target_auto_moderation(self, target_id: int) -> Union[AutoModRule, Object]:
return self._automod_rules.get(target_id) or Object(target_id, type=AutoModRule)
| mit | 636269f57da64171e3290146e2a20163 | 39.964806 | 144 | 0.617953 | 4.014152 | false | false | false | false |
explosion/thinc | examples/mnist.py | 2 | 1591 | """
PyTorch version: https://github.com/pytorch/examples/blob/master/mnist/main.py
TensorFlow version: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist.py
"""
# pip install thinc ml_datasets typer
from thinc.api import Model, chain, Relu, Softmax, Adam
import ml_datasets
from wasabi import msg
from tqdm import tqdm
import typer
def main(
n_hidden: int = 256, dropout: float = 0.2, n_iter: int = 10, batch_size: int = 128
):
# Define the model
model: Model = chain(
Relu(nO=n_hidden, dropout=dropout),
Relu(nO=n_hidden, dropout=dropout),
Softmax(),
)
# Load the data
(train_X, train_Y), (dev_X, dev_Y) = ml_datasets.mnist()
# Set any missing shapes for the model.
model.initialize(X=train_X[:5], Y=train_Y[:5])
train_data = model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True)
dev_data = model.ops.multibatch(batch_size, dev_X, dev_Y)
# Create the optimizer.
optimizer = Adam(0.001)
for i in range(n_iter):
for X, Y in tqdm(train_data, leave=False):
Yh, backprop = model.begin_update(X)
backprop(Yh - Y)
model.finish_update(optimizer)
# Evaluate and print progress
correct = 0
total = 0
for X, Y in dev_data:
Yh = model.predict(X)
correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()
total += Yh.shape[0]
score = correct / total
msg.row((i, f"{score:.3f}"), widths=(3, 5))
if __name__ == "__main__":
typer.run(main)
| mit | 893abcf5904813db69b11f27933fbb9d | 32.851064 | 117 | 0.615965 | 3.150495 | false | false | false | false |
explosion/thinc | thinc/optimizers.py | 1 | 11881 | import math
from typing import Dict, Optional, Union, Tuple, List, cast
from collections import defaultdict
from .backends import get_array_ops
from .types import Generator, FloatsXd
from .config import registry
KeyT = Tuple[int, str]
FloatOrSeq = Union[float, List[float], Generator]
IntOrSeq = Union[int, List[int], Generator]
SGD_DEFAULTS: Dict[str, Union[float, bool, int]] = {
"L2": 0.0,
"L2_is_weight_decay": True,
"grad_clip": 1.0,
}
ADAM_DEFAULTS: Dict[str, Union[float, bool, int]] = {
"learn_rate": 0.001,
"beta1": 0.9,
"beta2": 0.999,
"eps": 1e-08,
"L2": SGD_DEFAULTS["L2"],
"grad_clip": SGD_DEFAULTS["grad_clip"],
"L2_is_weight_decay": True,
}
@registry.optimizers("RAdam.v1")
def RAdam(
learn_rate: FloatOrSeq = ADAM_DEFAULTS["learn_rate"],
*,
beta1: FloatOrSeq = ADAM_DEFAULTS["beta1"],
beta2: FloatOrSeq = ADAM_DEFAULTS["beta2"],
eps: FloatOrSeq = ADAM_DEFAULTS["eps"],
L2: FloatOrSeq = ADAM_DEFAULTS["L2"],
L2_is_weight_decay: bool = cast(bool, ADAM_DEFAULTS["L2_is_weight_decay"]),
grad_clip: FloatOrSeq = ADAM_DEFAULTS["grad_clip"],
use_averages: bool = True,
):
return Optimizer(
learn_rate,
beta1=beta1,
beta2=beta2,
eps=eps,
grad_clip=grad_clip,
L2_is_weight_decay=L2_is_weight_decay,
L2=L2,
use_averages=use_averages,
use_radam=True,
)
@registry.optimizers("Adam.v1")
def Adam(
learn_rate: FloatOrSeq = ADAM_DEFAULTS["learn_rate"],
*,
L2: FloatOrSeq = ADAM_DEFAULTS["L2"],
beta1: FloatOrSeq = ADAM_DEFAULTS["beta1"],
beta2: FloatOrSeq = ADAM_DEFAULTS["beta2"],
eps: FloatOrSeq = ADAM_DEFAULTS["eps"],
grad_clip: FloatOrSeq = ADAM_DEFAULTS["grad_clip"],
L2_is_weight_decay: bool = cast(bool, ADAM_DEFAULTS["L2_is_weight_decay"]),
use_averages: bool = True,
):
return Optimizer(
learn_rate,
L2=L2,
beta1=beta1,
beta2=beta2,
eps=eps,
grad_clip=grad_clip,
L2_is_weight_decay=L2_is_weight_decay,
use_averages=use_averages,
use_radam=False,
)
@registry.optimizers("SGD.v1")
def SGD(
learn_rate: FloatOrSeq,
*,
L2: FloatOrSeq = SGD_DEFAULTS["L2"],
grad_clip: FloatOrSeq = SGD_DEFAULTS["grad_clip"],
L2_is_weight_decay: bool = cast(bool, SGD_DEFAULTS["L2_is_weight_decay"]),
use_averages: bool = True,
):
return Optimizer(
learn_rate,
L2=L2,
grad_clip=grad_clip,
L2_is_weight_decay=L2_is_weight_decay,
beta1=0.0,
beta2=0.0,
use_averages=use_averages,
)
class Optimizer(object):
"""Do various flavours of stochastic gradient descent, with first and
second order momentum. Currently support 'vanilla' SGD, Adam, and RAdam.
"""
mom1: Dict[KeyT, FloatsXd]
mom2: Dict[KeyT, FloatsXd]
averages: Optional[Dict[KeyT, FloatsXd]]
schedules: Dict[str, Generator]
nr_update: Dict[KeyT, int]
last_seen: Dict[KeyT, int]
grad_clip: float
learn_rate: float
b1: float
b2: float
eps: float
L2: float
use_radam: bool
L2_is_weight_decay: bool
_radam_buffer: List[List[Optional[FloatsXd]]]
# This "locks" the class, so we get an error if you try to assign to
# an unexpected variable.
__slots__ = [
"mom1",
"mom2",
"averages",
"schedules",
"nr_update",
"last_seen",
"grad_clip",
"learn_rate",
"b1",
"b2",
"eps",
"L2",
"use_radam",
"L2_is_weight_decay",
"_radam_buffer",
]
def __init__(
self,
learn_rate: FloatOrSeq,
*,
L2: FloatOrSeq = ADAM_DEFAULTS["L2"],
beta1: FloatOrSeq = ADAM_DEFAULTS["beta1"],
beta2: FloatOrSeq = ADAM_DEFAULTS["beta2"],
eps: FloatOrSeq = ADAM_DEFAULTS["eps"],
grad_clip: FloatOrSeq = ADAM_DEFAULTS["grad_clip"],
use_averages: bool = True,
use_radam: bool = False,
L2_is_weight_decay: bool = True,
):
"""
Initialize an optimizer.
learn_rate (float): The initial learning rate.
L2 (float): The L2 regularization term.
beta1 (float): First-order momentum.
beta2 (float): Second-order momentum.
eps (float): Epsilon term for Adam etc.
grad_clip (float): Gradient clipping.
use_averages (bool): Whether to track moving averages of the parameters.
use_radam (bool): Whether to use the RAdam optimizer.
L2_is_weight_decay (bool): Whether to interpret the L2 parameter as a
weight decay term, in the style of the AdamW optimizer.
"""
self.mom1 = {}
self.mom2 = {}
if use_averages:
self.averages = {}
else:
self.averages = None
self.schedules = {}
self.nr_update = defaultdict(int)
self.last_seen = defaultdict(int)
self._set_attr_or_schedule("grad_clip", grad_clip)
self._set_attr_or_schedule("learn_rate", learn_rate)
self._set_attr_or_schedule("b1", beta1)
self._set_attr_or_schedule("b2", beta2)
self._set_attr_or_schedule("eps", eps)
self._set_attr_or_schedule("L2", L2)
self.use_radam = use_radam
self.L2_is_weight_decay = L2_is_weight_decay
self._radam_buffer = [[None, None, None] for _ in range(10)]
def _set_attr_or_schedule(self, name, value):
if isinstance(value, (float, bool, int)):
setattr(self, name, value)
else:
if isinstance(value, list):
value = iter(value)
self.schedules[name] = value
try:
setattr(self, name, next(value))
except (StopIteration, TypeError) as e:
err = f"Invalid schedule for '{name}' ({type(value)})\n{e}"
raise ValueError(err)
def step_schedules(self):
for key, schedule in self.schedules.items():
try:
value = next(schedule)
except StopIteration: # schedule exhausted, use last value
value = getattr(self, key)
setattr(self, key, value)
def __call__(
self,
key: Tuple[int, str],
weights: FloatsXd,
gradient: FloatsXd,
*,
lr_scale: float = 1.0,
):
"""Call the optimizer with weights and a gradient. The key is the
identifier for the parameter, usually the node ID and parameter name.
"""
if len(gradient) < 1:
return weights, gradient
ops = get_array_ops(weights)
self.nr_update[key] += 1
nr_upd = self.nr_update[key]
if self.L2 != 0 and not self.L2_is_weight_decay:
gradient += self.L2 * weights
if self.grad_clip:
gradient = ops.clip_gradient(gradient, self.grad_clip)
if self.use_radam:
weights, gradient = self._radam(
ops, weights, gradient, lr_scale, key, nr_upd
)
elif self.b1 > 0.0 and self.b2 > 0.0:
weights, gradient = self._adam(
ops, weights, gradient, lr_scale, key, nr_upd
)
elif self.b2 > 0.0: # pragma: no cover
raise NotImplementedError # TODO: error message
else:
weights -= lr_scale * self.learn_rate * gradient
gradient *= 0
if self.L2 != 0 and self.L2_is_weight_decay:
weights -= lr_scale * self.learn_rate * self.L2 * weights
if self.averages is not None:
if key not in self.averages:
self.averages[key] = ops.alloc(weights.shape, dtype="float32")
ops.update_averages(self.averages[key], weights, nr_upd)
return weights, gradient
def _radam(self, ops, weights, grad, lr_scale, key, nr_upd):
if key not in self.mom1:
self.mom1[key] = ops.alloc1f(weights.size)
if key not in self.mom2:
self.mom2[key] = ops.alloc1f(weights.size)
weights_1D = ops.reshape1f(weights, weights.size)
gradient_1D = ops.reshape1f(grad, grad.size)
# While we port from the pytorch implementation, keep some of the same
# naming
state = {
"step": self.nr_update[key],
"exp_avg": self.mom1[key],
"exp_avg_sq": self.mom2[key],
}
group = {
"lr": self.learn_rate,
"betas": [self.b1, self.b2],
"eps": self.eps,
"weight_decay": 0.0,
"buffer": self._radam_buffer,
}
degenerated_to_sgd = True
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
# exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg_sq *= beta2
exp_avg_sq += (1 - beta2) * (gradient_1D**2)
# exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg *= beta1
exp_avg += (1 - beta1) * gradient_1D
state["step"] += 1
buffered = group["buffer"][int(state["step"] % 10)]
if state["step"] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state["step"]
beta2_t = beta2 ** state["step"]
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt(
(1 - beta2_t)
* (N_sma - 4)
/ (N_sma_max - 4)
* (N_sma - 2)
/ N_sma
* N_sma_max
/ (N_sma_max - 2)
) / (1 - beta1 ** state["step"])
elif degenerated_to_sgd:
step_size = 1.0 / (1 - beta1 ** state["step"])
else:
step_size = -1
buffered[2] = step_size
# more conservative since it's an approximated value
if N_sma >= 5:
if group["weight_decay"] != 0:
weights_1D += -group["weight_decay"] * group["lr"] * weights_1D
denom = ops.xp.sqrt(exp_avg_sq) + group["eps"]
weights_1D += -step_size * group["lr"] * (exp_avg / denom)
elif step_size > 0:
if group["weight_decay"] != 0:
weights_1D += -group["weight_decay"] * group["lr"] * weights_1D
weights_1D += -step_size * group["lr"] * exp_avg
return (
ops.reshape_f(weights_1D, weights.shape),
ops.reshape_f(gradient_1D, grad.shape),
)
def _adam(self, ops, weights, gradient, lr_scale, key, nr_upd):
weights_1D = ops.reshape1f(weights, weights.size)
gradient_1D = ops.reshape1f(gradient, gradient.size)
if key not in self.mom1:
self.mom1[key] = ops.alloc1f(weights.size)
if key not in self.mom2:
self.mom2[key] = ops.alloc1f(weights.size)
mom1 = self.mom1[key]
mom2 = self.mom2[key]
b1 = self.b1
b2 = self.b2
fix1 = 1.0 - (b1**nr_upd)
fix2 = 1.0 - (b2**nr_upd)
lr = self.learn_rate * fix2**0.5 / fix1
eps = self.eps
# needs to be 1D going into the adam function
weights_1D, gradient_1D, mom1, mom2 = ops.adam(
weights_1D, gradient_1D, mom1, mom2, b1, b2, eps, lr * lr_scale
)
self.mom1[key] = mom1
self.mom2[key] = mom2
return (
ops.reshape_f(weights_1D, weights.shape),
ops.reshape_f(gradient_1D, gradient.shape),
)
__all__ = ["Adam", "RAdam", "SGD", "Optimizer", "ADAM_DEFAULTS", "SGD_DEFAULTS"]
| mit | 37b51889015ecc0a5c5e4d6a7874ca42 | 32.280112 | 80 | 0.546082 | 3.302112 | false | false | false | false |
rapptz/discord.py | discord/webhook/async_.py | 1 | 66566 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import logging
import asyncio
import re
from urllib.parse import quote as urlquote
from typing import Any, Dict, List, Literal, Optional, TYPE_CHECKING, Sequence, Tuple, Union, TypeVar, Type, overload
from contextvars import ContextVar
import weakref
import aiohttp
from .. import utils
from ..errors import HTTPException, Forbidden, NotFound, DiscordServerError
from ..message import Message
from ..enums import try_enum, WebhookType
from ..user import BaseUser, User
from ..flags import MessageFlags
from ..asset import Asset
from ..partial_emoji import PartialEmoji
from ..http import Route, handle_message_parameters, MultipartParameters, HTTPClient, json_or_text
from ..mixins import Hashable
from ..channel import TextChannel, PartialMessageable
from ..file import File
__all__ = (
'Webhook',
'WebhookMessage',
'PartialWebhookChannel',
'PartialWebhookGuild',
)
_log = logging.getLogger(__name__)
if TYPE_CHECKING:
from typing_extensions import Self
from types import TracebackType
from ..embeds import Embed
from ..mentions import AllowedMentions
from ..message import Attachment
from ..state import ConnectionState
from ..http import Response
from ..guild import Guild
from ..emoji import Emoji
from ..channel import VoiceChannel
from ..abc import Snowflake
from ..ui.view import View
import datetime
from ..types.webhook import (
Webhook as WebhookPayload,
SourceGuild as SourceGuildPayload,
)
from ..types.message import (
Message as MessagePayload,
)
from ..types.user import (
User as UserPayload,
PartialUser as PartialUserPayload,
)
from ..types.channel import (
PartialChannel as PartialChannelPayload,
)
from ..types.emoji import PartialEmoji as PartialEmojiPayload
BE = TypeVar('BE', bound=BaseException)
_State = Union[ConnectionState, '_WebhookState']
MISSING: Any = utils.MISSING
class AsyncDeferredLock:
    """Async context manager around an :class:`asyncio.Lock` whose release
    can be postponed.

    Entering the context acquires the wrapped lock.  While it is held,
    :meth:`delay_by` may schedule a sleep; on exit that sleep (if any)
    happens *before* the lock is released, so a rate-limit delay also
    stalls the next request waiting on the same bucket.
    """

    def __init__(self, lock: asyncio.Lock):
        # The underlying lock shared by all requests on one bucket.
        self.lock = lock
        # Seconds to sleep before releasing; ``None`` means release at once.
        self.delta: Optional[float] = None

    async def __aenter__(self) -> Self:
        await self.lock.acquire()
        return self

    def delay_by(self, delta: float) -> None:
        """Schedule ``delta`` seconds of sleep to run before release."""
        self.delta = delta

    async def __aexit__(
        self,
        exc_type: Optional[Type[BE]],
        exc: Optional[BE],
        traceback: Optional[TracebackType],
    ) -> None:
        pending = self.delta
        if pending:
            await asyncio.sleep(pending)
        self.lock.release()
class AsyncWebhookAdapter:
def __init__(self):
self._locks: weakref.WeakValueDictionary[Any, asyncio.Lock] = weakref.WeakValueDictionary()
    async def request(
        self,
        route: Route,
        session: aiohttp.ClientSession,
        *,
        payload: Optional[Dict[str, Any]] = None,
        multipart: Optional[List[Dict[str, Any]]] = None,
        proxy: Optional[str] = None,
        proxy_auth: Optional[aiohttp.BasicAuth] = None,
        files: Optional[Sequence[File]] = None,
        reason: Optional[str] = None,
        auth_token: Optional[str] = None,
        params: Optional[Dict[str, Any]] = None,
    ) -> Any:
        """Perform a webhook HTTP request with rate-limit handling and retries.

        Requests are serialized per ``(webhook_id, webhook_token)`` bucket via
        an :class:`asyncio.Lock`, and are attempted up to five times to cover
        429 rate limits, 5xx server errors, and connection resets.  Failures
        raise :exc:`HTTPException`, :exc:`Forbidden`, :exc:`NotFound`, or
        :exc:`DiscordServerError` as appropriate.

        ``payload`` is sent as JSON; ``multipart`` (rebuilt each attempt) is
        used for file uploads, with ``files`` rewound before every retry.
        ``auth_token`` adds a ``Bot`` Authorization header and ``reason`` is
        forwarded as the ``X-Audit-Log-Reason`` header.  Returns the decoded
        JSON response (or raw text when the response is not JSON).
        """
        headers: Dict[str, str] = {}
        files = files or []
        to_send: Optional[Union[str, aiohttp.FormData]] = None
        # Rate limits are scoped to the webhook identity, so requests for the
        # same (id, token) pair share a single lock.
        bucket = (route.webhook_id, route.webhook_token)
        try:
            lock = self._locks[bucket]
        except KeyError:
            self._locks[bucket] = lock = asyncio.Lock()
        if payload is not None:
            headers['Content-Type'] = 'application/json'
            to_send = utils._to_json(payload)
        if auth_token is not None:
            headers['Authorization'] = f'Bot {auth_token}'
        if reason is not None:
            # '/' and ' ' are kept readable in the audit log header.
            headers['X-Audit-Log-Reason'] = urlquote(reason, safe='/ ')
        response: Optional[aiohttp.ClientResponse] = None
        data: Optional[Union[Dict[str, Any], str]] = None
        method = route.method
        url = route.url
        webhook_id = route.webhook_id
        async with AsyncDeferredLock(lock) as lock:
            for attempt in range(5):
                # Rewind file objects so a retry re-uploads from the start.
                for file in files:
                    file.reset(seek=attempt)
                if multipart:
                    # aiohttp FormData is single-use; rebuild it every attempt.
                    form_data = aiohttp.FormData(quote_fields=False)
                    for p in multipart:
                        form_data.add_field(**p)
                    to_send = form_data
                try:
                    async with session.request(
                        method, url, data=to_send, headers=headers, params=params, proxy=proxy, proxy_auth=proxy_auth
                    ) as response:
                        _log.debug(
                            'Webhook ID %s with %s %s has returned status code %s',
                            webhook_id,
                            method,
                            url,
                            response.status,
                        )
                        data = await json_or_text(response)
                        remaining = response.headers.get('X-Ratelimit-Remaining')
                        if remaining == '0' and response.status != 429:
                            # Bucket exhausted pre-emptively: defer releasing the
                            # lock so the next request waits out the window.
                            delta = utils._parse_ratelimit_header(response)
                            _log.debug(
                                'Webhook ID %s has exhausted its rate limit bucket (retry: %s).',
                                webhook_id,
                                delta,
                            )
                            lock.delay_by(delta)
                        if 300 > response.status >= 200:
                            return data
                        if response.status == 429:
                            # No 'Via' header suggests a Cloudflare ban rather
                            # than an ordinary API rate limit: do not retry.
                            if not response.headers.get('Via'):
                                raise HTTPException(response, data)
                            fmt = 'Webhook ID %s is rate limited. Retrying in %.2f seconds.'
                            retry_after: float = data['retry_after']  # type: ignore
                            _log.warning(fmt, webhook_id, retry_after)
                            await asyncio.sleep(retry_after)
                            continue
                        if response.status >= 500:
                            # Server error: back off linearly (1s, 3s, 5s, ...).
                            await asyncio.sleep(1 + attempt * 2)
                            continue
                        if response.status == 403:
                            raise Forbidden(response, data)
                        elif response.status == 404:
                            raise NotFound(response, data)
                        else:
                            raise HTTPException(response, data)
                except OSError as e:
                    # errno 54 / 10054: connection reset by peer (BSD/macOS and
                    # Windows respectively) — treat as transient and retry.
                    if attempt < 4 and e.errno in (54, 10054):
                        await asyncio.sleep(1 + attempt * 2)
                        continue
                    raise
        # All attempts exhausted without returning or raising inside the loop.
        if response:
            if response.status >= 500:
                raise DiscordServerError(response, data)
            raise HTTPException(response, data)
        raise RuntimeError('Unreachable code in HTTP handling.')
def delete_webhook(
self,
webhook_id: int,
*,
token: Optional[str] = None,
session: aiohttp.ClientSession,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
reason: Optional[str] = None,
) -> Response[None]:
route = Route('DELETE', '/webhooks/{webhook_id}', webhook_id=webhook_id)
return self.request(route, session=session, proxy=proxy, proxy_auth=proxy_auth, reason=reason, auth_token=token)
def delete_webhook_with_token(
self,
webhook_id: int,
token: str,
*,
session: aiohttp.ClientSession,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
reason: Optional[str] = None,
) -> Response[None]:
route = Route('DELETE', '/webhooks/{webhook_id}/{webhook_token}', webhook_id=webhook_id, webhook_token=token)
return self.request(route, session=session, proxy=proxy, proxy_auth=proxy_auth, reason=reason)
def edit_webhook(
self,
webhook_id: int,
token: str,
payload: Dict[str, Any],
*,
session: aiohttp.ClientSession,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
reason: Optional[str] = None,
) -> Response[WebhookPayload]:
route = Route('PATCH', '/webhooks/{webhook_id}', webhook_id=webhook_id)
return self.request(
route,
session=session,
proxy=proxy,
proxy_auth=proxy_auth,
reason=reason,
payload=payload,
auth_token=token,
)
def edit_webhook_with_token(
self,
webhook_id: int,
token: str,
payload: Dict[str, Any],
*,
session: aiohttp.ClientSession,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
reason: Optional[str] = None,
) -> Response[WebhookPayload]:
route = Route('PATCH', '/webhooks/{webhook_id}/{webhook_token}', webhook_id=webhook_id, webhook_token=token)
return self.request(route, session=session, proxy=proxy, proxy_auth=proxy_auth, reason=reason, payload=payload)
def execute_webhook(
self,
webhook_id: int,
token: str,
*,
session: aiohttp.ClientSession,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
payload: Optional[Dict[str, Any]] = None,
multipart: Optional[List[Dict[str, Any]]] = None,
files: Optional[Sequence[File]] = None,
thread_id: Optional[int] = None,
wait: bool = False,
) -> Response[Optional[MessagePayload]]:
params = {'wait': int(wait)}
if thread_id:
params['thread_id'] = thread_id
route = Route('POST', '/webhooks/{webhook_id}/{webhook_token}', webhook_id=webhook_id, webhook_token=token)
return self.request(
route,
session=session,
proxy=proxy,
proxy_auth=proxy_auth,
payload=payload,
multipart=multipart,
files=files,
params=params,
)
def get_webhook_message(
self,
webhook_id: int,
token: str,
message_id: int,
*,
session: aiohttp.ClientSession,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
thread_id: Optional[int] = None,
) -> Response[MessagePayload]:
route = Route(
'GET',
'/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
webhook_id=webhook_id,
webhook_token=token,
message_id=message_id,
)
params = None if thread_id is None else {'thread_id': thread_id}
return self.request(route, session=session, proxy=proxy, proxy_auth=proxy_auth, params=params)
def edit_webhook_message(
self,
webhook_id: int,
token: str,
message_id: int,
*,
session: aiohttp.ClientSession,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
payload: Optional[Dict[str, Any]] = None,
multipart: Optional[List[Dict[str, Any]]] = None,
files: Optional[Sequence[File]] = None,
thread_id: Optional[int] = None,
) -> Response[Message]:
route = Route(
'PATCH',
'/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
webhook_id=webhook_id,
webhook_token=token,
message_id=message_id,
)
params = None if thread_id is None else {'thread_id': thread_id}
return self.request(
route,
session=session,
proxy=proxy,
proxy_auth=proxy_auth,
payload=payload,
multipart=multipart,
files=files,
params=params,
)
def delete_webhook_message(
    self,
    webhook_id: int,
    token: str,
    message_id: int,
    *,
    session: aiohttp.ClientSession,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
    thread_id: Optional[int] = None,
) -> Response[None]:
    """Delete a message previously sent by this webhook."""
    if thread_id is not None:
        query = {'thread_id': thread_id}
    else:
        query = None
    r = Route(
        'DELETE',
        '/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
        webhook_id=webhook_id,
        webhook_token=token,
        message_id=message_id,
    )
    return self.request(r, session=session, proxy=proxy, proxy_auth=proxy_auth, params=query)
def fetch_webhook(
    self,
    webhook_id: int,
    token: str,
    *,
    session: aiohttp.ClientSession,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
) -> Response[WebhookPayload]:
    """Fetch a webhook by ID using bot-token authentication.

    Note: ``token`` here is the *bot* token (sent via ``auth_token``),
    not the webhook's own token.
    """
    r = Route('GET', '/webhooks/{webhook_id}', webhook_id=webhook_id)
    return self.request(r, session=session, proxy=proxy, proxy_auth=proxy_auth, auth_token=token)
def fetch_webhook_with_token(
    self,
    webhook_id: int,
    token: str,
    *,
    session: aiohttp.ClientSession,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
) -> Response[WebhookPayload]:
    """Fetch a webhook using its own token (no bot authentication)."""
    r = Route(
        'GET',
        '/webhooks/{webhook_id}/{webhook_token}',
        webhook_id=webhook_id,
        webhook_token=token,
    )
    return self.request(r, session=session, proxy=proxy, proxy_auth=proxy_auth)
def create_interaction_response(
    self,
    interaction_id: int,
    token: str,
    *,
    session: aiohttp.ClientSession,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
    params: MultipartParameters,
) -> Response[None]:
    """Post the initial callback response for an interaction.

    When ``params`` carries file uploads the request is sent as multipart
    form data; otherwise the JSON payload is sent directly.
    """
    r = Route(
        'POST',
        '/interactions/{webhook_id}/{webhook_token}/callback',
        webhook_id=interaction_id,
        webhook_token=token,
    )
    if not params.files:
        return self.request(r, session=session, proxy=proxy, proxy_auth=proxy_auth, payload=params.payload)
    return self.request(
        r,
        session=session,
        proxy=proxy,
        proxy_auth=proxy_auth,
        files=params.files,
        multipart=params.multipart,
    )
def get_original_interaction_response(
    self,
    application_id: int,
    token: str,
    *,
    session: aiohttp.ClientSession,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
) -> Response[MessagePayload]:
    """Fetch the original response message of an interaction.

    Interactions reuse the webhook message endpoints with the special
    ``@original`` message ID and the application ID as the webhook ID.
    """
    route = Route(
        'GET',
        '/webhooks/{webhook_id}/{webhook_token}/messages/@original',
        webhook_id=application_id,
        webhook_token=token,
    )
    return self.request(route, session=session, proxy=proxy, proxy_auth=proxy_auth)
def edit_original_interaction_response(
    self,
    application_id: int,
    token: str,
    *,
    session: aiohttp.ClientSession,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
    payload: Optional[Dict[str, Any]] = None,
    multipart: Optional[List[Dict[str, Any]]] = None,
    files: Optional[Sequence[File]] = None,
) -> Response[MessagePayload]:
    """Edit the original response message of an interaction."""
    route = Route(
        'PATCH',
        '/webhooks/{webhook_id}/{webhook_token}/messages/@original',
        webhook_id=application_id,
        webhook_token=token,
    )
    return self.request(
        route,
        session=session,
        proxy=proxy,
        proxy_auth=proxy_auth,
        payload=payload,
        multipart=multipart,
        files=files,
    )
def delete_original_interaction_response(
    self,
    application_id: int,
    token: str,
    *,
    session: aiohttp.ClientSession,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
) -> Response[None]:
    """Delete the original response message of an interaction."""
    route = Route(
        'DELETE',
        '/webhooks/{webhook_id}/{webhook_token}/messages/@original',
        webhook_id=application_id,
        webhook_token=token,
    )
    return self.request(route, session=session, proxy=proxy, proxy_auth=proxy_auth)
def interaction_response_params(type: int, data: Optional[Dict[str, Any]] = None) -> MultipartParameters:
    """Build JSON-only interaction-response parameters (no attachments).

    ``type`` (which shadows the builtin to mirror the wire field name) is
    the interaction callback type; ``data`` is the optional callback data.
    """
    body: Dict[str, Any] = {'type': type}
    if data is not None:
        body['data'] = data
    return MultipartParameters(payload=body, multipart=None, files=None)
# This is a subset of handle_message_parameters
def interaction_message_response_params(
    *,
    type: int,
    content: Optional[str] = MISSING,
    tts: bool = False,
    flags: MessageFlags = MISSING,
    file: File = MISSING,
    files: Sequence[File] = MISSING,
    embed: Optional[Embed] = MISSING,
    embeds: Sequence[Embed] = MISSING,
    attachments: Sequence[Union[Attachment, File]] = MISSING,
    view: Optional[View] = MISSING,
    allowed_mentions: Optional[AllowedMentions] = MISSING,
    previous_allowed_mentions: Optional[AllowedMentions] = None,
) -> MultipartParameters:
    """Build the payload (or multipart form) for an interaction message response.

    ``type`` is the interaction callback type; the remaining keyword
    arguments mirror message-send parameters.  ``MISSING`` marks a
    parameter as "not provided", which is distinct from ``None``
    (e.g. ``content=None`` explicitly clears content).

    Raises :exc:`TypeError` for mutually exclusive combinations and
    :exc:`ValueError` when more than 10 embeds are supplied.
    """
    # Reject mutually exclusive parameter combinations up front.
    if files is not MISSING and file is not MISSING:
        raise TypeError('Cannot mix file and files keyword arguments.')
    if embeds is not MISSING and embed is not MISSING:
        raise TypeError('Cannot mix embed and embeds keyword arguments.')
    if file is not MISSING:
        files = [file]
    if attachments is not MISSING and files is not MISSING:
        raise TypeError('Cannot mix attachments and files keyword arguments.')

    data: Optional[Dict[str, Any]] = {
        'tts': tts,
    }

    if embeds is not MISSING:
        if len(embeds) > 10:
            raise ValueError('embeds has a maximum of 10 elements.')
        data['embeds'] = [e.to_dict() for e in embeds]

    if embed is not MISSING:
        # embed=None explicitly suppresses embeds.
        if embed is None:
            data['embeds'] = []
        else:
            data['embeds'] = [embed.to_dict()]

    if content is not MISSING:
        if content is not None:
            data['content'] = str(content)
        else:
            data['content'] = None

    if view is not MISSING:
        if view is not None:
            data['components'] = view.to_components()
        else:
            data['components'] = []

    if flags is not MISSING:
        data['flags'] = flags.value

    if allowed_mentions:
        # Merge with the client-level default mentions when present.
        if previous_allowed_mentions is not None:
            data['allowed_mentions'] = previous_allowed_mentions.merge(allowed_mentions).to_dict()
        else:
            data['allowed_mentions'] = allowed_mentions.to_dict()
    elif previous_allowed_mentions is not None:
        data['allowed_mentions'] = previous_allowed_mentions.to_dict()

    if attachments is MISSING:
        # No explicit attachment list: the uploaded files are the attachments.
        attachments = files
    else:
        # Only the File instances in the attachment list need uploading.
        files = [a for a in attachments if isinstance(a, File)]

    if attachments is not MISSING:
        # New File uploads get sequential indices; kept Attachments serialize as-is.
        file_index = 0
        attachments_payload = []
        for attachment in attachments:
            if isinstance(attachment, File):
                attachments_payload.append(attachment.to_dict(file_index))
                file_index += 1
            else:
                attachments_payload.append(attachment.to_dict())
        data['attachments'] = attachments_payload

    multipart = []
    if files:
        # With uploads, the JSON body rides along as the payload_json form part
        # and the plain payload is cleared.
        data = {'type': type, 'data': data}
        multipart.append({'name': 'payload_json', 'value': utils._to_json(data)})
        data = None
        for index, file in enumerate(files):
            multipart.append(
                {
                    'name': f'files[{index}]',
                    'value': file.fp,
                    'filename': file.filename,
                    'content_type': 'application/octet-stream',
                }
            )
    else:
        data = {'type': type, 'data': data}

    return MultipartParameters(payload=data, multipart=multipart, files=files)
async_context: ContextVar[AsyncWebhookAdapter] = ContextVar('async_webhook_context', default=AsyncWebhookAdapter())
class PartialWebhookChannel(Hashable):
    """Represents a partial channel for webhooks.

    These are typically given for channel follower webhooks.

    .. versionadded:: 2.0

    Attributes
    -----------
    id: :class:`int`
        The partial channel's ID.
    name: :class:`str`
        The partial channel's name.
    """

    __slots__ = ('id', 'name')

    def __init__(self, *, data: PartialChannelPayload) -> None:
        # Normalise the ID to int (the payload may carry it as a string).
        self.id: int = int(data['id'])
        self.name: str = data['name']

    def __repr__(self) -> str:
        return '<PartialWebhookChannel name={0.name!r} id={0.id}>'.format(self)
class PartialWebhookGuild(Hashable):
    """Represents a partial guild for webhooks.

    These are typically given for channel follower webhooks.

    .. versionadded:: 2.0

    Attributes
    -----------
    id: :class:`int`
        The partial guild's ID.
    name: :class:`str`
        The partial guild's name.
    """

    __slots__ = ('id', 'name', '_icon', '_state')

    def __init__(self, *, data: SourceGuildPayload, state: _State) -> None:
        self._state: _State = state
        # Normalise the ID to int (the payload may carry it as a string).
        self.id: int = int(data['id'])
        self.name: str = data['name']
        self._icon: str = data['icon']

    def __repr__(self) -> str:
        return '<PartialWebhookGuild name={0.name!r} id={0.id}>'.format(self)

    @property
    def icon(self) -> Optional[Asset]:
        """Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
        return None if self._icon is None else Asset._from_guild_icon(self._state, self.id, self._icon)
class _FriendlyHttpAttributeErrorHelper:
__slots__ = ()
def __getattr__(self, attr: str) -> Any:
raise AttributeError('PartialWebhookState does not support http methods.')
class _WebhookState:
    """Lightweight stand-in for a connection state used by webhook objects.

    Delegates to the real parent state when one is available; otherwise it
    supplies just enough behaviour for user/emoji construction and raises a
    clear error when HTTP access is attempted.
    """

    __slots__ = ('_parent', '_webhook', '_thread')

    def __init__(self, webhook: Any, parent: Optional[_State], thread: Snowflake = MISSING):
        self._webhook: Any = webhook

        self._parent: Optional[ConnectionState]
        if isinstance(parent, _WebhookState):
            # Never chain partial states; only a real ConnectionState is kept.
            self._parent = None
        else:
            self._parent = parent

        self._thread: Snowflake = thread

    def _get_guild(self, guild_id: Optional[int]) -> Optional[Guild]:
        # Guild lookup requires a real state; partial state knows no guilds.
        if self._parent is not None:
            return self._parent._get_guild(guild_id)
        return None

    def store_user(self, data: Union[UserPayload, PartialUserPayload]) -> BaseUser:
        if self._parent is not None:
            return self._parent.store_user(data)
        # state parameter is artificial
        return BaseUser(state=self, data=data)  # type: ignore

    def create_user(self, data: Union[UserPayload, PartialUserPayload]) -> BaseUser:
        # state parameter is artificial
        return BaseUser(state=self, data=data)  # type: ignore

    def get_reaction_emoji(self, data: PartialEmojiPayload) -> Union[PartialEmoji, Emoji, str]:
        if self._parent is not None:
            return self._parent.get_reaction_emoji(data)

        emoji_id = utils._get_as_snowflake(data, 'id')

        if not emoji_id:
            # the name key will be a str
            return data['name']  # type: ignore

        return PartialEmoji(animated=data.get('animated', False), id=emoji_id, name=data['name'])  # type: ignore

    @property
    def http(self) -> Union[HTTPClient, _FriendlyHttpAttributeErrorHelper]:
        if self._parent is not None:
            return self._parent.http

        # Some data classes assign state.http and that should be kosher
        # however, using it should result in a late-binding error.
        return _FriendlyHttpAttributeErrorHelper()

    def __getattr__(self, attr: str) -> Any:
        # Any attribute not defined here falls through to the parent state.
        if self._parent is not None:
            return getattr(self._parent, attr)

        raise AttributeError(f'PartialWebhookState does not support {attr!r}.')
class WebhookMessage(Message):
    """Represents a message sent from your webhook.

    This allows you to edit or delete a message sent by your
    webhook.

    This inherits from :class:`discord.Message` with changes to
    :meth:`edit` and :meth:`delete` to work.

    .. versionadded:: 1.6
    """

    # Webhook-aware state: carries the owning webhook and the target thread.
    _state: _WebhookState

    async def edit(
        self,
        *,
        content: Optional[str] = MISSING,
        embeds: Sequence[Embed] = MISSING,
        embed: Optional[Embed] = MISSING,
        attachments: Sequence[Union[Attachment, File]] = MISSING,
        view: Optional[View] = MISSING,
        allowed_mentions: Optional[AllowedMentions] = None,
    ) -> WebhookMessage:
        """|coro|

        Edits the message.

        .. versionadded:: 1.6

        .. versionchanged:: 2.0
            The edit is no longer in-place, instead the newly edited message is returned.

        .. versionchanged:: 2.0
            This function will now raise :exc:`ValueError` instead of
            ``InvalidArgument``.

        Parameters
        ------------
        content: Optional[:class:`str`]
            The content to edit the message with or ``None`` to clear it.
        embeds: List[:class:`Embed`]
            A list of embeds to edit the message with.
        embed: Optional[:class:`Embed`]
            The embed to edit the message with. ``None`` suppresses the embeds.
            This should not be mixed with the ``embeds`` parameter.
        attachments: List[Union[:class:`Attachment`, :class:`File`]]
            A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
            then all attachments are removed.

            .. note::

                New files will always appear after current attachments.

            .. versionadded:: 2.0
        allowed_mentions: :class:`AllowedMentions`
            Controls the mentions being processed in this message.
            See :meth:`.abc.Messageable.send` for more information.
        view: Optional[:class:`~discord.ui.View`]
            The updated view to update this message with. If ``None`` is passed then
            the view is removed.

            .. versionadded:: 2.0

        Raises
        -------
        HTTPException
            Editing the message failed.
        Forbidden
            Edited a message that is not yours.
        TypeError
            You specified both ``embed`` and ``embeds``
        ValueError
            The length of ``embeds`` was invalid or
            there was no token associated with this webhook.

        Returns
        --------
        :class:`WebhookMessage`
            The newly edited message.
        """
        # Delegate to the owning webhook, passing along the thread this
        # message lives in so the edit hits the correct endpoint.
        return await self._state._webhook.edit_message(
            self.id,
            content=content,
            embeds=embeds,
            embed=embed,
            attachments=attachments,
            view=view,
            allowed_mentions=allowed_mentions,
            thread=self._state._thread,
        )

    async def add_files(self, *files: File) -> WebhookMessage:
        r"""|coro|

        Adds new files to the end of the message attachments.

        .. versionadded:: 2.0

        Parameters
        -----------
        \*files: :class:`File`
            New files to add to the message.

        Raises
        -------
        HTTPException
            Editing the message failed.
        Forbidden
            Tried to edit a message that isn't yours.

        Returns
        --------
        :class:`WebhookMessage`
            The newly edited message.
        """
        # Keep every existing attachment and append the new uploads.
        return await self.edit(attachments=[*self.attachments, *files])

    async def remove_attachments(self, *attachments: Attachment) -> WebhookMessage:
        r"""|coro|

        Removes attachments from the message.

        .. versionadded:: 2.0

        Parameters
        -----------
        \*attachments: :class:`Attachment`
            Attachments to remove from the message.

        Raises
        -------
        HTTPException
            Editing the message failed.
        Forbidden
            Tried to edit a message that isn't yours.

        Returns
        --------
        :class:`WebhookMessage`
            The newly edited message.
        """
        return await self.edit(attachments=[a for a in self.attachments if a not in attachments])

    async def delete(self, *, delay: Optional[float] = None) -> None:
        """|coro|

        Deletes the message.

        Parameters
        -----------
        delay: Optional[:class:`float`]
            If provided, the number of seconds to wait before deleting the message.
            The waiting is done in the background and deletion failures are ignored.

        Raises
        ------
        Forbidden
            You do not have proper permissions to delete the message.
        NotFound
            The message was deleted already.
        HTTPException
            Deleting the message failed.
        """
        if delay is not None:
            # Fire-and-forget deletion: HTTP failures are swallowed on
            # purpose, as documented for the ``delay`` parameter.  ``delay``
            # is bound as a default argument to freeze its current value.
            async def inner_call(delay: float = delay):
                await asyncio.sleep(delay)
                try:
                    await self._state._webhook.delete_message(self.id, thread=self._state._thread)
                except HTTPException:
                    pass

            asyncio.create_task(inner_call())
        else:
            await self._state._webhook.delete_message(self.id, thread=self._state._thread)
class BaseWebhook(Hashable):
    """Common attribute storage and derived properties shared by webhook types."""

    __slots__: Tuple[str, ...] = (
        'id',
        'type',
        'guild_id',
        'channel_id',
        'token',
        'auth_token',
        'user',
        'name',
        '_avatar',
        'source_channel',
        'source_guild',
        '_state',
    )

    def __init__(
        self,
        data: WebhookPayload,
        token: Optional[str] = None,
        state: Optional[_State] = None,
    ) -> None:
        # ``token`` here is the *bot* token used for authenticated requests;
        # it is distinct from the webhook's own token parsed out of ``data``.
        self.auth_token: Optional[str] = token
        self._state: _State = state or _WebhookState(self, parent=state)
        self._update(data)

    def _update(self, data: WebhookPayload) -> None:
        # Refresh every attribute from a raw webhook payload.
        self.id: int = int(data['id'])
        self.type: WebhookType = try_enum(WebhookType, int(data['type']))
        self.channel_id: Optional[int] = utils._get_as_snowflake(data, 'channel_id')
        self.guild_id: Optional[int] = utils._get_as_snowflake(data, 'guild_id')
        self.name: Optional[str] = data.get('name')
        self._avatar: Optional[str] = data.get('avatar')
        self.token: Optional[str] = data.get('token')

        user = data.get('user')
        self.user: Optional[Union[BaseUser, User]] = None
        if user is not None:
            # state parameter may be _WebhookState
            self.user = User(state=self._state, data=user)  # type: ignore

        source_channel = data.get('source_channel')
        if source_channel:
            source_channel = PartialWebhookChannel(data=source_channel)

        self.source_channel: Optional[PartialWebhookChannel] = source_channel

        source_guild = data.get('source_guild')
        if source_guild:
            source_guild = PartialWebhookGuild(data=source_guild, state=self._state)

        self.source_guild: Optional[PartialWebhookGuild] = source_guild

    def is_partial(self) -> bool:
        """:class:`bool`: Whether the webhook is a "partial" webhook.

        .. versionadded:: 2.0"""
        return self.channel_id is None

    def is_authenticated(self) -> bool:
        """:class:`bool`: Whether the webhook is authenticated with a bot token.

        .. versionadded:: 2.0
        """
        return self.auth_token is not None

    @property
    def guild(self) -> Optional[Guild]:
        """Optional[:class:`Guild`]: The guild this webhook belongs to.

        If this is a partial webhook, then this will always return ``None``.
        """
        return self._state and self._state._get_guild(self.guild_id)

    @property
    def channel(self) -> Optional[Union[VoiceChannel, TextChannel]]:
        """Optional[Union[:class:`VoiceChannel`, :class:`TextChannel`]]: The channel this webhook belongs to.

        If this is a partial webhook, then this will always return ``None``.
        """
        guild = self.guild
        return guild and guild.get_channel(self.channel_id)  # type: ignore

    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: Returns the webhook's creation time in UTC."""
        return utils.snowflake_time(self.id)

    @property
    def avatar(self) -> Optional[Asset]:
        """Optional[:class:`Asset`]: Returns an :class:`Asset` for the avatar the webhook has.

        If the webhook does not have a traditional avatar, ``None`` is returned.
        If you want the avatar that a webhook has displayed, consider :attr:`display_avatar`.
        """
        if self._avatar is not None:
            return Asset._from_avatar(self._state, self.id, self._avatar)
        return None

    @property
    def default_avatar(self) -> Asset:
        """
        :class:`Asset`: Returns the default avatar. This is always the blurple avatar.

        .. versionadded:: 2.0
        """
        # Default is always blurple apparently
        return Asset._from_default_avatar(self._state, 0)

    @property
    def display_avatar(self) -> Asset:
        """:class:`Asset`: Returns the webhook's display avatar.

        This is either webhook's default avatar or uploaded avatar.

        .. versionadded:: 2.0
        """
        return self.avatar or self.default_avatar
class Webhook(BaseWebhook):
"""Represents an asynchronous Discord webhook.
Webhooks are a form to send messages to channels in Discord without a
bot user or authentication.
There are two main ways to use Webhooks. The first is through the ones
received by the library such as :meth:`.Guild.webhooks`,
:meth:`.TextChannel.webhooks` and :meth:`.VoiceChannel.webhooks`.
The ones received by the library will automatically be
bound using the library's internal HTTP session.
The second form involves creating a webhook object manually using the
:meth:`~.Webhook.from_url` or :meth:`~.Webhook.partial` classmethods.
For example, creating a webhook from a URL and using :doc:`aiohttp <aio:index>`:
.. code-block:: python3
from discord import Webhook
import aiohttp
async def foo():
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url('url-here', session=session)
await webhook.send('Hello World', username='Foo')
For a synchronous counterpart, see :class:`SyncWebhook`.
.. container:: operations
.. describe:: x == y
Checks if two webhooks are equal.
.. describe:: x != y
Checks if two webhooks are not equal.
.. describe:: hash(x)
Returns the webhooks's hash.
.. versionchanged:: 1.4
Webhooks are now comparable and hashable.
Attributes
------------
id: :class:`int`
The webhook's ID
type: :class:`WebhookType`
The type of the webhook.
.. versionadded:: 1.3
token: Optional[:class:`str`]
The authentication token of the webhook. If this is ``None``
then the webhook cannot be used to make requests.
guild_id: Optional[:class:`int`]
The guild ID this webhook is for.
channel_id: Optional[:class:`int`]
The channel ID this webhook is for.
user: Optional[:class:`abc.User`]
The user this webhook was created by. If the webhook was
received without authentication then this will be ``None``.
name: Optional[:class:`str`]
The default name of the webhook.
source_guild: Optional[:class:`PartialWebhookGuild`]
The guild of the channel that this webhook is following.
Only given if :attr:`type` is :attr:`WebhookType.channel_follower`.
.. versionadded:: 2.0
source_channel: Optional[:class:`PartialWebhookChannel`]
The channel that this webhook is following.
Only given if :attr:`type` is :attr:`WebhookType.channel_follower`.
.. versionadded:: 2.0
"""
__slots__: Tuple[str, ...] = ('session', 'proxy', 'proxy_auth')
def __init__(
    self,
    data: WebhookPayload,
    session: aiohttp.ClientSession,
    token: Optional[str] = None,
    state: Optional[_State] = None,
    proxy: Optional[str] = None,
    proxy_auth: Optional[aiohttp.BasicAuth] = None,
) -> None:
    """Initialise the webhook from a payload plus the HTTP session and proxy settings used for its requests."""
    super().__init__(data, token, state)
    # The caller owns the session; the library does not close it.
    self.session: aiohttp.ClientSession = session
    self.proxy: Optional[str] = proxy
    self.proxy_auth: Optional[aiohttp.BasicAuth] = proxy_auth
def __repr__(self) -> str:
    """Debug representation showing only the webhook's ID."""
    return '<Webhook id={!r}>'.format(self.id)
@property
def url(self) -> str:
    """:class:`str` : Returns the webhook's url."""
    # Reconstructed from ID + webhook token; only meaningful when
    # ``self.token`` is set (i.e. not an unauthenticated partial webhook).
    return f'https://discord.com/api/webhooks/{self.id}/{self.token}'
@classmethod
def partial(cls, id: int, token: str, *, session: aiohttp.ClientSession, bot_token: Optional[str] = None) -> Self:
    """Creates a partial :class:`Webhook`.

    Parameters
    -----------
    id: :class:`int`
        The ID of the webhook.
    token: :class:`str`
        The authentication token of the webhook.
    session: :class:`aiohttp.ClientSession`
        The session to use to send requests with. Note
        that the library does not manage the session and
        will not close it.

        .. versionadded:: 2.0
    bot_token: Optional[:class:`str`]
        The bot authentication token for authenticated requests
        involving the webhook.

        .. versionadded:: 2.0

    Returns
    --------
    :class:`Webhook`
        A partial :class:`Webhook`.
        A partial webhook is just a webhook object with an ID and a token.
    """
    # Minimal payload: a partial webhook only knows its ID, type, and token.
    data: WebhookPayload = {
        'id': id,
        'type': 1,
        'token': token,
    }

    return cls(data, session, token=bot_token)
@classmethod
def from_url(cls, url: str, *, session: aiohttp.ClientSession, bot_token: Optional[str] = None) -> Self:
    """Creates a partial :class:`Webhook` from a webhook URL.

    .. versionchanged:: 2.0
        This function will now raise :exc:`ValueError` instead of
        ``InvalidArgument``.

    Parameters
    ------------
    url: :class:`str`
        The URL of the webhook.
    session: :class:`aiohttp.ClientSession`
        The session to use to send requests with. Note
        that the library does not manage the session and
        will not close it.

        .. versionadded:: 2.0
    bot_token: Optional[:class:`str`]
        The bot authentication token for authenticated requests
        involving the webhook.

        .. versionadded:: 2.0

    Raises
    -------
    ValueError
        The URL is invalid.

    Returns
    --------
    :class:`Webhook`
        A partial :class:`Webhook`.
        A partial webhook is just a webhook object with an ID and a token.
    """
    # Accepts both discord.com and discordapp.com URLs; the named groups
    # capture the webhook ID (17-20 digits) and its token.
    m = re.search(r'discord(?:app)?\.com/api/webhooks/(?P<id>[0-9]{17,20})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})', url)
    if m is None:
        raise ValueError('Invalid webhook URL given.')

    data: Dict[str, Any] = m.groupdict()
    data['type'] = 1
    return cls(data, session, token=bot_token)  # type: ignore
@classmethod
def _as_follower(cls, data, *, channel, user) -> Self:
    """Synthesize a follower webhook object from a follow response plus the known channel and user."""
    name = f"{channel.guild} #{channel}"
    feed: WebhookPayload = {
        'id': data['webhook_id'],
        'type': 2,
        'name': name,
        'channel_id': channel.id,
        'guild_id': channel.guild.id,
        'user': {'username': user.name, 'discriminator': user.discriminator, 'id': user.id, 'avatar': user._avatar},
    }

    state = channel._state
    # Reach into HTTPClient's name-mangled private session to reuse it.
    http = state.http
    session = http._HTTPClient__session
    proxy_auth = http.proxy_auth
    proxy = http.proxy
    return cls(feed, session=session, state=state, proxy_auth=proxy_auth, proxy=proxy, token=state.http.token)
@classmethod
def from_state(cls, data: WebhookPayload, state: ConnectionState) -> Self:
    """Build a webhook bound to an existing connection state, reusing its HTTP session, proxy, and bot token."""
    http = state.http
    # Name-mangled private attribute access to share the client session.
    session = http._HTTPClient__session  # type: ignore
    proxy_auth = http.proxy_auth
    proxy = http.proxy
    return cls(data, session=session, state=state, proxy_auth=proxy_auth, proxy=proxy, token=state.http.token)
async def fetch(self, *, prefer_auth: bool = True) -> Webhook:
    """|coro|

    Fetches the current webhook.

    This could be used to get a full webhook from a partial webhook.

    .. versionadded:: 2.0

    .. note::

        When fetching with an unauthenticated webhook, i.e.
        :meth:`is_authenticated` returns ``False``, then the
        returned webhook does not contain any user information.

    Parameters
    -----------
    prefer_auth: :class:`bool`
        Whether to use the bot token over the webhook token
        if available. Defaults to ``True``.

    Raises
    -------
    HTTPException
        Could not fetch the webhook
    NotFound
        Could not find the webhook by this ID
    ValueError
        This webhook does not have a token associated with it.

    Returns
    --------
    :class:`Webhook`
        The fetched webhook.
    """
    adapter = async_context.get()

    # Prefer the authenticated (bot token) endpoint when allowed and
    # possible; fall back to the webhook-token endpoint otherwise.
    if prefer_auth and self.auth_token:
        data = await adapter.fetch_webhook(
            self.id,
            self.auth_token,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
        )
    elif self.token:
        data = await adapter.fetch_webhook_with_token(
            self.id,
            self.token,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
        )
    else:
        raise ValueError('This webhook does not have a token associated with it')

    return Webhook(
        data,
        session=self.session,
        proxy=self.proxy,
        proxy_auth=self.proxy_auth,
        token=self.auth_token,
        state=self._state,
    )
async def delete(self, *, reason: Optional[str] = None, prefer_auth: bool = True) -> None:
    """|coro|

    Deletes this Webhook.

    Parameters
    ------------
    reason: Optional[:class:`str`]
        The reason for deleting this webhook. Shows up on the audit log.

        .. versionadded:: 1.4
    prefer_auth: :class:`bool`
        Whether to use the bot token over the webhook token
        if available. Defaults to ``True``.

        .. versionadded:: 2.0

    Raises
    -------
    HTTPException
        Deleting the webhook failed.
    NotFound
        This webhook does not exist.
    Forbidden
        You do not have permissions to delete this webhook.
    ValueError
        This webhook does not have a token associated with it.
    """
    if self.token is None and self.auth_token is None:
        raise ValueError('This webhook does not have a token associated with it')

    adapter = async_context.get()

    # Prefer the authenticated endpoint; otherwise use the webhook token.
    if prefer_auth and self.auth_token:
        await adapter.delete_webhook(
            self.id,
            token=self.auth_token,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
            reason=reason,
        )
    elif self.token:
        await adapter.delete_webhook_with_token(
            self.id,
            self.token,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
            reason=reason,
        )
async def edit(
    self,
    *,
    reason: Optional[str] = None,
    name: Optional[str] = MISSING,
    avatar: Optional[bytes] = MISSING,
    channel: Optional[Snowflake] = None,
    prefer_auth: bool = True,
) -> Webhook:
    """|coro|

    Edits this Webhook.

    .. versionchanged:: 2.0
        This function will now raise :exc:`ValueError` instead of
        ``InvalidArgument``.

    Parameters
    ------------
    name: Optional[:class:`str`]
        The webhook's new default name.
    avatar: Optional[:class:`bytes`]
        A :term:`py:bytes-like object` representing the webhook's new default avatar.
    channel: Optional[:class:`abc.Snowflake`]
        The webhook's new channel. This requires an authenticated webhook.

        .. versionadded:: 2.0
    reason: Optional[:class:`str`]
        The reason for editing this webhook. Shows up on the audit log.

        .. versionadded:: 1.4
    prefer_auth: :class:`bool`
        Whether to use the bot token over the webhook token
        if available. Defaults to ``True``.

        .. versionadded:: 2.0

    Raises
    -------
    HTTPException
        Editing the webhook failed.
    NotFound
        This webhook does not exist.
    ValueError
        This webhook does not have a token associated with it
        or it tried editing a channel without authentication.

    Returns
    --------
    :class:`Webhook`
        The newly edited webhook.
    """
    if self.token is None and self.auth_token is None:
        raise ValueError('This webhook does not have a token associated with it')

    payload = {}
    if name is not MISSING:
        payload['name'] = str(name) if name is not None else None

    if avatar is not MISSING:
        payload['avatar'] = utils._bytes_to_base64_data(avatar) if avatar is not None else None

    adapter = async_context.get()

    data: Optional[WebhookPayload] = None
    # If a channel is given, always use the authenticated endpoint
    if channel is not None:
        if self.auth_token is None:
            raise ValueError('Editing channel requires authenticated webhook')

        payload['channel_id'] = channel.id
        data = await adapter.edit_webhook(
            self.id,
            self.auth_token,
            payload=payload,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
            reason=reason,
        )
    # BUGFIX: this was previously a plain ``if``, which re-sent the PATCH a
    # second time after a channel edit had already been performed above.
    elif prefer_auth and self.auth_token:
        data = await adapter.edit_webhook(
            self.id,
            self.auth_token,
            payload=payload,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
            reason=reason,
        )
    elif self.token:
        data = await adapter.edit_webhook_with_token(
            self.id,
            self.token,
            payload=payload,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
            reason=reason,
        )

    if data is None:
        raise RuntimeError('Unreachable code hit: data was not assigned')

    return Webhook(
        data,
        session=self.session,
        proxy=self.proxy,
        proxy_auth=self.proxy_auth,
        token=self.auth_token,
        state=self._state,
    )
def _create_message(self, data, *, thread: Snowflake):
    """Construct a WebhookMessage bound to this webhook (and thread) so edit/delete route correctly."""
    state = _WebhookState(self, parent=self._state, thread=thread)
    # state may be artificial (unlikely at this point...)
    if thread is MISSING:
        channel = self.channel or PartialMessageable(state=self._state, guild_id=self.guild_id, id=int(data['channel_id']))  # type: ignore
    else:
        channel = self.channel
        # Resolve the concrete thread object when the parent is a text channel.
        if isinstance(channel, TextChannel):
            channel = channel.get_thread(thread.id)

        if channel is None:
            channel = PartialMessageable(state=self._state, guild_id=self.guild_id, id=int(data['channel_id']))  # type: ignore

    # state is artificial
    return WebhookMessage(data=data, state=state, channel=channel)  # type: ignore
@overload
async def send(
    self,
    content: str = MISSING,
    *,
    username: str = MISSING,
    avatar_url: Any = MISSING,
    tts: bool = MISSING,
    ephemeral: bool = MISSING,
    file: File = MISSING,
    files: Sequence[File] = MISSING,
    embed: Embed = MISSING,
    embeds: Sequence[Embed] = MISSING,
    allowed_mentions: AllowedMentions = MISSING,
    view: View = MISSING,
    thread: Snowflake = MISSING,
    thread_name: str = MISSING,
    wait: Literal[True],
    suppress_embeds: bool = MISSING,
) -> WebhookMessage:
    """Overload: with ``wait=True`` the created :class:`WebhookMessage` is returned."""
    ...
@overload
async def send(
    self,
    content: str = MISSING,
    *,
    username: str = MISSING,
    avatar_url: Any = MISSING,
    tts: bool = MISSING,
    ephemeral: bool = MISSING,
    file: File = MISSING,
    files: Sequence[File] = MISSING,
    embed: Embed = MISSING,
    embeds: Sequence[Embed] = MISSING,
    allowed_mentions: AllowedMentions = MISSING,
    view: View = MISSING,
    thread: Snowflake = MISSING,
    thread_name: str = MISSING,
    wait: Literal[False] = ...,
    suppress_embeds: bool = MISSING,
) -> None:
    """Overload: with ``wait=False`` (the default) nothing is returned."""
    ...
    async def send(
        self,
        content: str = MISSING,
        *,
        username: str = MISSING,
        avatar_url: Any = MISSING,
        tts: bool = False,
        ephemeral: bool = False,
        file: File = MISSING,
        files: Sequence[File] = MISSING,
        embed: Embed = MISSING,
        embeds: Sequence[Embed] = MISSING,
        allowed_mentions: AllowedMentions = MISSING,
        view: View = MISSING,
        thread: Snowflake = MISSING,
        thread_name: str = MISSING,
        wait: bool = False,
        suppress_embeds: bool = False,
    ) -> Optional[WebhookMessage]:
        """|coro|
        Sends a message using the webhook.
        The content must be a type that can convert to a string through ``str(content)``.
        To upload a single file, the ``file`` parameter should be used with a
        single :class:`File` object.
        If the ``embed`` parameter is provided, it must be of type :class:`Embed` and
        it must be a rich embed type. You cannot mix the ``embed`` parameter with the
        ``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send.
        .. versionchanged:: 2.0
            This function will now raise :exc:`ValueError` instead of
            ``InvalidArgument``.
        Parameters
        ------------
        content: :class:`str`
            The content of the message to send.
        wait: :class:`bool`
            Whether the server should wait before sending a response. This essentially
            means that the return type of this function changes from ``None`` to
            a :class:`WebhookMessage` if set to ``True``. If the type of webhook
            is :attr:`WebhookType.application` then this is always set to ``True``.
        username: :class:`str`
            The username to send with this message. If no username is provided
            then the default username for the webhook is used.
        avatar_url: :class:`str`
            The avatar URL to send with this message. If no avatar URL is provided
            then the default avatar for the webhook is used. If this is not a
            string then it is explicitly cast using ``str``.
        tts: :class:`bool`
            Indicates if the message should be sent using text-to-speech.
        ephemeral: :class:`bool`
            Indicates if the message should only be visible to the user.
            This is only available to :attr:`WebhookType.application` webhooks.
            If a view is sent with an ephemeral message and it has no timeout set
            then the timeout is set to 15 minutes.
            .. versionadded:: 2.0
        file: :class:`File`
            The file to upload. This cannot be mixed with ``files`` parameter.
        files: List[:class:`File`]
            A list of files to send with the content. This cannot be mixed with the
            ``file`` parameter.
        embed: :class:`Embed`
            The rich embed for the content to send. This cannot be mixed with
            ``embeds`` parameter.
        embeds: List[:class:`Embed`]
            A list of embeds to send with the content. Maximum of 10. This cannot
            be mixed with the ``embed`` parameter.
        allowed_mentions: :class:`AllowedMentions`
            Controls the mentions being processed in this message.
            .. versionadded:: 1.4
        view: :class:`discord.ui.View`
            The view to send with the message. You can only send a view
            if this webhook is not partial and has state attached. A
            webhook has state attached if the webhook is managed by the
            library.
            .. versionadded:: 2.0
        thread: :class:`~discord.abc.Snowflake`
            The thread to send this webhook to.
            .. versionadded:: 2.0
        thread_name: :class:`str`
            The thread name to create with this webhook if the webhook belongs
            to a :class:`~discord.ForumChannel`. Note that this is mutually
            exclusive with the ``thread`` parameter, as this will create a
            new thread with the given name.
            .. versionadded:: 2.0
        suppress_embeds: :class:`bool`
            Whether to suppress embeds for the message. This sends the message without any embeds if set to ``True``.
            .. versionadded:: 2.0
        Raises
        --------
        HTTPException
            Sending the message failed.
        NotFound
            This webhook was not found.
        Forbidden
            The authorization token for the webhook is incorrect.
        TypeError
            You specified both ``embed`` and ``embeds`` or ``file`` and ``files``
            or ``thread`` and ``thread_name``.
        ValueError
            The length of ``embeds`` was invalid, there was no token
            associated with this webhook or ``ephemeral`` was passed
            with the improper webhook type or there was no state
            attached with this webhook when giving it a view.
        Returns
        ---------
        Optional[:class:`WebhookMessage`]
            If ``wait`` is ``True`` then the message that was sent, otherwise ``None``.
        """
        # Executing a webhook always requires its token.
        if self.token is None:
            raise ValueError('This webhook does not have a token associated with it')
        # Library-level default allowed-mentions configured on the client, if any.
        previous_mentions: Optional[AllowedMentions] = getattr(self._state, 'allowed_mentions', None)
        # Callers may pass ``content=None`` to mean "no content"; normalise to
        # the MISSING sentinel expected by handle_message_parameters.
        if content is None:
            content = MISSING
        # Fold the boolean options into a MessageFlags value; omit flags
        # entirely when neither is requested.
        if ephemeral or suppress_embeds:
            flags = MessageFlags._from_value(0)
            flags.ephemeral = ephemeral
            flags.suppress_embeds = suppress_embeds
        else:
            flags = MISSING
        application_webhook = self.type is WebhookType.application
        if ephemeral and not application_webhook:
            raise ValueError('ephemeral messages can only be sent from application webhooks')
        # Application (interaction) webhooks always return the created message.
        if application_webhook:
            wait = True
        if view is not MISSING:
            # Views need real connection state to receive component events; a
            # bare _WebhookState cannot dispatch them.
            if isinstance(self._state, _WebhookState):
                raise ValueError('Webhook views require an associated state with the webhook')
            if not hasattr(view, '__discord_ui_view__'):
                raise TypeError(f'expected view parameter to be of type View not {view.__class__!r}')
            # Ephemeral messages vanish after ~15 minutes, so cap an unbounded
            # view timeout accordingly.
            if ephemeral is True and view.timeout is None:
                view.timeout = 15 * 60.0
        # ``thread`` targets an existing thread; ``thread_name`` creates one.
        # They cannot be combined.
        if thread_name is not MISSING and thread is not MISSING:
            raise TypeError('Cannot mix thread_name and thread keyword arguments.')
        params = handle_message_parameters(
            content=content,
            username=username,
            avatar_url=avatar_url,
            tts=tts,
            file=file,
            files=files,
            embed=embed,
            embeds=embeds,
            flags=flags,
            view=view,
            thread_name=thread_name,
            allowed_mentions=allowed_mentions,
            previous_allowed_mentions=previous_mentions,
        )
        adapter = async_context.get()
        thread_id: Optional[int] = None
        if thread is not MISSING:
            thread_id = thread.id
        data = await adapter.execute_webhook(
            self.id,
            self.token,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
            payload=params.payload,
            multipart=params.multipart,
            files=params.files,
            thread_id=thread_id,
            wait=wait,
        )
        msg = None
        # ``data`` only contains the message payload when wait=True.
        if wait:
            msg = self._create_message(data, thread=thread)
        # Register the view so component interactions are routed to it.
        if view is not MISSING and not view.is_finished():
            message_id = None if msg is None else msg.id
            self._state.store_view(view, message_id)
        return msg
async def fetch_message(self, id: int, /, *, thread: Snowflake = MISSING) -> WebhookMessage:
"""|coro|
Retrieves a single :class:`~discord.WebhookMessage` owned by this webhook.
.. versionadded:: 2.0
Parameters
------------
id: :class:`int`
The message ID to look for.
thread: :class:`~discord.abc.Snowflake`
The thread to look in.
Raises
--------
~discord.NotFound
The specified message was not found.
~discord.Forbidden
You do not have the permissions required to get a message.
~discord.HTTPException
Retrieving the message failed.
ValueError
There was no token associated with this webhook.
Returns
--------
:class:`~discord.WebhookMessage`
The message asked for.
"""
if self.token is None:
raise ValueError('This webhook does not have a token associated with it')
thread_id: Optional[int] = None
if thread is not MISSING:
thread_id = thread.id
adapter = async_context.get()
data = await adapter.get_webhook_message(
self.id,
self.token,
id,
session=self.session,
proxy=self.proxy,
proxy_auth=self.proxy_auth,
thread_id=thread_id,
)
return self._create_message(data, thread=thread)
    async def edit_message(
        self,
        message_id: int,
        *,
        content: Optional[str] = MISSING,
        embeds: Sequence[Embed] = MISSING,
        embed: Optional[Embed] = MISSING,
        attachments: Sequence[Union[Attachment, File]] = MISSING,
        view: Optional[View] = MISSING,
        allowed_mentions: Optional[AllowedMentions] = None,
        thread: Snowflake = MISSING,
    ) -> WebhookMessage:
        """|coro|
        Edits a message owned by this webhook.
        This is a lower level interface to :meth:`WebhookMessage.edit` in case
        you only have an ID.
        .. versionadded:: 1.6
        .. versionchanged:: 2.0
            The edit is no longer in-place, instead the newly edited message is returned.
        .. versionchanged:: 2.0
            This function will now raise :exc:`ValueError` instead of
            ``InvalidArgument``.
        Parameters
        ------------
        message_id: :class:`int`
            The message ID to edit.
        content: Optional[:class:`str`]
            The content to edit the message with or ``None`` to clear it.
        embeds: List[:class:`Embed`]
            A list of embeds to edit the message with.
        embed: Optional[:class:`Embed`]
            The embed to edit the message with. ``None`` suppresses the embeds.
            This should not be mixed with the ``embeds`` parameter.
        attachments: List[Union[:class:`Attachment`, :class:`File`]]
            A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
            then all attachments are removed.
            .. versionadded:: 2.0
        allowed_mentions: :class:`AllowedMentions`
            Controls the mentions being processed in this message.
            See :meth:`.abc.Messageable.send` for more information.
        view: Optional[:class:`~discord.ui.View`]
            The updated view to update this message with. If ``None`` is passed then
            the view is removed. The webhook must have state attached, similar to
            :meth:`send`.
            .. versionadded:: 2.0
        thread: :class:`~discord.abc.Snowflake`
            The thread the webhook message belongs to.
            .. versionadded:: 2.0
        Raises
        -------
        HTTPException
            Editing the message failed.
        Forbidden
            Edited a message that is not yours.
        TypeError
            You specified both ``embed`` and ``embeds``
        ValueError
            The length of ``embeds`` was invalid,
            there was no token associated with this webhook or the webhook had
            no state.
        Returns
        --------
        :class:`WebhookMessage`
            The newly edited webhook message.
        """
        if self.token is None:
            raise ValueError('This webhook does not have a token associated with it')
        if view is not MISSING:
            # Replacing (or removing) a view requires real connection state so
            # component interactions can be (re-)routed.
            if isinstance(self._state, _WebhookState):
                raise ValueError('This webhook does not have state associated with it')
            # Stop dispatching to whatever view is currently attached to this
            # message before the edit takes effect.
            self._state.prevent_view_updates_for(message_id)
        # Library-level default allowed-mentions configured on the client, if any.
        previous_mentions: Optional[AllowedMentions] = getattr(self._state, 'allowed_mentions', None)
        params = handle_message_parameters(
            content=content,
            attachments=attachments,
            embed=embed,
            embeds=embeds,
            view=view,
            allowed_mentions=allowed_mentions,
            previous_allowed_mentions=previous_mentions,
        )
        thread_id: Optional[int] = None
        if thread is not MISSING:
            thread_id = thread.id
        adapter = async_context.get()
        data = await adapter.edit_webhook_message(
            self.id,
            self.token,
            message_id,
            session=self.session,
            proxy=self.proxy,
            proxy_auth=self.proxy_auth,
            payload=params.payload,
            multipart=params.multipart,
            files=params.files,
            thread_id=thread_id,
        )
        message = self._create_message(data, thread=thread)
        # Re-register the new view for component dispatch (MISSING and None
        # are both falsy, so this only runs for a real, unfinished view).
        if view and not view.is_finished():
            self._state.store_view(view, message_id)
        return message
async def delete_message(self, message_id: int, /, *, thread: Snowflake = MISSING) -> None:
"""|coro|
Deletes a message owned by this webhook.
This is a lower level interface to :meth:`WebhookMessage.delete` in case
you only have an ID.
.. versionadded:: 1.6
.. versionchanged:: 2.0
``message_id`` parameter is now positional-only.
.. versionchanged:: 2.0
This function will now raise :exc:`ValueError` instead of
``InvalidArgument``.
Parameters
------------
message_id: :class:`int`
The message ID to delete.
thread: :class:`~discord.abc.Snowflake`
The thread the webhook message belongs to.
.. versionadded:: 2.0
Raises
-------
HTTPException
Deleting the message failed.
Forbidden
Deleted a message that is not yours.
ValueError
This webhook does not have a token associated with it.
"""
if self.token is None:
raise ValueError('This webhook does not have a token associated with it')
thread_id: Optional[int] = None
if thread is not MISSING:
thread_id = thread.id
adapter = async_context.get()
await adapter.delete_webhook_message(
self.id,
self.token,
message_id,
session=self.session,
proxy=self.proxy,
proxy_auth=self.proxy_auth,
thread_id=thread_id,
)
| mit | abbb1ec425ea3618310245726a9712c5 | 32.789848 | 143 | 0.572695 | 4.335982 | false | false | false | false |
explosion/thinc | thinc/tests/layers/test_transforms.py | 2 | 1975 | from thinc.api import strings2arrays, NumpyOps, Ragged, registry
import numpy
import pytest
from ..util import get_data_checker
@pytest.fixture(params=[[], [(10, 2)], [(5, 3), (1, 3)], [(2, 3), (0, 3), (1, 3)]])
def shapes(request):
return request.param
@pytest.fixture
def ops():
return NumpyOps()
@pytest.fixture
def list_data(shapes):
return [numpy.zeros(shape, dtype="f") for shape in shapes]
@pytest.fixture
def ragged_data(ops, list_data):
lengths = numpy.array([len(x) for x in list_data], dtype="i")
if not list_data:
return Ragged(ops.alloc2f(0, 0), lengths)
else:
return Ragged(ops.flatten(list_data), lengths)
@pytest.fixture
def padded_data(ops, list_data):
return ops.list2padded(list_data)
@pytest.fixture
def array_data(ragged_data):
return ragged_data.data
def check_transform(transform, in_data, out_data):
model = registry.resolve({"config": {"@layers": transform}})["config"]
input_checker = get_data_checker(in_data)
output_checker = get_data_checker(out_data)
model.initialize(in_data, out_data)
Y, backprop = model(in_data, is_train=True)
output_checker(Y, out_data)
dX = backprop(Y)
input_checker(dX, in_data)
def test_list2array(list_data, array_data):
check_transform("list2array.v1", list_data, array_data)
def test_list2ragged(list_data, ragged_data):
check_transform("list2ragged.v1", list_data, ragged_data)
def test_list2padded(list_data, padded_data):
check_transform("list2padded.v1", list_data, padded_data)
def test_ragged2list(ragged_data, list_data):
check_transform("ragged2list.v1", ragged_data, list_data)
def test_padded2list(padded_data, list_data):
check_transform("padded2list.v1", padded_data, list_data)
def test_strings2arrays():
strings = ["hello", "world"]
model = strings2arrays()
Y, backprop = model.begin_update(strings)
assert len(Y) == len(strings)
assert backprop([]) == []
| mit | b9ea6f1a6f790ce9f286bfa954966288 | 24.320513 | 83 | 0.682025 | 3.095611 | false | true | false | false |
explosion/thinc | thinc/layers/with_nvtx_range.py | 2 | 1293 | from typing import Optional, Callable, Any, Tuple, TypeVar
from ..model import Model
from ..util import use_nvtx_range
_ModelT = TypeVar("_ModelT", bound=Model)
def with_nvtx_range(
layer: _ModelT,
name: Optional[str] = None,
*,
forward_color: int = -1,
backprop_color: int = -1,
) -> _ModelT:
"""Wraps any layer and marks the forward and backprop phases as
NVTX ranges for CUDA profiling.
By default, the name of the layer is used as the name of the range,
followed by the name of the pass.
"""
name = layer.name if name is None else name
orig_forward = layer._func
orig_init = layer.init
def forward(model: Model, X: Any, is_train: bool) -> Tuple[Any, Callable]:
with use_nvtx_range(f"{name} forward", forward_color):
layer_Y, layer_callback = orig_forward(model, X, is_train=is_train)
def backprop(dY: Any) -> Any:
with use_nvtx_range(f"{name} backprop", backprop_color):
return layer_callback(dY)
return layer_Y, backprop
def init(_model: Model, X: Any, Y: Any) -> Model:
if orig_init is not None:
return orig_init(layer, X, Y)
else:
return layer
layer.replace_callbacks(forward, init=init)
return layer
| mit | 6eb2cd04350ddc7b8ef4d0c73c04c0b6 | 27.108696 | 79 | 0.622583 | 3.494595 | false | false | false | false |
b1naryth1ef/rowboat | rowboat/util/__init__.py | 3 | 1771 | from __future__ import absolute_import
import re
import yaml
from collections import OrderedDict
from datetime import datetime
from gevent.local import local
# Invisible space that can be used to escape mentions
ZERO_WIDTH_SPACE = u'\u200B'
# Replacement grave accent that can be used to escape codeblocks
MODIFIER_GRAVE_ACCENT = u'\u02CB'
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
INVITE_DOMAIN_RE = re.compile(r'(discord.gg|discordapp.com/invite)')
def C(txt, codeblocks=False):
# Do some basic safety checks:
txt = txt.replace('@', '@' + ZERO_WIDTH_SPACE)
if codeblocks:
txt = escape_codeblocks(txt)
return INVITE_DOMAIN_RE.sub('\g<0>' + ZERO_WIDTH_SPACE, txt)
def escape_codeblocks(txt):
return txt.replace('`', MODIFIER_GRAVE_ACCENT)
class LocalProxy(object):
def __init__(self):
self.local = local()
def set(self, other):
self.local.obj = other
def get(self):
return self.local.obj
def __getattr__(self, attr):
return getattr(self.local.obj, attr)
class MetaException(Exception):
def __init__(self, msg, metadata=None):
self.msg = msg
self.metadata = metadata
super(MetaException, self).__init__(msg)
def default_json(obj):
if isinstance(obj, datetime):
return obj.isoformat()
return TypeError('Type %s is not serializable' % type(obj))
| mit | 97c539830ccea4243276dce2eb93b8f1 | 23.943662 | 76 | 0.678713 | 3.63655 | false | false | false | false |
b1naryth1ef/rowboat | rowboat/tasks/backfill.py | 2 | 1204 | from . import task, get_client
from rowboat.models.message import Message
from disco.types.channel import MessageIterator
@task(max_concurrent=1, max_queue_size=10, global_lock=lambda guild_id: guild_id)
def backfill_guild(task, guild_id):
client = get_client()
for channel in client.api.guilds_channels_list(guild_id).values():
backfill_channel.queue(channel.id)
@task(max_concurrent=6, max_queue_size=500, global_lock=lambda channel_id: channel_id)
def backfill_channel(task, channel_id):
client = get_client()
channel = client.api.channels_get(channel_id)
# Hack the state
client.state.channels[channel.id] = channel
if channel.guild_id:
client.state.guilds[channel.guild_id] = client.api.guilds_get(channel.guild_id)
scanned = 0
inserted = 0
msgs_iter = MessageIterator(client, channel, bulk=True, after=1, direction=MessageIterator.Direction.DOWN)
for chunk in msgs_iter:
if not chunk:
break
scanned += len(chunk)
inserted += len(Message.from_disco_message_many(chunk, safe=True))
task.log.info('Completed backfill on channel %s, %s scanned and %s inserted', channel_id, scanned, inserted)
| mit | 25919fc61db65ef0858400be329de65b | 34.411765 | 112 | 0.703488 | 3.40113 | false | false | false | false |
b1naryth1ef/rowboat | rowboat/plugins/infractions.py | 1 | 26065 | import csv
import gevent
import humanize
from StringIO import StringIO
from holster.emitter import Priority
from datetime import datetime
from disco.bot import CommandLevels
from disco.types.user import User as DiscoUser
from disco.types.message import MessageTable, MessageEmbed
from rowboat.plugins import RowboatPlugin as Plugin, CommandFail, CommandSuccess
from rowboat.util.timing import Eventual
from rowboat.util.input import parse_duration
from rowboat.types import Field, snowflake
from rowboat.types.plugin import PluginConfig
from rowboat.plugins.modlog import Actions
from rowboat.models.user import User, Infraction
from rowboat.models.guild import GuildMemberBackup, GuildBan
from rowboat.constants import (
GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID, GREEN_TICK_EMOJI, RED_TICK_EMOJI
)
def clamp(string, size):
if len(string) > size:
return string[:size] + '...'
return string
def maybe_string(obj, exists, notexists, **kwargs):
if obj:
return exists.format(o=obj, **kwargs)
return notexists.format(**kwargs)
class InfractionsConfig(PluginConfig):
# Whether to confirm actions in the channel they are executed
confirm_actions = Field(bool, default=True)
confirm_actions_reaction = Field(bool, default=False)
confirm_actions_expiry = Field(int, default=0)
# Whether to notify users on actions
notify_actions = Field(bool, default=False)
# The mute role
mute_role = Field(snowflake, default=None)
# Level required to edit reasons
reason_edit_level = Field(int, default=int(CommandLevels.ADMIN))
@Plugin.with_config(InfractionsConfig)
class InfractionsPlugin(Plugin):
def load(self, ctx):
super(InfractionsPlugin, self).load(ctx)
self.inf_task = Eventual(self.clear_infractions)
self.spawn_later(5, self.queue_infractions)
def queue_infractions(self):
next_infraction = list(Infraction.select().where(
(Infraction.active == 1) &
(~(Infraction.expires_at >> None))
).order_by(Infraction.expires_at.asc()).limit(1))
if not next_infraction:
self.log.info('[INF] no infractions to wait for')
return
self.log.info('[INF] waiting until %s for %s', next_infraction[0].expires_at, next_infraction[0].id)
self.inf_task.set_next_schedule(next_infraction[0].expires_at)
def clear_infractions(self):
expired = list(Infraction.select().where(
(Infraction.active == 1) &
(Infraction.expires_at < datetime.utcnow())
))
self.log.info('[INF] attempting to clear %s expired infractions', len(expired))
for item in expired:
guild = self.state.guilds.get(item.guild_id)
if not guild:
self.log.warning('[INF] failed to clear infraction %s, no guild exists', item.id)
continue
# TODO: hacky
type_ = {i.index: i for i in Infraction.Types.attrs}[item.type_]
if type_ == Infraction.Types.TEMPBAN:
self.call(
'ModLogPlugin.create_debounce',
guild.id,
['GuildBanRemove'],
user_id=item.user_id,
)
guild.delete_ban(item.user_id)
# TODO: perhaps join on users above and use username from db
self.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_TEMPBAN_EXPIRE,
guild.id,
user_id=item.user_id,
user=unicode(self.state.users.get(item.user_id) or item.user_id),
inf=item
)
elif type_ == Infraction.Types.TEMPMUTE or Infraction.Types.TEMPROLE:
member = guild.get_member(item.user_id)
if member:
if item.metadata['role'] in member.roles:
self.call(
'ModLogPlugin.create_debounce',
guild.id,
['GuildMemberUpdate'],
user_id=item.user_id,
role_id=item.metadata['role'],
)
member.remove_role(item.metadata['role'])
self.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_TEMPMUTE_EXPIRE,
guild.id,
member=member,
inf=item
)
else:
GuildMemberBackup.remove_role(
item.guild_id,
item.user_id,
item.metadata['role'])
else:
self.log.warning('[INF] failed to clear infraction %s, type is invalid %s', item.id, item.type_)
continue
# TODO: n+1
item.active = False
item.save()
# Wait a few seconds to backoff from a possible bad loop, and requeue new infractions
gevent.sleep(5)
self.queue_infractions()
@Plugin.listen('GuildMemberUpdate', priority=Priority.BEFORE)
def on_guild_member_update(self, event):
pre_member = event.guild.members.get(event.id)
if not pre_member:
return
pre_roles = set(pre_member.roles)
post_roles = set(event.roles)
if pre_roles == post_roles:
return
removed = pre_roles - post_roles
# If the user was unmuted, mark any temp-mutes as inactive
if event.config.mute_role in removed:
Infraction.clear_active(event, event.user.id, [Infraction.Types.TEMPMUTE])
@Plugin.listen('GuildBanRemove')
def on_guild_ban_remove(self, event):
Infraction.clear_active(event, event.user.id, [Infraction.Types.BAN, Infraction.Types.TEMPBAN])
@Plugin.command('unban', '<user:snowflake> [reason:str...]', level=CommandLevels.MOD)
def unban(self, event, user, reason=None):
try:
GuildBan.get(user_id=user, guild_id=event.guild.id)
event.guild.delete_ban(user)
except GuildBan.DoesNotExist:
raise CommandFail('user with id `{}` is not banned'.format(user))
Infraction.create(
guild_id=event.guild.id,
user_id=user,
actor_id=event.author.id,
type_=Infraction.Types.UNBAN,
reason=reason
)
raise CommandSuccess('unbanned user with id `{}`'.format(user))
@Plugin.command('archive', group='infractions', level=CommandLevels.ADMIN)
def infractions_archive(self, event):
user = User.alias()
actor = User.alias()
q = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(Infraction.guild_id == event.guild.id)
buff = StringIO()
w = csv.writer(buff)
for inf in q:
w.writerow([
inf.id,
inf.user_id,
unicode(inf.user).encode('utf-8'),
inf.actor_id,
unicode(inf.actor).encode('utf-8'),
unicode({i.index: i for i in Infraction.Types.attrs}[inf.type_]).encode('utf-8'),
unicode(inf.reason).encode('utf-8'),
])
event.msg.reply('Ok, here is an archive of all infractions', attachments=[
('infractions.csv', buff.getvalue())
])
@Plugin.command('info', '<infraction:int>', group='infractions', level=CommandLevels.MOD)
def infraction_info(self, event, infraction):
try:
user = User.alias()
actor = User.alias()
infraction = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(
(Infraction.id == infraction) &
(Infraction.guild_id == event.guild.id)
).get()
except Infraction.DoesNotExist:
raise CommandFail('cannot find an infraction with ID `{}`'.format(infraction))
type_ = {i.index: i for i in Infraction.Types.attrs}[infraction.type_]
embed = MessageEmbed()
if type_ in (Infraction.Types.MUTE, Infraction.Types.TEMPMUTE, Infraction.Types.TEMPROLE):
embed.color = 0xfdfd96
elif type_ in (Infraction.Types.KICK, Infraction.Types.SOFTBAN):
embed.color = 0xffb347
else:
embed.color = 0xff6961
embed.title = str(type_).title()
embed.set_thumbnail(url=infraction.user.get_avatar_url())
embed.add_field(name='User', value=unicode(infraction.user), inline=True)
embed.add_field(name='Moderator', value=unicode(infraction.actor), inline=True)
embed.add_field(name='Active', value='yes' if infraction.active else 'no', inline=True)
if infraction.active and infraction.expires_at:
embed.add_field(name='Expires', value=humanize.naturaldelta(infraction.expires_at - datetime.utcnow()))
embed.add_field(name='Reason', value=infraction.reason or '_No Reason Given', inline=False)
embed.timestamp = infraction.created_at.isoformat()
event.msg.reply('', embed=embed)
@Plugin.command('search', '[query:user|str...]', group='infractions', level=CommandLevels.MOD)
def infraction_search(self, event, query=None):
q = (Infraction.guild_id == event.guild.id)
if query and isinstance(query, list) and isinstance(query[0], DiscoUser):
query = query[0].id
elif query:
query = ' '.join(query)
if query and (isinstance(query, int) or query.isdigit()):
q &= (
(Infraction.id == int(query)) |
(Infraction.user_id == int(query)) |
(Infraction.actor_id == int(query)))
elif query:
q &= (Infraction.reason ** query)
user = User.alias()
actor = User.alias()
infractions = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(q).order_by(Infraction.created_at.desc()).limit(6)
tbl = MessageTable()
tbl.set_header('ID', 'Created', 'Type', 'User', 'Moderator', 'Active', 'Reason')
for inf in infractions:
type_ = {i.index: i for i in Infraction.Types.attrs}[inf.type_]
reason = inf.reason or ''
if len(reason) > 256:
reason = reason[:256] + '...'
if inf.active:
active = 'yes'
if inf.expires_at:
active += ' (expires in {})'.format(humanize.naturaldelta(inf.expires_at - datetime.utcnow()))
else:
active = 'no'
tbl.add(
inf.id,
inf.created_at.isoformat(),
str(type_),
unicode(inf.user),
unicode(inf.actor),
active,
clamp(reason, 128)
)
event.msg.reply(tbl.compile())
@Plugin.command('recent', aliases=['latest'], group='infractions', level=CommandLevels.MOD)
def infractions_recent(self, event):
# TODO: fucking write this bruh
pass
@Plugin.command('duration', '<infraction:int> <duration:str>', group='infractions', level=CommandLevels.MOD)
def infraction_duration(self, event, infraction, duration):
try:
inf = Infraction.get(id=infraction)
except Infraction.DoesNotExist:
raise CommandFail('invalid infraction (try `!infractions recent`)')
if inf.actor_id != event.author.id and event.user_level < CommandLevels.ADMIN:
raise CommandFail('only administrators can modify the duration of infractions created by other moderators')
if not inf.active:
raise CommandFail('that infraction is not active and cannot be updated')
expires_dt = parse_duration(duration, inf.created_at)
converted = False
if inf.type_ in [Infraction.Types.MUTE.index, Infraction.Types.BAN.index]:
inf.type_ = (
Infraction.Types.TEMPMUTE
if inf.type_ == Infraction.Types.MUTE.index else
Infraction.Types.TEMPBAN
)
converted = True
elif inf.type_ not in [
Infraction.Types.TEMPMUTE.index,
Infraction.Types.TEMPBAN.index,
Infraction.Types.TEMPROLE.index]:
raise CommandFail('cannot set the duration for that type of infraction')
inf.expires_at = expires_dt
inf.save()
self.queue_infractions()
if converted:
raise CommandSuccess('ok, I\'ve made that infraction temporary, it will now expire on {}'.format(
inf.expires_at.isoformat()
))
else:
raise CommandSuccess('ok, I\'ve updated that infractions duration, it will now expire on {}'.format(
inf.expires_at.isoformat()
))
@Plugin.command('reason', '<infraction:int> <reason:str...>', group='infractions', level=CommandLevels.MOD)
def reason(self, event, infraction, reason):
try:
inf = Infraction.get(id=infraction)
except Infraction.DoesNotExist:
inf = None
if inf is None or inf.guild_id != event.guild.id:
event.msg.reply('Unknown infraction ID')
return
if not inf.actor_id:
inf.actor_id = event.author.id
if inf.actor_id != event.author.id and event.user_level < event.config.reason_edit_level:
raise CommandFail('you do not have the permissions required to edit other moderators infractions')
inf.reason = reason
inf.save()
raise CommandSuccess('I\'ve updated the reason for infraction #{}'.format(inf.id))
def can_act_on(self, event, victim_id, throw=True):
if event.author.id == victim_id:
if not throw:
return False
raise CommandFail('cannot execute that action on yourself')
victim_level = self.bot.plugins.get('CorePlugin').get_level(event.guild, victim_id)
if event.user_level <= victim_level:
if not throw:
return False
raise CommandFail('invalid permissions')
return True
def confirm_action(self, event, message):
if not event.config.confirm_actions:
return
if event.config.confirm_actions_reaction:
event.msg.add_reaction(GREEN_TICK_EMOJI)
return
msg = event.msg.reply(message)
if event.config.confirm_actions_expiry > 0:
# Close over this thread local
expiry = event.config.confirm_actions_expiry
def f():
gevent.sleep(expiry)
msg.delete()
# Run this in a greenlet so we dont block event execution
self.spawn(f)
@Plugin.command('mute', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
@Plugin.command('tempmute', '<user:user|snowflake> <duration:str> [reason:str...]', level=CommandLevels.MOD)
def tempmute(self, event, user, duration=None, reason=None):
if not duration and reason:
duration = parse_duration(reason.split(' ')[0], safe=True)
if duration:
if ' ' in reason:
reason = reason.split(' ', 1)[-1]
else:
reason = None
elif duration:
duration = parse_duration(duration)
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
if not event.config.mute_role:
raise CommandFail('mute is not setup on this server')
if event.config.mute_role in member.roles:
raise CommandFail(u'{} is already muted'.format(member.user))
# If we have a duration set, this is a tempmute
if duration:
# Create the infraction
Infraction.tempmute(self, event, member, reason, duration)
self.queue_infractions()
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: {u} is now muted for {t} (`{o}`)',
u':ok_hand: {u} is now muted for {t}',
u=member.user,
t=humanize.naturaldelta(duration - datetime.utcnow()),
))
else:
existed = False
# If the user is already muted check if we can take this from a temp
# to perma mute.
if event.config.mute_role in member.roles:
existed = Infraction.clear_active(event, member.id, [Infraction.Types.TEMPMUTE])
# The user is 100% muted and not tempmuted at this point, so lets bail
if not existed:
raise CommandFail(u'{} is already muted'.format(member.user))
Infraction.mute(self, event, member, reason)
existed = u' [was temp-muted]' if existed else ''
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: {u} is now muted (`{o}`)' + existed,
u':ok_hand: {u} is now muted' + existed,
u=member.user,
))
else:
raise CommandFail('invalid user')
@Plugin.command(
'temprole',
'<user:user|snowflake> <role:snowflake|str> <duration:str> [reason:str...]',
level=CommandLevels.MOD)
def temprole(self, event, user, role, duration, reason=None):
member = event.guild.get_member(user)
if not member:
raise CommandFail('invalid user')
self.can_act_on(event, member.id)
role_id = role if isinstance(role, (int, long)) else event.config.role_aliases.get(role.lower())
if not role_id or role_id not in event.guild.roles:
raise CommandFail('invalid or unknown role')
if role_id in member.roles:
raise CommandFail(u'{} is already in that role'.format(member.user))
expire_dt = parse_duration(duration)
Infraction.temprole(self, event, member, role_id, reason, expire_dt)
self.queue_infractions()
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: {u} is now in the {r} role for {t} (`{o}`)',
u':ok_hand: {u} is now in the {r} role for {t}',
r=event.guild.roles[role_id].name,
u=member.user,
t=humanize.naturaldelta(expire_dt - datetime.utcnow()),
))
@Plugin.command('unmute', '<user:user|snowflake>', level=CommandLevels.MOD)
def unmute(self, event, user, reason=None):
# TOOD: eventually we should pull the role from the GuildMemberBackup if they arent in server
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
if not event.config.mute_role:
raise CommandFail('mute is not setup on this server')
if event.config.mute_role not in member.roles:
raise CommandFail(u'{} is not muted'.format(member.user))
Infraction.clear_active(event, member.id, [Infraction.Types.MUTE, Infraction.Types.TEMPMUTE])
self.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberUpdate'],
role_id=event.config.mute_role,
)
member.remove_role(event.config.mute_role)
self.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_UNMUTED,
event.guild.id,
member=member,
actor=unicode(event.author) if event.author.id != member.id else 'Automatic',
)
self.confirm_action(event, u':ok_hand: {} is now unmuted'.format(member.user))
else:
raise CommandFail('invalid user')
@Plugin.command('kick', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
def kick(self, event, user, reason=None):
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
Infraction.kick(self, event, member, reason)
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: kicked {u} (`{o}`)',
u':ok_hand: kicked {u}',
u=member.user,
))
else:
raise CommandFail('invalid user')
@Plugin.command('mkick', parser=True, level=CommandLevels.MOD)
@Plugin.parser.add_argument('users', type=long, nargs='+')
@Plugin.parser.add_argument('-r', '--reason', default='', help='reason for modlog')
def mkick(self, event, args):
members = []
for user_id in args.users:
member = event.guild.get_member(user_id)
if not member:
# TODO: this sucks, batch these
raise CommandFail('failed to kick {}, user not found'.format(user_id))
if not self.can_act_on(event, member.id, throw=False):
raise CommandFail('failed to kick {}, invalid permissions'.format(user_id))
members.append(member)
msg = event.msg.reply('Ok, kick {} users for `{}`?'.format(len(members), args.reason or 'no reason'))
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
for member in members:
Infraction.kick(self, event, member, args.reason)
raise CommandSuccess('kicked {} users'.format(len(members)))
@Plugin.command('ban', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
@Plugin.command('forceban', '<user:snowflake> [reason:str...]', level=CommandLevels.MOD)
def ban(self, event, user, reason=None):
member = None
if isinstance(user, (int, long)):
self.can_act_on(event, user)
Infraction.ban(self, event, user, reason, guild=event.guild)
else:
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
Infraction.ban(self, event, member, reason, guild=event.guild)
else:
raise CommandFail('invalid user')
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: banned {u} (`{o}`)',
u':ok_hand: banned {u}',
u=member.user if member else user,
))
@Plugin.command('softban', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
def softban(self, event, user, reason=None):
"""
Ban then unban a user from the server (with an optional reason for the modlog)
"""
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
Infraction.softban(self, event, member, reason)
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: soft-banned {u} (`{o}`)',
u':ok_hand: soft-banned {u}',
u=member.user,
))
else:
raise CommandFail('invald user')
@Plugin.command('tempban', '<user:user|snowflake> <duration:str> [reason:str...]', level=CommandLevels.MOD)
def tempban(self, event, duration, user, reason=None):
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
expires_dt = parse_duration(duration)
Infraction.tempban(self, event, member, reason, expires_dt)
self.queue_infractions()
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: temp-banned {u} for {t} (`{o}`)',
u':ok_hand: temp-banned {u} for {t}',
u=member.user,
t=humanize.naturaldelta(expires_dt - datetime.utcnow()),
))
else:
raise CommandFail('invalid user')
@Plugin.command('warn', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
def warn(self, event, user, reason=None):
member = None
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
Infraction.warn(self, event, member, reason, guild=event.guild)
else:
raise CommandFail('invalid user')
self.confirm_action(event, maybe_string(
reason,
u':ok_hand: warned {u} (`{o}`)',
u':ok_hand: warned {u}',
u=member.user if member else user,
))
| mit | 3776e80bd810e04da1e899a2e88bbc8c | 37.614815 | 119 | 0.563821 | 3.936122 | false | false | false | false |
craffel/mir_eval | mir_eval/pattern.py | 4 | 23544 | """
Pattern discovery involves the identification of musical patterns (i.e. short
fragments or melodic ideas that repeat at least twice) both from audio and
symbolic representations. The metrics used to evaluate pattern discovery
systems attempt to quantify the ability of the algorithm to not only determine
the present patterns in a piece, but also to find all of their occurrences.
Based on the methods described here:
T. Collins. MIREX task: Discovery of repeated themes & sections.
http://www.music-ir.org/mirex/wiki/2013:Discovery_of_Repeated_Themes_&_Sections,
2013.
Conventions
-----------
The input format can be automatically generated by calling
:func:`mir_eval.io.load_patterns`. This format is a list of a list of
tuples. The first list collections patterns, each of which is a list of
occurences, and each occurrence is a list of MIDI onset tuples of
``(onset_time, mid_note)``
A pattern is a list of occurrences. The first occurrence must be the prototype
of that pattern (i.e. the most representative of all the occurrences). An
occurrence is a list of tuples containing the onset time and the midi note
number.
Metrics
-------
* :func:`mir_eval.pattern.standard_FPR`: Strict metric in order to find the
possibly transposed patterns of exact length. This is the only metric that
considers transposed patterns.
* :func:`mir_eval.pattern.establishment_FPR`: Evaluates the amount of patterns
that were successfully identified by the estimated results, no matter how
many occurrences they found. In other words, this metric captures how the
algorithm successfully *established* that a pattern repeated at least twice,
and this pattern is also found in the reference annotation.
* :func:`mir_eval.pattern.occurrence_FPR`: Evaluation of how well an estimation
can effectively identify all the occurrences of the found patterns,
independently of how many patterns have been discovered. This metric has a
threshold parameter that indicates how similar two occurrences must be in
order to be considered equal. In MIREX, this evaluation is run twice, with
thresholds .75 and .5.
* :func:`mir_eval.pattern.three_layer_FPR`: Aims to evaluate the general
similarity between the reference and the estimations, combining both the
establishment of patterns and the retrieval of its occurrences in a single F1
score.
* :func:`mir_eval.pattern.first_n_three_layer_P`: Computes the three-layer
precision for the first N patterns only in order to measure the ability of
the algorithm to sort the identified patterns based on their relevance.
* :func:`mir_eval.pattern.first_n_target_proportion_R`: Computes the target
proportion recall for the first N patterns only in order to measure the
ability of the algorithm to sort the identified patterns based on their
relevance.
"""
import numpy as np
from . import util
import warnings
import collections
def _n_onset_midi(patterns):
"""Computes the number of onset_midi objects in a pattern
Parameters
----------
patterns :
A list of patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
Returns
-------
n_onsets : int
Number of onsets within the pattern.
"""
return len([o_m for pat in patterns for occ in pat for o_m in occ])
def validate(reference_patterns, estimated_patterns):
"""Checks that the input annotations to a metric look like valid pattern
lists, and throws helpful errors if not.
Parameters
----------
reference_patterns : list
The reference patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
Returns
-------
"""
# Warn if pattern lists are empty
if _n_onset_midi(reference_patterns) == 0:
warnings.warn('Reference patterns are empty.')
if _n_onset_midi(estimated_patterns) == 0:
warnings.warn('Estimated patterns are empty.')
for patterns in [reference_patterns, estimated_patterns]:
for pattern in patterns:
if len(pattern) <= 0:
raise ValueError("Each pattern must contain at least one "
"occurrence.")
for occurrence in pattern:
for onset_midi in occurrence:
if len(onset_midi) != 2:
raise ValueError("The (onset, midi) tuple must "
"contain exactly 2 elements.")
def _occurrence_intersection(occ_P, occ_Q):
"""Computes the intersection between two occurrences.
Parameters
----------
occ_P : list of tuples
(onset, midi) pairs representing the reference occurrence.
occ_Q : list
second list of (onset, midi) tuples
Returns
-------
S : set
Set of the intersection between occ_P and occ_Q.
"""
set_P = set([tuple(onset_midi) for onset_midi in occ_P])
set_Q = set([tuple(onset_midi) for onset_midi in occ_Q])
return set_P & set_Q # Return the intersection
def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
"""Computes the score matrix between the patterns P and Q.
Parameters
----------
P : list
Pattern containing a list of occurrences.
Q : list
Pattern containing a list of occurrences.
similarity_metric : str
A string representing the metric to be used
when computing the similarity matrix. Accepted values:
- "cardinality_score":
Count of the intersection between occurrences.
(Default value = "cardinality_score")
Returns
-------
sm : np.array
The score matrix between P and Q using the similarity_metric.
"""
sm = np.zeros((len(P), len(Q))) # The score matrix
for iP, occ_P in enumerate(P):
for iQ, occ_Q in enumerate(Q):
if similarity_metric == "cardinality_score":
denom = float(np.max([len(occ_P), len(occ_Q)]))
# Compute the score
sm[iP, iQ] = len(_occurrence_intersection(occ_P, occ_Q)) / \
denom
# TODO: More scores: 'normalised matching socre'
else:
raise ValueError("The similarity metric (%s) can only be: "
"'cardinality_score'.")
return sm
def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
"""Standard F1 Score, Precision and Recall.
This metric checks if the prototype patterns of the reference match
possible translated patterns in the prototype patterns of the estimations.
Since the sizes of these prototypes must be equal, this metric is quite
restictive and it tends to be 0 in most of 2013 MIREX results.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
tol : float
Tolerance level when comparing reference against estimation.
Default parameter is the one found in the original matlab code by
Tom Collins used for MIREX 2013.
(Default value = 1e-5)
Returns
-------
f_measure : float
The standard F1 Score
precision : float
The standard Precision
recall : float
The standard Recall
"""
validate(reference_patterns, estimated_patterns)
nP = len(reference_patterns) # Number of patterns in the reference
nQ = len(estimated_patterns) # Number of patterns in the estimation
k = 0 # Number of patterns that match
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Find matches of the prototype patterns
for ref_pattern in reference_patterns:
P = np.asarray(ref_pattern[0]) # Get reference prototype
for est_pattern in estimated_patterns:
Q = np.asarray(est_pattern[0]) # Get estimation prototype
if len(P) != len(Q):
continue
# Check transposition given a certain tolerance
if (len(P) == len(Q) == 1 or
np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
k += 1
break
# Compute the standard measures
precision = k / float(nQ)
recall = k / float(nP)
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall
def establishment_FPR(reference_patterns, estimated_patterns,
similarity_metric="cardinality_score"):
"""Establishment F1 Score, Precision and Recall.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.establishment_FPR(ref_patterns,
... est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
similarity_metric : str
A string representing the metric to be used when computing the
similarity matrix. Accepted values:
- "cardinality_score": Count of the intersection
between occurrences.
(Default value = "cardinality_score")
Returns
-------
f_measure : float
The establishment F1 Score
precision : float
The establishment Precision
recall : float
The establishment Recall
"""
validate(reference_patterns, estimated_patterns)
nP = len(reference_patterns) # Number of elements in reference
nQ = len(estimated_patterns) # Number of elements in estimation
S = np.zeros((nP, nQ)) # Establishment matrix
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
for iP, ref_pattern in enumerate(reference_patterns):
for iQ, est_pattern in enumerate(estimated_patterns):
s = _compute_score_matrix(ref_pattern, est_pattern,
similarity_metric)
S[iP, iQ] = np.max(s)
# Compute scores
precision = np.mean(np.max(S, axis=0))
recall = np.mean(np.max(S, axis=1))
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall
def occurrence_FPR(reference_patterns, estimated_patterns, thres=.75,
similarity_metric="cardinality_score"):
"""Establishment F1 Score, Precision and Recall.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.occurrence_FPR(ref_patterns,
... est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
thres : float
How similar two occcurrences must be in order to be considered
equal
(Default value = .75)
similarity_metric : str
A string representing the metric to be used
when computing the similarity matrix. Accepted values:
- "cardinality_score": Count of the intersection
between occurrences.
(Default value = "cardinality_score")
Returns
-------
f_measure : float
The establishment F1 Score
precision : float
The establishment Precision
recall : float
The establishment Recall
"""
validate(reference_patterns, estimated_patterns)
# Number of elements in reference
nP = len(reference_patterns)
# Number of elements in estimation
nQ = len(estimated_patterns)
# Occurrence matrix with Precision and recall in its last dimension
O_PR = np.zeros((nP, nQ, 2))
# Index of the values that are greater than the specified threshold
rel_idx = np.empty((0, 2), dtype=int)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
for iP, ref_pattern in enumerate(reference_patterns):
for iQ, est_pattern in enumerate(estimated_patterns):
s = _compute_score_matrix(ref_pattern, est_pattern,
similarity_metric)
if np.max(s) >= thres:
O_PR[iP, iQ, 0] = np.mean(np.max(s, axis=0))
O_PR[iP, iQ, 1] = np.mean(np.max(s, axis=1))
rel_idx = np.vstack((rel_idx, [iP, iQ]))
# Compute the scores
if len(rel_idx) == 0:
precision = 0
recall = 0
else:
P = O_PR[:, :, 0]
precision = np.mean(np.max(P[np.ix_(rel_idx[:, 0], rel_idx[:, 1])],
axis=0))
R = O_PR[:, :, 1]
recall = np.mean(np.max(R[np.ix_(rel_idx[:, 0], rel_idx[:, 1])],
axis=1))
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall
def three_layer_FPR(reference_patterns, estimated_patterns):
"""Three Layer F1 Score, Precision and Recall. As described by Meridith.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns,
... est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
Returns
-------
f_measure : float
The three-layer F1 Score
precision : float
The three-layer Precision
recall : float
The three-layer Recall
"""
validate(reference_patterns, estimated_patterns)
def compute_first_layer_PR(ref_occs, est_occs):
"""Computes the first layer Precision and Recall values given the
set of occurrences in the reference and the set of occurrences in the
estimation.
Parameters
----------
ref_occs :
est_occs :
Returns
-------
"""
# Find the length of the intersection between reference and estimation
s = len(_occurrence_intersection(ref_occs, est_occs))
# Compute the first layer scores
precision = s / float(len(ref_occs))
recall = s / float(len(est_occs))
return precision, recall
def compute_second_layer_PR(ref_pattern, est_pattern):
"""Computes the second layer Precision and Recall values given the
set of occurrences in the reference and the set of occurrences in the
estimation.
Parameters
----------
ref_pattern :
est_pattern :
Returns
-------
"""
# Compute the first layer scores
F_1 = compute_layer(ref_pattern, est_pattern)
# Compute the second layer scores
precision = np.mean(np.max(F_1, axis=0))
recall = np.mean(np.max(F_1, axis=1))
return precision, recall
def compute_layer(ref_elements, est_elements, layer=1):
"""Computes the F-measure matrix for a given layer. The reference and
estimated elements can be either patters or occurrences, depending
on the layer.
For layer 1, the elements must be occurrences.
For layer 2, the elements must be patterns.
Parameters
----------
ref_elements :
est_elements :
layer :
(Default value = 1)
Returns
-------
"""
if layer != 1 and layer != 2:
raise ValueError("Layer (%d) must be an integer between 1 and 2"
% layer)
nP = len(ref_elements) # Number of elements in reference
nQ = len(est_elements) # Number of elements in estimation
F = np.zeros((nP, nQ)) # F-measure matrix for the given layer
for iP in range(nP):
for iQ in range(nQ):
if layer == 1:
func = compute_first_layer_PR
elif layer == 2:
func = compute_second_layer_PR
# Compute layer scores
precision, recall = func(ref_elements[iP], est_elements[iQ])
F[iP, iQ] = util.f_measure(precision, recall)
return F
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Compute the second layer (it includes the first layer)
F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)
# Compute the final scores (third layer)
precision_3 = np.mean(np.max(F_2, axis=0))
recall_3 = np.mean(np.max(F_2, axis=1))
f_measure_3 = util.f_measure(precision_3, recall_3)
return f_measure_3, precision_3, recall_3
def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
"""First n three-layer precision.
This metric is basically the same as the three-layer FPR but it is only
applied to the first n estimated patterns, and it only returns the
precision. In MIREX and typically, n = 5.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> P = mir_eval.pattern.first_n_three_layer_P(ref_patterns,
... est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix
(Default value = 5)
Returns
-------
precision : float
The first n three-layer Precision
"""
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
# Compute the three-layer scores for the first n estimated patterns
F, P, R = three_layer_FPR(reference_patterns, fn_est_patterns)
return P # Return the precision only
def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
"""First n target proportion establishment recall metric.
This metric is similar is similar to the establishment FPR score, but it
only takes into account the first n estimated patterns and it only
outputs the Recall value of it.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> R = mir_eval.pattern.first_n_target_proportion_R(
... ref_patterns, est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix.
(Default value = 5)
Returns
-------
recall : float
The first n target proportion Recall.
"""
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
return R
def evaluate(ref_patterns, est_patterns, **kwargs):
"""Load data and perform the evaluation.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)
Parameters
----------
ref_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
est_patterns : list
The estimated patterns in the same format
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Compute all the metrics
scores = collections.OrderedDict()
# Standard scores
scores['F'], scores['P'], scores['R'] = \
util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs)
# Establishment scores
scores['F_est'], scores['P_est'], scores['R_est'] = \
util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns,
**kwargs)
# Occurrence scores
# Force these values for thresh
kwargs['thresh'] = .5
scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
kwargs['thresh'] = .75
scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
# Three-layer scores
scores['F_3'], scores['P_3'], scores['R_3'] = \
util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns,
**kwargs)
# First Five Patterns scores
# Set default value of n
if 'n' not in kwargs:
kwargs['n'] = 5
scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns,
est_patterns, **kwargs)
scores['FFTP_est'] = \
util.filter_kwargs(first_n_target_proportion_R, ref_patterns,
est_patterns, **kwargs)
return scores
| mit | 05aa119780c527de8c578859941e1a7b | 33.471449 | 84 | 0.619309 | 4.086081 | false | false | false | false |
craffel/mir_eval | tests/test_hierarchy.py | 1 | 10581 | '''
Unit tests for mir_eval.hierarchy
'''
from glob import glob
import re
import warnings
import json
import numpy as np
import scipy.sparse
import mir_eval
from nose.tools import raises
# Absolute tolerance for floating-point score comparisons
# (passed as `atol` to np.allclose in the regression tests).
A_TOL = 1e-12
def test_tmeasure_pass():
    """Yield t-measure sanity checks across windows and frame sizes.

    An estimate with none of the hierarchical structure must score 0;
    the reference compared against itself must score 1.
    """
    # Two-level reference: coarse level spans [0, 30], fine level splits
    # it at 15.  The estimate keeps only the coarse level, so it recovers
    # none of the structure.
    reference = [np.asarray(level) for level in
                 [[[0, 30]], [[0, 15], [15, 30]]]]
    estimate = reference[:1]

    def __check(window, frame_size):
        # Structureless estimate: every returned score must be zero.
        zero_scores = mir_eval.hierarchy.tmeasure(reference, estimate,
                                                  window=window,
                                                  frame_size=frame_size)
        assert all(score == 0.0 for score in zero_scores)
        # Self-comparison: every returned score must be perfect.
        perfect_scores = mir_eval.hierarchy.tmeasure(reference, reference,
                                                     window=window,
                                                     frame_size=frame_size)
        assert all(score == 1.0 for score in perfect_scores)

    for window in [5, 10, 15, 30, 90, None]:
        for frame_size in [0.1, 0.5, 1.0]:
            yield __check, window, frame_size
def test_tmeasure_warning():
    """tmeasure warns when a finer layer drops boundaries from the layer above."""
    # Inconsistent hierarchy: level 0 has a boundary at t=5 that level 1
    # does not preserve.
    hierarchy = [np.asarray(level) for level in
                 [[[0, 5], [5, 10]], [[0, 10]]]]

    warnings.resetwarnings()
    warnings.simplefilter('always')
    with warnings.catch_warnings(record=True) as caught:
        mir_eval.hierarchy.tmeasure(hierarchy, hierarchy)

    assert len(caught) > 0
    first = caught[0]
    assert first.category is UserWarning
    assert 'Segment hierarchy is inconsistent at level 1' in str(first.message)
def test_tmeasure_fail_span():
    """tmeasure rejects hierarchies whose layers do not share a time span."""
    # Wrap once: each yielded case expects a ValueError.
    tmeasure_error = raises(ValueError)(mir_eval.hierarchy.tmeasure)

    # Hierarchy that does not start at time 0
    ref = [np.asarray(level) for level in
           [[[1, 10]], [[1, 5], [5, 10]]]]
    yield tmeasure_error, ref, ref

    # Layers that end at different times
    ref = [np.asarray(level) for level in
           [[[0, 5]], [[0, 5], [5, 6]]]]
    yield tmeasure_error, ref, ref

    # Reference and estimate covering different total durations
    ref = [np.asarray(level) for level in
           [[[0, 10]], [[0, 5], [5, 10]]]]
    est = [np.asarray(level) for level in
           [[[0, 15]], [[0, 5], [5, 15]]]]
    yield tmeasure_error, ref, est
def test_tmeasure_fail_frame_size():
    """tmeasure rejects non-positive frame sizes and frames wider than the window."""
    hierarchy = [np.asarray(level) for level in
                 [[[0, 60]], [[0, 30], [30, 60]]]]

    @raises(ValueError)
    def __check(window, frame_size):
        mir_eval.hierarchy.tmeasure(hierarchy, hierarchy,
                                    window=window,
                                    frame_size=frame_size)

    for window in [None, 15, 30]:
        # Frame sizes must be strictly positive.
        yield __check, window, -1
        yield __check, window, 0
        # A finite window must be at least one frame wide.
        if window is not None:
            yield __check, window, 2 * window
def test_lmeasure_pass():
    """Yield l-measure sanity checks across frame sizes.

    An estimate with none of the labeled structure must score 0;
    the reference compared against itself must score 1.
    """
    # Two-level labeled reference; the estimate keeps only the top level,
    # so it captures none of the fine-grained structure.
    intervals = [np.asarray(level) for level in
                 [[[0, 30]], [[0, 15], [15, 30]]]]
    labels = [['A'], ['a', 'b']]
    est_intervals = intervals[:1]
    est_labels = labels[:1]

    def __check(frame_size):
        # Structureless estimate: every returned score must be zero.
        zero_scores = mir_eval.hierarchy.lmeasure(intervals, labels,
                                                  est_intervals, est_labels,
                                                  frame_size=frame_size)
        assert all(score == 0.0 for score in zero_scores)
        # Self-comparison: every returned score must be perfect.
        perfect_scores = mir_eval.hierarchy.lmeasure(intervals, labels,
                                                     intervals, labels,
                                                     frame_size=frame_size)
        assert all(score == 1.0 for score in perfect_scores)

    for frame_size in [0.1, 0.5, 1.0]:
        yield __check, frame_size
def test_lmeasure_warning():
    """lmeasure warns when a finer layer drops boundaries from the layer above."""
    # Inconsistent hierarchy: level 0 has a boundary at t=5 that level 1
    # does not preserve.
    intervals = [np.asarray(level) for level in
                 [[[0, 5], [5, 10]], [[0, 10]]]]
    labels = [['a', 'b'], ['A']]

    warnings.resetwarnings()
    warnings.simplefilter('always')
    with warnings.catch_warnings(record=True) as caught:
        mir_eval.hierarchy.lmeasure(intervals, labels, intervals, labels)

    assert len(caught) > 0
    first = caught[0]
    assert first.category is UserWarning
    assert 'Segment hierarchy is inconsistent at level 1' in str(first.message)
def test_lmeasure_fail_span():
    """lmeasure rejects hierarchies whose layers do not share a time span."""
    # Wrap once: each yielded case expects a ValueError.
    lmeasure_error = raises(ValueError)(mir_eval.hierarchy.lmeasure)
    labels = [['A'], ['a', 'b']]

    # Hierarchy that does not start at time 0
    ref = [np.asarray(level) for level in
           [[[1, 10]], [[1, 5], [5, 10]]]]
    yield lmeasure_error, ref, labels, ref, labels

    # Layers that end at different times
    ref = [np.asarray(level) for level in
           [[[0, 5]], [[0, 5], [5, 6]]]]
    yield lmeasure_error, ref, labels, ref, labels

    # Reference and estimate covering different total durations
    ref = [np.asarray(level) for level in
           [[[0, 10]], [[0, 5], [5, 10]]]]
    est = [np.asarray(level) for level in
           [[[0, 15]], [[0, 5], [5, 15]]]]
    yield lmeasure_error, ref, labels, est, labels
def test_lmeasure_fail_frame_size():
    """lmeasure rejects non-positive frame sizes."""
    intervals = [np.asarray(level) for level in
                 [[[0, 60]], [[0, 30], [30, 60]]]]
    labels = [['A'], ['a', 'b']]

    @raises(ValueError)
    def __check(frame_size):
        mir_eval.hierarchy.lmeasure(intervals, labels, intervals, labels,
                                    frame_size=frame_size)

    # Frame sizes must be strictly positive.
    yield __check, -1
    yield __check, 0
def test_hierarchy_regression():
    """Regression-test hierarchy.evaluate() against stored reference scores.

    Each ``output_w=<window>.json`` fixture holds the expected metric
    dictionary for the corresponding window parameter; every key in the
    fixture must match the freshly computed output within ``A_TOL``.
    """
    ref_files = sorted(glob('data/hierarchy/ref*.lab'))
    est_files = sorted(glob('data/hierarchy/est*.lab'))
    out_files = sorted(glob('data/hierarchy/output*.json'))

    ref_hier = [mir_eval.io.load_labeled_intervals(_) for _ in ref_files]
    est_hier = [mir_eval.io.load_labeled_intervals(_) for _ in est_files]

    # Split (intervals, labels) pairs into parallel lists.
    ref_ints = [seg[0] for seg in ref_hier]
    ref_labs = [seg[1] for seg in ref_hier]
    est_ints = [seg[0] for seg in est_hier]
    est_labs = [seg[1] for seg in est_hier]

    def __test(w, ref_i, ref_l, est_i, est_l, target):
        outputs = mir_eval.hierarchy.evaluate(ref_i, ref_l,
                                              est_i, est_l,
                                              window=w)
        for key in target:
            assert np.allclose(target[key], outputs[key], atol=A_TOL)

    for out in out_files:
        with open(out, 'r') as fdesc:
            target = json.load(fdesc)
        # Extract the window parameter from the filename.  Raw string and
        # escaped '.' fix the invalid '\d' escape (a DeprecationWarning on
        # Python 3.6+) and match the literal dot before the extension.
        window = float(re.match(r'.*output_w=(\d+)\.json$', out).groups()[0])
        yield __test, window, ref_ints, ref_labs, est_ints, est_labs, target
def test_count_inversions():
    """_count_inversions counts pairs (i, j) with a[i] >= b[j]."""
    first = [2, 4, 6]
    second = [1, 2, 3, 4]
    # Inversions of (first, second):
    #   2 >= {1, 2}          -> 2 pairs
    #   4 >= {1, 2, 3, 4}    -> 4 pairs
    #   6 >= {1, 2, 3, 4}    -> 4 pairs
    assert mir_eval.hierarchy._count_inversions(first, second) == 10
    # Inversions of (second, first):
    #   2 >= {2};  3 >= {2};  4 >= {2, 4}
    assert mir_eval.hierarchy._count_inversions(second, first) == 4

    # Repeated values contribute once per occurrence on either side.
    first = [2, 2, 4]
    second = [1, 2, 4, 4]
    # (first, second): each 2 >= {1, 2}; 4 >= {1, 2, 4, 4}
    assert mir_eval.hierarchy._count_inversions(first, second) == 8
    # (second, first): 2 >= {2, 2}; each 4 >= {2, 2, 4}
    assert mir_eval.hierarchy._count_inversions(second, first) == 8
def test_meet():
    """_meet should give the deepest level at which two frames share a label."""
    frame_size = 1

    # Three-level hierarchy over [0, 10).
    int_hier = [np.array([[0, 10]]),
                np.array([[0, 6], [6, 10]]),
                np.array([[0, 2], [2, 4], [4, 6], [6, 8], [8, 10]])]

    lab_hier = [['X'],
                ['A', 'B'],
                ['a', 'b', 'a', 'c', 'b']]

    # Expected meet matrix: entry (i, j) is the deepest level whose label
    # agrees between frame i and frame j.
    meet_truth = np.asarray([
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],    # (XAa)
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],    # (XAa)
        [2, 2, 3, 3, 2, 2, 1, 1, 3, 3],    # (XAb)
        [2, 2, 3, 3, 2, 2, 1, 1, 3, 3],    # (XAb)
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],    # (XAa)
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],    # (XAa)
        [1, 1, 1, 1, 1, 1, 3, 3, 2, 2],    # (XBc)
        [1, 1, 1, 1, 1, 1, 3, 3, 2, 2],    # (XBc)
        [1, 1, 3, 3, 1, 1, 2, 2, 3, 3],    # (XBb)
        [1, 1, 3, 3, 1, 1, 2, 2, 3, 3],    # (XBb)
    ])
    result = mir_eval.hierarchy._meet(int_hier, lab_hier, frame_size)

    # The matrix comes back as a sparse CSR matrix.
    assert isinstance(result, scipy.sparse.csr_matrix)

    dense = result.toarray()
    assert dense.shape == (10, 10)
    assert np.all(dense == meet_truth)
def test_compare_frame_rankings():
    """_compare_frame_rankings counts ranking violations and normalizers."""
    # A violation is a pair (i, j) with ref[i] < ref[j] but est[i] >= est[j].
    ref_rank = np.asarray([1, 2, 3, 3])
    # Transitive ref pairs: (1,2), (1,3), (1,3), (2,3), (2,3)  -> 5
    # Non-transitive (adjacent ranks only): (1,2), (2,3), (2,3) -> 3
    # Comparing ref to itself cannot produce violations, so these calls
    # only verify the normalization constants.
    violations, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        ref_rank, ref_rank, transitive=True)
    assert violations == 0
    assert normalizer == 5.0

    violations, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        ref_rank, ref_rank, transitive=False)
    assert violations == 0
    assert normalizer == 3.0

    est_rank = np.asarray([1, 2, 1, 3])
    # Transitively, two pairs are inverted:
    #   (1, 3) and (2, 2) -> (1, 1), (2, 1)
    violations, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        ref_rank, est_rank, transitive=True)
    assert violations == 2
    assert normalizer == 5.0

    # Non-transitively only one inversion remains, since the (1, 3)
    # pair was never counted.
    violations, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        ref_rank, est_rank, transitive=False)
    assert violations == 1
    assert normalizer == 3.0

    # A constant ranking yields an empty (zero) normalizer.
    flat = np.asarray([1, 1, 1, 1])
    violations, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        flat, flat, transitive=True)
    assert violations == 0
    assert normalizer == 0.0
| mit | 3d95896eaaa1cb3f9a29a3144bff04fc | 26.992063 | 76 | 0.489935 | 3.205392 | false | true | false | false |
craffel/mir_eval | mir_eval/key.py | 1 | 6857 | '''
Key Detection involves determining the underlying key (distribution of notes
and note transitions) in a piece of music. Key detection algorithms are
evaluated by comparing their estimated key to a ground-truth reference key and
reporting a score according to the relationship of the keys.
Conventions
-----------
Keys are represented as strings of the form ``'(key) (mode)'``, e.g. ``'C#
major'`` or ``'Fb minor'``. The case of the key is ignored. Note that certain
key strings are equivalent, e.g. ``'C# major'`` and ``'Db major'``. The mode
may only be specified as either ``'major'`` or ``'minor'``, no other mode
strings will be accepted.
Metrics
-------
* :func:`mir_eval.key.weighted_score`: Heuristic scoring of the relation of two
keys.
'''
import collections
from . import util
# Semitone offset from C for every recognized key name; 'x' marks an
# uncategorizable key and carries no pitch class.
KEY_TO_SEMITONE = {'c': 0, 'c#': 1, 'db': 1, 'd': 2, 'd#': 3, 'eb': 3, 'e': 4,
                   'f': 5, 'f#': 6, 'gb': 6, 'g': 7, 'g#': 8, 'ab': 8, 'a': 9,
                   'a#': 10, 'bb': 10, 'b': 11, 'x': None}


def validate_key(key):
    """Checks that a key is well-formatted, e.g. in the form ``'C# major'``.
    The Key can be 'X' if it is not possible to categorize the Key and mode
    can be 'other' if it can't be categorized as major or minor.

    Parameters
    ----------
    key : str
        Key to verify
    """
    tokens = key.split()

    # Either two tokens ("<key> <mode>") or the single token 'X'.
    if len(tokens) != 2 \
            and not (tokens and key.lower() == 'x'):
        raise ValueError("'{}' is not in the form '(key) (mode)' "
                         "or 'X'".format(key))

    if key.lower() == 'x':
        return

    name, mode = tokens
    # 'X' may only appear on its own; "x minor" etc. is malformed.
    if name.lower() == 'x':
        raise ValueError(
            "Mode {} is invalid; 'X' (Uncategorized) "
            "doesn't have mode".format(mode))
    if name.lower() not in KEY_TO_SEMITONE:
        raise ValueError(
            "Key {} is invalid; should be e.g. D or C# or Eb or "
            "X (Uncategorized)".format(name))

    if mode not in ('major', 'minor', 'other'):
        raise ValueError(
            "Mode '{}' is invalid; must be 'major', 'minor' or 'other'"
            .format(mode))
def validate(reference_key, estimated_key):
    """Checks that the input annotations to a metric are valid key strings and
    throws helpful errors if not.

    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.
    """
    validate_key(reference_key)
    validate_key(estimated_key)
def split_key_string(key):
    """Splits a key string (of the form, e.g. ``'C# major'``), into a tuple of
    ``(key, mode)`` where ``key`` is is an integer representing the semitone
    distance from C.

    Parameters
    ----------
    key : str
        String representing a key.

    Returns
    -------
    key : int
        Number of semitones above C.
    mode : str
        String representing the mode.
    """
    # 'X' has no mode; KEY_TO_SEMITONE maps it to None as well.
    if key.lower() == 'x':
        return KEY_TO_SEMITONE[key.lower()], None

    name, mode = key.split()
    return KEY_TO_SEMITONE[name.lower()], mode
def weighted_score(reference_key, estimated_key):
    """Computes a heuristic score which is weighted according to the
    relationship of the reference and estimated key, as follows:

    +------------------------------------------------------+-------+
    | Relationship                                         | Score |
    +------------------------------------------------------+-------+
    | Same key and mode                                    | 1.0   |
    +------------------------------------------------------+-------+
    | Estimated key is a perfect fifth above reference key | 0.5   |
    +------------------------------------------------------+-------+
    | Relative major/minor (same key signature)            | 0.3   |
    +------------------------------------------------------+-------+
    | Parallel major/minor (same key)                      | 0.2   |
    +------------------------------------------------------+-------+
    | Other                                                | 0.0   |
    +------------------------------------------------------+-------+

    Examples
    --------
    >>> ref_key = mir_eval.io.load_key('ref.txt')
    >>> est_key = mir_eval.io.load_key('est.txt')
    >>> score = mir_eval.key.weighted_score(ref_key, est_key)

    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.

    Returns
    -------
    score : float
        Score representing how closely related the keys are.
    """
    validate(reference_key, estimated_key)

    ref_semitone, ref_mode = split_key_string(reference_key)
    est_semitone, est_mode = split_key_string(estimated_key)

    # Exact match of both key and mode.
    if ref_semitone == est_semitone and ref_mode == est_mode:
        return 1.

    # An uncategorized key ('X') can only score via the exact match above.
    if ref_semitone is None or est_semitone is None:
        return 0.

    # Semitones from the reference key up to the estimated key.
    interval = (est_semitone - ref_semitone) % 12

    # Same mode, estimate a perfect fifth (7 semitones) above the reference.
    if est_mode == ref_mode and interval == 7:
        return 0.5

    # Estimate is the relative minor of a major reference (9 semitones up).
    if est_mode != ref_mode == 'major' and interval == 9:
        return 0.3

    # Estimate is the relative major of a minor reference (3 semitones up).
    if est_mode != ref_mode == 'minor' and interval == 3:
        return 0.3

    # Parallel major/minor: same key name, different mode.
    if est_mode != ref_mode and ref_semitone == est_semitone:
        return 0.2

    # No scored relationship.
    return 0.
def evaluate(reference_key, estimated_key, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.

    Examples
    --------
    >>> ref_key = mir_eval.io.load_key('reference.txt')
    >>> est_key = mir_eval.io.load_key('estimated.txt')
    >>> scores = mir_eval.key.evaluate(ref_key, est_key)

    Parameters
    ----------
    reference_key : str
        Reference key string.
    estimated_key : str
        Estimated key string.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Compute all metrics
    scores = collections.OrderedDict()

    # Forward **kwargs so documented keyword arguments actually reach the
    # metric, mirroring evaluate() in the other submodules (e.g. tempo);
    # filter_kwargs drops anything weighted_score does not accept.
    scores['Weighted Score'] = util.filter_kwargs(
        weighted_score, reference_key, estimated_key, **kwargs)

    return scores
| mit | 0e32bf67aed1531d3ce5bdaba65bfa41 | 33.807107 | 79 | 0.540907 | 4.083979 | false | false | false | false |
craffel/mir_eval | mir_eval/tempo.py | 1 | 5348 | '''
The goal of a tempo estimation algorithm is to automatically detect the tempo
of a piece of music, measured in beats per minute (BPM).
See http://www.music-ir.org/mirex/wiki/2014:Audio_Tempo_Estimation for a
description of the task and evaluation criteria.
Conventions
-----------
Reference and estimated tempi should be positive, and provided in ascending
order as a numpy array of length 2.
The weighting value from the reference must be a float in the range [0, 1].
Metrics
-------
* :func:`mir_eval.tempo.detection`: Relative error, hits, and weighted
precision of tempo estimation.
'''
import warnings
import numpy as np
import collections
from . import util
def validate_tempi(tempi, reference=True):
    """Checks that there are two non-negative tempi.
    For a reference value, at least one tempo has to be greater than zero.

    Parameters
    ----------
    tempi : np.ndarray
        length-2 array of tempo, in bpm
    reference : bool
        indicates a reference value
    """
    if tempi.size != 2:
        raise ValueError('tempi must have exactly two values')

    finite = np.all(np.isfinite(tempi))
    non_negative = not np.any(tempi < 0)
    if not (finite and non_negative):
        raise ValueError('tempi={} must be non-negative numbers'.format(tempi))

    # With non-negativity already enforced, "no positive value" is the
    # same as "all zeros".
    if reference and not np.any(tempi > 0):
        raise ValueError('reference tempi={} must have one'
                         ' value greater than zero'.format(tempi))
def validate(reference_tempi, reference_weight, estimated_tempi):
    """Checks that the input annotations to a metric look like valid tempo
    annotations.

    Parameters
    ----------
    reference_tempi : np.ndarray
        reference tempo values, in bpm
    reference_weight : float
        perceptual weight of slow vs fast in reference
    estimated_tempi : np.ndarray
        estimated tempo values, in bpm
    """
    # The reference must contain at least one positive tempo; the
    # estimate is allowed to be all zeros.
    validate_tempi(reference_tempi, reference=True)
    validate_tempi(estimated_tempi, reference=False)

    if reference_weight < 0 or reference_weight > 1:
        raise ValueError('Reference weight must lie in range [0, 1]')
def detection(reference_tempi, reference_weight, estimated_tempi, tol=0.08):
    """Compute the tempo detection accuracy metric.

    Parameters
    ----------
    reference_tempi : np.ndarray, shape=(2,)
        Two non-negative reference tempi
    reference_weight : float > 0
        The relative strength of ``reference_tempi[0]`` vs
        ``reference_tempi[1]``.
    estimated_tempi : np.ndarray, shape=(2,)
        Two non-negative estimated tempi.
    tol : float in [0, 1]:
        The maximum allowable deviation from a reference tempo to
        count as a hit.
        ``|est_t - ref_t| <= tol * ref_t``
        (Default value = 0.08)

    Returns
    -------
    p_score : float in [0, 1]
        Weighted average of recalls:
        ``reference_weight * hits[0] + (1 - reference_weight) * hits[1]``
    one_correct : bool
        True if at least one reference tempo was correctly estimated
    both_correct : bool
        True if both reference tempi were correctly estimated

    Raises
    ------
    ValueError
        If the input tempi are ill-formed
        If the reference weight is not in the range [0, 1]
        If ``tol < 0`` or ``tol > 1``.
    """
    validate(reference_tempi, reference_weight, estimated_tempi)

    if tol < 0 or tol > 1:
        raise ValueError('invalid tolerance {}: must lie in the range '
                         '[0, 1]'.format(tol))

    if tol == 0.:
        warnings.warn('A tolerance of 0.0 may not '
                      'lead to the results you expect.')

    # A reference tempo counts as a hit when the closest estimate falls
    # within the relative tolerance.  Zero reference tempi are skipped.
    hits = [False, False]
    for i, ref_t in enumerate(reference_tempi):
        if ref_t > 0:
            relative_error = np.min(np.abs(ref_t - estimated_tempi) /
                                    float(ref_t))
            hits[i] = relative_error <= tol

    p_score = reference_weight * hits[0] + (1.0 - reference_weight) * hits[1]
    one_correct = bool(max(hits))
    both_correct = bool(min(hits))

    return p_score, one_correct, both_correct
def evaluate(reference_tempi, reference_weight, estimated_tempi, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.

    Parameters
    ----------
    reference_tempi : np.ndarray, shape=(2,)
        Two non-negative reference tempi
    reference_weight : float > 0
        The relative strength of ``reference_tempi[0]`` vs
        ``reference_tempi[1]``.
    estimated_tempi : np.ndarray, shape=(2,)
        Two non-negative estimated tempi.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Compute all metrics; filter_kwargs forwards only the keyword
    # arguments that detection() accepts.
    scores = collections.OrderedDict()

    p_score, one_correct, both_correct = util.filter_kwargs(
        detection, reference_tempi, reference_weight, estimated_tempi,
        **kwargs)

    scores['P-score'] = p_score
    scores['One-correct'] = one_correct
    scores['Both-correct'] = both_correct

    return scores
| mit | ed85adce793a9ae76ad5b90ab50b94fa | 28.224044 | 79 | 0.618923 | 4.107527 | false | false | false | false |
jorvis/biocode | fasta/report_or_replace_nonstandard_residues.py | 1 | 6164 | #!/usr/bin/env python3
import argparse
import sys
from biocode import utils
'''
Description:
Some software is not written to handle the ambiguity residues defined by IUPAC. The
residue "J" in a protein sequence, for example, can represent leucine or isoleucine but
can cause some software to fail. You can use this script to either produce a report
on the non-standard residues in a multifasta file, or to optionally replace those
residues with user-specified ones.
Input:
This script can essentially be used in two modes:
1. Report: Define the -i (input), -t (type) and -l (list) options to read through your
file and generate an output file showing each sequence ID which contains ambiguous
bases and a list of those bases found (see Output section below.)
EXAMPLE: Create foo.report listing the non-standard residues in a nucleotide multi-fasta
./report_or_replace_nonstandard_residues.py -i file.fna -t n -l foo.report
2. Replace: You can pass additional options to replace (-r) any given residue with (-w)
another residue of your choosing.
EXAMPLE: Replace all J residues with L in the protein file 'file.faa' and write results
to a new 'file.fixed.faa'
./report_or_replace_nonstandard_residues.py -i file.faa -t p -r J -w L -o file.fixed.faa
Output:
If you define the --list option, an output file will be created where each line represents
a sequence containing ambiguous residues. Tab separated, each additional column contains
the ambiguous residues found within along with the count of each. Example:
cgd6_270 B:1 J:3
cgd3_1180 B:2
cgd4_4160 B:1 U:1
PVX_173270 J:1
If replacing characters using the -r and -w options, a new file defined by -o is created
with the user-specified residues replaced. All IDs and headers are retained and sequences
residues are written 60-characters per line.
If the --print_locations flag is passed, the locations of every non-standard residue is
printed to STDERR:
EXAMPLE: $ ./report_or_replace_nonstandard_residues.py -i test.fna -t n -pl
Molecule gnl|WGS:AAGK|NW_876245.1|gb|AAGK01000005 contains residue K at position 36211
Molecule gnl|WGS:AAGK|NW_876247.1|gb|AAGK01000003 contains residue R at position 4066
Molecule gnl|WGS:AAGK|NW_876247.1|gb|AAGK01000003 contains residue W at position 4096
'''
def main():
    """Command-line entry point: report on, and optionally replace,
    non-standard residues in a multi-FASTA file.
    """
    parser = argparse.ArgumentParser( description='Reports on non-standard characters in multifasta files and can optionally replace residues')

    parser.add_argument('-i', '--input', type=str, required=True, help='Path to an input FASTA file' )
    parser.add_argument('-t', '--type', type=str, required=True, choices=('n', 'p'), help='Either n for nucleotide or p for protein')
    parser.add_argument('-o', '--output', type=str, required=False, help='Path to an output FASTA file to be created if doing replacement' )
    parser.add_argument('-pl', '--print_locations', dest='print_locations', action='store_true', help='If passed, will report coordinate of each non-standard residue on STDERR' )
    parser.add_argument('-r', '--replace', type=str, required=False, help='Replace this character with the one defined by --with_' )
    parser.add_argument('-w', '--with_', type=str, required=False, help='This character or set replaces all instances of the one found in --replace' )
    parser.add_argument('-l', '--list', type=str, required=False, help='Optional file of IDs where non-standard residues were detected or replaced' )
    parser.add_argument('-g', '--ignore', type=str, required=False, default='N*X', help='List of characters to not report as non-standard. Default = the universal ambiguity bases (N, X) or the end-of-translation stop for proteins (*)' )
    parser.set_defaults(print_locations=False)
    args = parser.parse_args()

    if args.output is None:
        out_fh = sys.stdout
    else:
        out_fh = open( args.output, 'wt' )

    ## --replace and --with_ are only meaningful together
    if args.replace is not None and args.with_ is None:
        raise Exception("ERROR: You must pass --with_ when passing --replace")

    if args.with_ is not None and args.replace is None:
        raise Exception("ERROR: You must pass --replace when passing --with_")

    seqs = utils.fasta_dict_from_file(args.input)

    ## standard characters (depends on the type of sequence)
    standard_residues = dict()

    if args.type == 'n':
        for base in list("ATGCU"):
            standard_residues[base] = 1
    else:
        for base in list("ACDEFGHIKLMNPQRSTVWY"):
            standard_residues[base] = 1

    ## only opened if a report was requested; closed explicitly at the end
    list_fh = None
    if args.list is not None:
        list_fh = open(args.list, 'wt')

    ## build the lookup of characters to ignore
    ignore_residues = dict()
    for residue in list(args.ignore):
        ignore_residues[residue.upper()] = None

    ## process the sequences
    for seq_id in seqs:
        i = 0
        seq = seqs[seq_id]
        bad_chars = dict()   # residue -> occurrence count within this sequence

        for base in list(seq['s']):
            i += 1
            ubase = base.upper()
            if ubase not in standard_residues and ubase not in ignore_residues:
                if ubase in bad_chars:
                    bad_chars[ubase] += 1
                else:
                    bad_chars[ubase] = 1

                if args.print_locations == True:
                    print("Molecule {0} contains residue {1} at position {2}".format(seq_id, ubase, i), file=sys.stderr)

        if list_fh is not None and len(bad_chars) > 0:
            list_fh.write("{0}".format(seq_id))
            for base in bad_chars:
                list_fh.write( "\t{0}:{1}".format(base, bad_chars[base]) )
            list_fh.write("\n")

        if args.replace is not None:
            seq['s'] = seq['s'].replace(args.replace, args.with_)

        out_fh.write( ">{0} {1}\n".format(seq_id, seq['h']) )
        for i in range(0, len(seq['s']), 60):
            out_fh.write(seq['s'][i : i + 60] + "\n")

    ## close handles explicitly so all buffered output is flushed
    if list_fh is not None:
        list_fh.close()

    if out_fh is not sys.stdout:
        out_fh.close()


if __name__ == '__main__':
    main()
| mit | c579ca2a10a09722089aa11cd8afecf3 | 40.369128 | 237 | 0.652012 | 3.611013 | false | false | false | false |
jorvis/biocode | sandbox/jorvis/collapse_gene_coordinates_to_mRNA_range.py | 1 | 2985 | #!/usr/bin/env python3
'''
This script was needed when some tools (such as WebApollo) were generating gene models where the
gene coordinates extended past the boundaries of the mRNA child feature, and it was required
to trim them to a matching range.
INPUT GFF3
The encoding convention for gene models is:
Supercontig_3.10 EVM gene 23723 28269 . + . ID=evm.TU.Supercontig_3.10.9;Name=EVM%20prediction%20Supercontig_3.10.9
Supercontig_3.10 EVM mRNA 23723 28269 . + . ID=evm.model.Supercontig_3.10.9;Parent=evm.TU.Supercontig_3.10.9
Supercontig_3.10 EVM exon 23723 24696 . + . ID=evm.model.Supercontig_3.10.9.exon1;Parent=evm.model.Supercontig_3.10.9
Supercontig_3.10 EVM CDS 23723 24696 . + 0 ID=cds.evm.model.Supercontig_3.10.9;Parent=evm.model.Supercontig_3.10.9
Supercontig_3.10 EVM exon 24756 28269 . + . ID=evm.model.Supercontig_3.10.9.exon2;Parent=evm.model.Supercontig_3.10.9
Supercontig_3.10 EVM CDS 24756 28269 . + 1 ID=cds.evm.model.Supercontig_3.10.9;Parent=evm.model.Supercontig_3.10.9
Follow the GFF3 specification!
Author: Joshua Orvis
'''
import argparse
import gff
def main():
    """Shrink each gene's coordinate span to the union of its mRNA children."""
    parser = argparse.ArgumentParser( description='Shortens gene feature coordinates to their longest child mRNA')

    ## output file to be written
    parser.add_argument('-i', '--input_gff3', type=str, required=True, help='Path to the input GFF3' )
    parser.add_argument('-o', '--output_gff3', type=str, required=True, help='Path to GFF3 output file to be created')
    args = parser.parse_args()

    (assemblies, features) = gff.get_gff3_features(args.input_gff3)

    # Use a context manager so the output file is flushed and closed even
    # if an error occurs partway through.
    with open(args.output_gff3, 'wt') as gff_out:
        gff_out.write("##gff-version 3\n")

        for assembly_id in assemblies:
            for gene in assemblies[assembly_id].genes():
                gene_loc = gene.location()

                # track the outer boundaries of all child mRNAs
                min_coord = None
                max_coord = None

                mRNAs = gene.mRNAs()
                if len(mRNAs) >= 1:
                    for mRNA in mRNAs:
                        mRNA_loc = mRNA.location()

                        if min_coord is None or mRNA_loc.fmin < min_coord:
                            min_coord = mRNA_loc.fmin

                        if max_coord is None or mRNA_loc.fmax > max_coord:
                            max_coord = mRNA_loc.fmax

                    if min_coord != gene_loc.fmin or max_coord != gene_loc.fmax:
                        print("DEBUG: Changed gene {0} from {1}-{2} to {3}-{4}".format(gene.id, gene_loc.fmin, gene_loc.fmax, min_coord, max_coord))
                        gene_loc.fmin = min_coord
                        gene_loc.fmax = max_coord

                gene.print_as(fh=gff_out, source='IGS', format='gff3')


if __name__ == '__main__':
    main()
| mit | 99ccb7b6157e6917ed1e1fa82407d86c | 36.78481 | 153 | 0.591625 | 3.142105 | false | false | false | false |
jorvis/biocode | gff/convert_gff3_to_ncbi_tbl.py | 1 | 3924 | #!/usr/bin/env python3
"""
This script can be used to transform a GFF3 file into a TBL file suitable for submitting
to NCBI. Its encoding is meant to include prokaryotes and eukaryotes, though it was
written/tested first for eukaryotes.
Note, you'll need to modify the IDs if you're doing an update to an already existing
submission, since this generates them automatically base on your prefix.
The options:
--input_file: Must follow the GFF3 specification
--output_base: Will be used to write NCBI table and fasta formatted files. TBL defined here:
http://www.ncbi.nlm.nih.gov/genbank/eukaryotic_genome_submission_annotation
--lab_name: A short, unique identifier, no spaces, which will form part of the exported
protein and transcript IDs. See the documentation for examples:
Documentation: http://www.ncbi.nlm.nih.gov/genbank/eukaryotic_genome_submission_annotation#protein_id
--ncbi_acc_prefix: Assigned by NCBI, this is the accession prefix for this submission.
--fasta: NCBI has strict requirements for feature identifiers, and this script will reformat them
unless you have already prepared them correctly. If corrections need to be made, a new
corresponding FASTA file will also be written, which requires this input source fasta file.
--go_obo: Optional. If your GFF3 annotation includes GO attributes, this is required because
the GFF3 has only the ID but the TBL file also requires the descriptor and class, so
this enables a lookup of these using the annotated GO ids.
"""
import argparse
from biocode import utils, gff, things, tbl
def main():
    """Convert a GFF3 annotation into an NCBI TBL file plus a FASTA file."""
    parser = argparse.ArgumentParser( description='Create a TBL file for submission to NCBI from GFF3')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read' )
    parser.add_argument('-o', '--output_base', type=str, required=True, help='Base name of output files to be created' )
    parser.add_argument('-ln', '--lab_name', type=str, required=True, help='Required by NCBI to identify the submitting group' )
    parser.add_argument('-nap', '--ncbi_acc_prefix', type=str, required=True, help='Required and assigned by NCBI' )
    parser.add_argument('-gf', '--genomic_fasta', type=str, required=False, help='FASTA file of genomic sequence, if not embedded in GFF' )
    parser.add_argument('-go', '--go_obo', type=str, required=False, help='GO terms will not be exported unless you pass the path to a GO OBO file')
    args = parser.parse_args()

    (assemblies, features) = gff.get_gff3_features(args.input_file)

    if args.genomic_fasta is not None:
        utils.add_assembly_fasta(assemblies, args.genomic_fasta)

    ## First decide whether the IDs need reformatting at all.  Checking
    #   before renaming avoids mutating some assembly IDs and then bailing
    #   out partway through when a pre-formatted ID is encountered (the
    #   previous break-in-loop approach could leave a mix of renamed and
    #   original IDs on mixed input).
    # pre-formatted IDs are like this: gnl|WGS:XXXX|SeqID|gb|XXXX01xxxxxx
    reformat_IDs = not any(asm_id.startswith('gnl|WGS:') for asm_id in assemblies)

    ## maps old IDs (like tp.assembly.567468735.1) to new ones (like AAGK01000001)
    asm_id_map = dict()

    if reformat_IDs:
        new_assemblies = dict()
        asm_num = 1

        for asm_id in assemblies:
            new_id = "gnl|WGS:{0}|SeqID|gb|{0}01{1:06d}".format(args.ncbi_acc_prefix, asm_num)
            asm_id_map[asm_id] = new_id
            asm_num += 1
            new_assemblies[new_id] = assemblies[asm_id]
            new_assemblies[new_id].id = new_id

        assemblies = new_assemblies

    # use a context manager so the TBL output is flushed and closed
    with open("{0}.tbl".format(args.output_base), 'wt') as ofh:
        tbl.print_tbl_from_assemblies(assemblies=assemblies, ofh=ofh, go_obo=args.go_obo, lab_name=args.lab_name)

    mset = things.AssemblySet()
    mset.load_from_dict(assemblies)
    mset.write_fasta(path="{0}.fna".format(args.output_base))


if __name__ == '__main__':
    main()
| mit | 9f44446c567a1754c81d66d21ff0a106 | 39.875 | 148 | 0.681702 | 3.409209 | false | false | false | false |
jorvis/biocode | gff/convert_blast_btab_to_gff3.py | 3 | 6536 | #!/usr/bin/env python3
"""
Converts the btab output of Ergatis ncbi-blast to GFF3
Example input:
GluR_4 3300 BLASTN Trinity.fasta comp93586_c0_seq1 26 2208 2180 1 99.6 0.0 2139 4240 len=2337 path=[1:0-1874 1876:1875-1882 1884:1883-2336] 1 Plus 2337 0.0 0.0
GluR_4 3300 BLASTN Trinity.fasta comp103174_c0_seq1 2415 3277 2105 1245 98.7 0.0 819 1624 len=2105 path=[2313:0-204 2518:205-302 2616:303-366 2680:367-367 2681:368-390 2704:391-1395 3710:1396-1752 4067:1753-1763 4078:1764-2001 4316:2002-2081 5675:2082-2104] 1 Plus 2105 0.0 0.0
GluR_4 3300 BLASTN Trinity.fasta comp103174_c0_seq2 2186 2439 1 254 100.0 100.0 254 504 len=254 path=[1:0-168 170:169-200 202:201-253] 1 Plus 254 1e-140 1e-140
PICK1 1242 BLASTN Trinity.fasta comp3011803_c0_seq1 45 170 159 34 90.5 0.0 78 155 len=517 path=[495:0-516] 1 Plus 517 6e-36 6e-36
RICTOR 5676 BLASTN Trinity.fasta comp102759_c0_seq1 182 4349 6303 2136 99.1 0.0 4048 8025 len=6303 path=[1:0-1699 1701:1700-1706 1708:1707-4017 4019:4018-4024 4026:4025-4070 4072:4071-4075 4077:4076-6302] 1 Plus 6303 0.0 0.0
RICTOR 5676 BLASTN Trinity.fasta comp102759_c0_seq1 4428 5676 2057 809 99.9 0.0 1245 2468 len=6303 path=[1:0-1699 1701:1700-1706 1708:1707-4017 4019:4018-4024 4026:4025-4070 4072:4071-4075 4077:4076-6302] 1
The columns are:
1 query_name
2 date
3 query_length
4 algorithm
5 database_name
6 hit_name
7 qry_start
8 qry_end
9 hit_start
10 hit_end
11 percent_identity
12 percent_similarity
13 raw_score
14 bit_score
15 NULL
16 hit_description
17 blast_frame
18 qry_strand (Plus | Minus)
19 hit_length
20 e_value
21 p_value
Example output:
WARNING:
Currently only tested with blastn
"""
import argparse
import os
from operator import itemgetter
## running counters used to mint unique, document-wide GFF3 feature IDs
next_ids = {'match':1, 'match_part':1 }

def main():
    """Read btab-formatted BLAST output and write chained match/match_part GFF3."""
    parser = argparse.ArgumentParser( description='NCBI-BLAST (btab) converter to GFF3 format')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to parse' )
    parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
    parser.add_argument('-p', '--perc_identity_cutoff', type=float, required=False, help='Filters on the perc identity of each HSP' )
    args = parser.parse_args()

    algorithm = None
    current_qry_id = None
    current_match_parts = list()

    # context managers so both handles are closed and the output is fully
    # flushed even if parsing fails partway through
    with open(args.output_file, 'w') as ofh, open(args.input_file, 'r') as ifh:
        ofh.write("##gff-version 3\n")

        for line in ifh:
            cols = line.split("\t")
            qry_id = cols[0]

            segment = { 'qry_id':qry_id, 'qry_start':int(cols[6]), 'qry_end':int(cols[7]), \
                        'hit_id':cols[5], 'hit_start':int(cols[8]), 'hit_end':int(cols[9]), \
                        'pct_id':float(cols[10]), 'strand':cols[17], 'eval':float(cols[19]) }

            if algorithm is None:
                algorithm = cols[3]

            if qry_id != current_qry_id:
                # this is a new chain, export the previous one
                if current_qry_id is not None:
                    export_match( current_match_parts, ofh, algorithm, args.perc_identity_cutoff )

                current_match_parts = list()
                current_qry_id = qry_id

            current_match_parts.append( segment )

        ## make sure to do the last one
        if current_qry_id is not None:
            export_match( current_match_parts, ofh, algorithm, args.perc_identity_cutoff )
def global_pct_id( segments ):
    """Return the overall percent identity of a chain of HSP segments.

    Each segment's pct_id is weighted by its query-span length.

    NOTE(review): this helper was referenced by export_match() but never
    defined anywhere in the file, so any run using --perc_identity_cutoff
    crashed with a NameError.  A length-weighted average is a reasonable
    reconstruction -- confirm against the upstream biocode implementation.
    """
    total_length = 0
    weighted_sum = 0.0

    for segment in segments:
        length = abs(segment['qry_end'] - segment['qry_start']) + 1
        weighted_sum += segment['pct_id'] * length
        total_length += length

    if total_length == 0:
        return 0.0

    return weighted_sum / total_length


def export_match( segments, out, source, perc_id_cutoff ):
    """Write one GFF3 'match' feature plus its 'match_part' children.

    segments : list of dicts describing the HSPs of one query/hit chain
    out : open output filehandle
    source : value for GFF3 column 2 (the algorithm name)
    perc_id_cutoff : if not None, chains whose weighted percent identity
        falls below this value are skipped (function returns False)

    Returns True when the chain was written, False when filtered out.
    """
    contig_min = None
    contig_max = None
    contig_id = None
    hit_id = None
    hit_min = None
    hit_max = None
    segment_strand = '+'

    # compute the outer query/hit boundaries of the whole chain
    for segment in segments:
        segment_min = min( segment['qry_start'], segment['qry_end'] )
        segment_max = max( segment['qry_start'], segment['qry_end'] )
        segment_hit_min = min( segment['hit_start'], segment['hit_end'] )
        segment_hit_max = max( segment['hit_start'], segment['hit_end'] )

        if segment['strand'] == 'Minus':
            segment_strand = '-'

        if contig_min is None or segment_min < contig_min:
            contig_min = segment_min
            hit_min = segment_hit_min

        if contig_max is None or segment_max > contig_max:
            contig_max = segment_max
            hit_max = segment_hit_max

        if contig_id is None:
            contig_id = segment['qry_id']

        if hit_id is None:
            hit_id = segment['hit_id']

    ## does this score above any user-defined cutoffs.
    if perc_id_cutoff is not None:
        pct_id = global_pct_id( segments )

        if pct_id < perc_id_cutoff:
            return False

    ## write the match feature
    match_id = "match.{0}".format(next_ids['match'])
    next_ids['match'] += 1
    data_column = "ID={0};Target={1} {2} {3}".format(match_id, hit_id, hit_min, hit_max)
    out.write( "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\n".format( \
            contig_id, source, 'match', contig_min, contig_max, '.', segment_strand, '.', data_column
            ) )

    ## write the match_parts
    for segment in sorted(segments, key=itemgetter('qry_start', 'qry_end')):
        match_part_id = "match_part.{0}".format(next_ids['match_part'])
        next_ids['match_part'] += 1
        mp_start = min( segment['qry_start'], segment['qry_end'] )
        mp_hit_start = min(segment['hit_start'], segment['hit_end'])
        mp_end = max( segment['qry_start'], segment['qry_end'] )
        mp_hit_end = max( segment['hit_start'], segment['hit_end'] )

        data_column = "ID={0};Parent={1};Target={2} {3} {4}".format(match_part_id, match_id, hit_id, \
                mp_hit_start, mp_hit_end)
        out.write( "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\n".format( \
                contig_id, source, 'match_part', mp_start, mp_end, \
                '.', segment_strand, '.', data_column
                ) )

    return True


if __name__ == '__main__':
    main()
| mit | fc36944d3c886db3b0ae8145ec9837a7 | 34.139785 | 347 | 0.561965 | 2.983113 | false | false | false | false |
jorvis/biocode | sandbox/jorvis/create_glimmerHMM_training_files_from_gff.py | 1 | 2685 | #!/usr/bin/env python3
"""
The GlimmerHMM training documentation says that two files are needed for training. One
is a multi-fasta file of what are presumably transcript sequences (this is never stated,
so it could be CDS?) and the coordinates of the exons relative to the same sequences.
This script generates that given an input GFF3. Currently the mRNA features are written
to the FASTA.
This script definitely assumes that features have been sorted.
"""
import argparse
import sys
from biocode import gff
def main():
    """Walk a sorted GFF3 file and emit GlimmerHMM exon-coordinate lines,
    one transcript at a time, via write_transcript()."""
    # The original passed this string positionally, making it the parser's
    # prog name (and it was a copy-paste from an unrelated script); pass a
    # correct description instead.
    parser = argparse.ArgumentParser(
        description='Exports GlimmerHMM training exon coordinates from a GFF3 file')

    ## output file to be written
    parser.add_argument('-i', '--input_gff', type=str, required=True, help='GFF file of source annotation' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Optional output file path (else STDOUT)' )
    args = parser.parse_args()

    ## output will either be a file or STDOUT
    fout = sys.stdout
    if args.output_file is not None:
        fout = open(args.output_file, 'wt')

    current_mRNA_id = None
    current_mol_id = None
    current_fragments = list()
    current_direction = None

    for line in open(args.input_gff):
        line = line.rstrip()
        cols = line.split("\t")

        if len(cols) != 9:
            continue

        # grab the ID from column 9, if any
        feat_id = gff.column_9_value(cols[8], 'ID')
        feat_type = cols[2]

        if feat_type == 'mRNA':
            if current_mRNA_id is not None and feat_id != current_mRNA_id:
                # purge the existing one first
                write_transcript(fout, current_mol_id, current_fragments, current_direction)
                current_fragments = list()

            current_mRNA_id = feat_id
            current_mol_id = cols[0]
            current_direction = cols[6]

        elif feat_type == 'exon':
            # store coordinates 5'->3' so write_transcript can emit them in order
            if cols[6] == '+':
                current_fragments.append({'start':cols[3], 'end':cols[4]})
            else:
                current_fragments.append({'start':cols[4], 'end':cols[3]})

    # Flush the final transcript, but only if one was actually seen; the
    # previous unconditional call crashed (direction=None) on inputs with
    # no mRNA features.
    if current_mRNA_id is not None:
        write_transcript(fout, current_mol_id, current_fragments, current_direction)

    if fout is not sys.stdout:
        fout.close()
def write_transcript(fh, mol_id, fragments, direction):
    """
    Write one transcript's exon coordinates to *fh* in GlimmerHMM training
    format: one 'mol_id start end' line per exon, then a blank separator line.
    Reverse-strand (-) transcripts have their exon list emitted in reverse
    order; any direction other than '+'/'-' raises an Exception.
    """
    if direction == '+':
        ordered = fragments
    elif direction == '-':
        ordered = reversed(fragments)
    else:
        raise Exception("ERROR: unrecognized direction: {0}".format(direction))

    for fragment in ordered:
        fh.write("{0} {1} {2}\n".format(mol_id, fragment['start'], fragment['end']))

    # blank line terminates this transcript's block
    fh.write("\n")
if __name__ == '__main__':
main()
| mit | a47baf125f5849090a38ed418e5dcd26 | 27.870968 | 121 | 0.605587 | 3.824786 | false | false | false | false |
jorvis/biocode | fasta/fasta_size_distribution_plot.py | 3 | 4191 | #!/usr/bin/env python3
import argparse
import matplotlib
# back-end options are here: http://matplotlib.sourceforge.net/faq/usage_faq.html#what-is-a-backend
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import os
import re
def fasta_entry_sizes(file):
    """
    Parse a (multi-)FASTA file and return a list of sequence lengths,
    one per entry, in file order.  Whitespace within sequence lines is
    ignored when counting residues.

    Bug fix vs. the original implementation: the old version appended a
    spurious 0-length entry when it hit the first '>' header and never
    recorded the length of the final sequence in the file.
    """
    seq_lengths = []
    current_length = 0   # residues counted for the entry currently being read
    in_entry = False     # becomes True once the first '>' header is seen

    for line in open(file, 'r'):
        if line.startswith('>'):
            # a new header closes out the previous entry, if any
            if in_entry:
                seq_lengths.append(current_length)
            in_entry = True
            current_length = 0
        elif in_entry:
            # strip ALL whitespace (including the newline) before counting
            current_length += len(re.sub(r'\s', '', line))

    # don't forget the last entry in the file
    if in_entry:
        seq_lengths.append(current_length)

    return seq_lengths
def get_legend_labels(label_arg, file_count):
    """
    Split the comma-delimited --legend_names argument into a list of labels.

    Returns an empty list when no label argument was given.  Raises an
    Exception when the number of labels doesn't match the number of input
    files.
    """
    if label_arg is None:
        return []

    labels = label_arg.split(',')

    if len(labels) != file_count:
        raise Exception("Error: number of input files doesn't match number of labels specified in --legend_names")

    return labels
def main():
    """
    Command-line entry point: tallies sequence lengths from each input FASTA
    file, then renders a (optionally log-scaled) histogram of the size
    distribution to the requested output image file.
    """
    parser = argparse.ArgumentParser( description='Generate FASTA file(s) size distribution plot')
    parser.add_argument('fasta_files', metavar='N', type=str, nargs='+', help='Pass one or more FASTA files')
    parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
    parser.add_argument('-t', '--title', type=str, required=False, default='FASTA size distribution', \
        help='Pass a title for the graph')
    parser.add_argument('-b', '--bin_count', type=int, required=False, default=30, \
        help='Data will be placed into this many bins. This is the default behavior. ' + \
        'Alternatively, use --bin_size and --bin_max')
    parser.add_argument('-s', '--bin_size', type=int, required=False, default=0, \
        help='Instead of a --bin_count, use this to specify the size of your bins.')
    parser.add_argument('-m', '--bin_max', type=int, required=False, default=0, \
        help='If specifying --bin_size, you can optionally use this to limit the ' + \
        'maximum bound of your bins (prevents long tails in plots)')
    parser.add_argument('-l', '--legend_names', type=str, required=False, help='For a legend with labels ' + \
        'of each of your datasets, pass a comma-separated list with no spaces.')
    # NOTE(review): type=bool on an argparse option means ANY non-empty string
    # (including '-g False') parses as True -- confirm callers only pass flags
    # they mean to enable
    parser.add_argument('-g', '--log_scale', type=bool, required=False, default=False, help='Set to true for log10 Y scale')
    args = parser.parse_args()

    data_ranges = []          # one list of sequence lengths per input file
    fasta_files = args.fasta_files
    size_max = 0              # longest sequence seen across all inputs
    seqs_above_size_max = 0

    for fasta_file in fasta_files:
        print("INFO: parsing seq lengths in file: {0}".format(fasta_file))
        sizes = fasta_entry_sizes(fasta_file)
        print("INFO: {0} sequences found in {1}".format(len(sizes), fasta_file))
        data_ranges.append(sizes)

        this_max_size = max(sizes)
        if this_max_size > size_max:
            size_max = this_max_size

    ## calculate the bins. default is to use the bin_count
    bin_opt = args.bin_count
    if args.bin_size != 0:
        bin_opt = []
        for bin_min in range(args.bin_size, size_max, args.bin_size):
            if args.bin_max == 0 or args.bin_max > bin_min:
                bin_opt.append(bin_min)
            if args.bin_max > bin_min:
                # NOTE(review): this counts bin boundaries, not sequences, so
                # the 'data points above the range' report below looks suspect
                # -- confirm intent before relying on it
                seqs_above_size_max += 1

    plot.xlabel('Sequence size (bins)')
    plot.ylabel('Sequence counts')
    plot.title(args.title)

    # Fix: the 'normed' keyword was removed in matplotlib 3.1; the default
    # density=False matches the old normed=0 behavior exactly.
    if args.log_scale == True:
        n, bins, patches = plot.hist(data_ranges, bin_opt, histtype='bar', log=True)
    else:
        n, bins, patches = plot.hist(data_ranges, bin_opt, histtype='bar')

    plot.grid(True)

    legend_labels = get_legend_labels( args.legend_names, len(fasta_files) )
    if len(legend_labels) > 0:
        plot.legend(legend_labels)

    plot.savefig(args.output_file)
    print("INFO: there were {0} data points above the range defined in the histogram".format(seqs_above_size_max))
if __name__ == '__main__':
main()
| mit | e8c64f08097679e41bf6aca3b825e8df | 33.073171 | 124 | 0.59938 | 3.65388 | false | false | false | false |
jorvis/biocode | gff/report_gff3_statistics.py | 1 | 4576 | #!/usr/bin/env python3
'''
This script reports some basic statistics about a GFF3 file. It was
written with an initial focus on gene-structure containing content,
though can be expanded as desired.
The output is a tab-delimited file where the first column is the description
of a statistic and the second is the value.
Warning: If you parse this output you'll need to skip blank lines and any
which begin with the # symbol.
Follow the GFF3 specification!
Author: Joshua Orvis
'''
import argparse
import sys
from collections import defaultdict
from biocode import gff
def main():
    """
    Command-line entry point.

    Parses a GFF3 file (optionally with embedded FASTA), tallies counts and
    cumulative lengths of assemblies, genes, mRNAs, exons and CDS fragments,
    then writes a tab-delimited statistics report to the output file/STDOUT.
    """
    parser = argparse.ArgumentParser( description='Checks the CDS features against a genome sequence to report/correct phase columns.')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to the input GFF3' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to an output file to be created' )
    args = parser.parse_args()

    (assemblies, features) = gff.get_gff3_features(args.input_file)

    ## output will either be a file or STDOUT
    ofh = sys.stdout
    if args.output_file is not None:
        ofh = open(args.output_file, 'wt')

    # per-feature-type tallies: counts and summed lengths
    type_counts = defaultdict(int)
    type_lengths = defaultdict(int)
    assembly_lengths_found = False

    # key is number of exons, value is number of mRNAs with that many
    CDS_profile = defaultdict(int)

    for assembly_id in assemblies:
        type_counts['assembly'] += 1

        # assembly lengths are only known if the GFF embedded FASTA data
        if assemblies[assembly_id].length is not None:
            type_lengths['assembly'] += assemblies[assembly_id].length
            assembly_lengths_found = True

        for gene in assemblies[assembly_id].genes():
            type_counts['gene'] += 1
            type_lengths['gene'] += gene.length

            for mRNA in gene.mRNAs():
                type_counts['mRNA'] += 1
                type_lengths['mRNA'] += mRNA.length
                CDS_profile[mRNA.CDS_count()] += 1

                for exon in mRNA.exons():
                    type_counts['exon'] += 1
                    type_lengths['exon'] += exon.length

                for CDS in mRNA.CDSs():
                    type_counts['CDS fragments'] += 1
                    type_lengths['CDS fragments'] += CDS.length

    ofh.write("Assembly count\t{0}\n".format(type_counts['assembly']))
    if assembly_lengths_found:
        ofh.write("Assembly length\t{0}\n".format(type_lengths['assembly']))
    else:
        ofh.write("Assembly length\tN/A (no FASTA data in GFF?)\n")

    # NOTE(review): these divisions raise ZeroDivisionError when the GFF has
    # no genes/mRNAs/exons/CDS at all -- confirm inputs always contain models
    gene_length_mean = type_lengths['gene'] / type_counts['gene']
    mRNA_length_mean = type_lengths['mRNA'] / type_counts['mRNA']
    exon_length_mean = type_lengths['exon'] / type_counts['exon']
    CDS_length_mean = type_lengths['CDS fragments'] / type_counts['CDS fragments']
    mRNAs_per_gene_mean = type_counts['mRNA'] / type_counts['gene']
    exons_per_mRNA_mean = type_counts['exon'] / type_counts['mRNA']
    CDS_per_mRNA_mean = type_counts['CDS fragments'] / type_counts['mRNA']

    ofh.write("\nGene count\t{0}\n".format(type_counts['gene']))
    ofh.write("Gene length (mean)\t{0:.1f}\n".format(gene_length_mean))
    ofh.write("Gene length (sum)\t{0}\n".format(type_lengths['gene']))
    ofh.write("\nmRNA count\t{0}\n".format(type_counts['mRNA']))
    ofh.write("mRNA length (mean)\t{0:.1f}\n".format(mRNA_length_mean))
    ofh.write("mRNA length (sum)\t{0}\n".format(type_lengths['mRNA']))
    ofh.write("mRNAs per gene (mean)\t{:.1f}\n".format(mRNAs_per_gene_mean) )
    ofh.write("\nexon count\t{0}\n".format(type_counts['exon']))
    ofh.write("exon length (mean)\t{0:.1f}\n".format(exon_length_mean))
    ofh.write("exon length (sum)\t{0}\n".format(type_lengths['exon']))
    ofh.write("exons per mRNA (mean)\t{:.1f}\n".format(exons_per_mRNA_mean) )
    ofh.write("\nCDS count\t{0}\n".format(type_counts['CDS fragments']))
    ofh.write("CDS length (mean)\t{0:.1f}\n".format(CDS_length_mean))
    ofh.write("CDS fragment length (sum)\t{0}\n".format(type_lengths['CDS fragments']))
    ofh.write("CDS per mRNA (mean)\t{:.1f}\n".format(CDS_per_mRNA_mean) )

    # exon-count composition profile, one row per distinct CDS fragment count
    ofh.write("\n# CDS fragment composition profile: count<tab>percentage\n")
    for cds_count in sorted(CDS_profile):
        perc = (CDS_profile[cds_count] / type_counts['mRNA']) * 100
        ofh.write("mRNAs with {0} CDS\t{1}\t{2:.3}\n".format(cds_count, CDS_profile[cds_count], perc) )
if __name__ == '__main__':
main()
| mit | b649639990bb23948656e8f506959c84 | 37.133333 | 135 | 0.626967 | 3.25231 | false | false | false | false |
jorvis/biocode | fastq/split_interleaved_sequence_file.py | 3 | 7623 | #!/usr/bin/env python3
"""
OVERVIEW
Some analysis tasks produce an interleaved FASTA or FASTQ file (such as digital normalization).
Use this script to split that interleaved file back out into R1, R2 and singleton files.
The read mates will be kept in pairwise order within the R1 and R2 files.
The -o option defines the base name of the output files to which the directionality
and extensions will be added. So given the options:
-o ATCC_30222
-t fastq
The following files will be created:
ATCC_30222.R1.fastq
ATCC_30222.R1.fastq
ATCC_30222.single.fastq
Note: No format conversion will happen here. If your input is FASTA your output
will also be FASTA.
HEADERS
This script handles read headers in either of the two following formats:
@SN7001163:74:C0YGBACXX:1:1101:1099:2196 1:N:0:ATCACGA
@SN7001163:74:C0YGBACXX:1:1101:1099:2196/1
The first of these is what comes off our Illumina machines, with the direction being the first
number after the whitespace. The latter format with /1 or /2 is what most software expects.
READ ORDER
This script expects that any reads which have pairs will have the /1 direction first immediately
followed by the /2 mate. If one of these is absent the singlet will be written to the 'single'
file. If the /1 and /2 mates are not consecutive, they will both be written as singlets.
Original author: Priti Kumari
Heavy edits: Joshua Orvis
"""
import argparse
import os
import re
def interface():
    """Define and parse the command-line arguments; returns the parsed namespace."""
    arg_parser = argparse.ArgumentParser( description='Script to split output of Diginorm.')

    # the interleaved input to split, and whether it is fasta or fastq
    arg_parser.add_argument('-i', '--input_file',type=str, required=True, help='The interleaved fastq/fasta file to split.')
    arg_parser.add_argument('-t', '--input_type',type=str, required=True, help='The type of input file(fasta/fastq).')

    # base path/name from which the .R1/.R2/.single output names are derived
    arg_parser.add_argument('-o', '--output', type=str, required=True, help='Base path/name for the output files to be created' )

    return arg_parser.parse_args()
def process_fasta(args):
    """
    Split an interleaved FASTA file into .R1, .R2 and .single output files.

    Records are read two lines at a time (header + one sequence line), so the
    input must be strictly 2-line FASTA records whose headers end in /1 or /2.
    Consecutive /1 + /2 records with matching IDs go to the R1/R2 files; all
    other records are written as singletons.
    """
    import gzip  # fix: gzip was used below but never imported by this script

    read1_file = args.output + ".R1.fasta"
    read2_file = args.output + ".R2.fasta"
    single_file = args.output + ".single.fasta"

    # Open everything in TEXT mode.  Fixes: gzip handles were previously
    # opened binary ('rb'/'wb'), which broke every str operation below, and
    # plain files used the 'rU' mode that was removed in Python 3.11.
    if args.input_file.endswith('.gz'):
        inp = gzip.open(args.input_file, 'rt')
        fout1 = gzip.open(read1_file, 'wt')
        fout2 = gzip.open(read2_file, 'wt')
        fout3 = gzip.open(single_file, 'wt')
    else:
        inp = open(args.input_file, 'r')
        fout1 = open(read1_file, 'w')
        fout2 = open(read2_file, 'w')
        fout3 = open(single_file, 'w')

    # flag == 1 means Read1 holds a carried-over /1 record from the previous
    # cycle (its mate candidate turned out not to be its /2)
    flag = 0

    while 1:
        if flag == 0:
            Read1 = ''
        # Fix: Read2 must be cleared on EVERY cycle; previously it retained
        # the prior record's lines when flag == 1, corrupting the pairing.
        Read2 = ''
        c = 2
        d = 2

        if flag == 0:
            # read the next 2-line record (header + sequence) into Read1
            while (c > 0):
                inp_line = inp.readline()
                Read1 += inp_line
                c = c - 1

            if Read1 == '':
                break  # clean EOF

            id1 = Read1.split('\n')[0].split('/')[0]
            if not id1.startswith('>'):
                print ("Error!! Unknown header: not '>'")
                break
            # NOTE(review): headers without a '/1' or '/2' suffix raise an
            # IndexError here -- confirm inputs always carry direction tags
            tag1 = Read1.split('\n')[0].split('/')[1]

        # read the candidate mate record into Read2
        while (d > 0):
            inp_line = inp.readline()
            Read2 += inp_line
            d = d - 1

        if not Read1 == '' and Read2 == '':
            # EOF with an unpaired record still in hand: it's a singleton
            fout3.write(Read1)
            break

        id2 = Read2.split('\n')[0].split('/')[0]
        if not id2.startswith('>'):  # fix: previously re-checked id1 here
            print ("Error!! Unknown header: not '>'")
            break
        tag2 = Read2.split('\n')[0].split('/')[1]

        if tag1 == '1' and tag2 == '2':
            if id1 == id2:
                # a proper mate pair
                fout1.write(Read1)
                fout2.write(Read2)
                flag = 0
            else:
                # right directions, different IDs: both are singletons
                fout3.write(Read1)
                fout3.write(Read2)
                flag = 0
        elif tag1 == '1' and tag2 == '1':
            # Read1 is a singleton; Read2 may pair with the NEXT record
            fout3.write(Read1)
            flag = 1
            Read1 = Read2
            tag1 = tag2
            id1 = id2
        elif tag1 == '2' and tag2 == '2':
            fout3.write(Read1)
            fout3.write(Read2)
            flag = 0
        elif tag1 == '2' and tag2 == '1':
            # orphaned /2; carry the /1 forward as the next candidate
            fout3.write(Read1)
            Read1 = Read2
            tag1 = tag2
            id1 = id2
            flag = 1

    # fix: handles were never closed, which could leave output unflushed
    inp.close()
    fout1.close()
    fout2.close()
    fout3.close()
def process_fastq(args):
    """
    Split an interleaved FASTQ file into .R1, .R2 and .single output files.

    Handles both '@id 1:N:0:...' and '@id/1' header styles.  A /1 record is
    buffered until the immediately-following record is confirmed to be its /2
    mate; otherwise it is written as a singleton.  Expects standard 4-line
    FASTQ records.
    """
    import gzip  # fix: gzip was used below but never imported by this script

    read1_file = args.output + ".R1.fastq"
    read2_file = args.output + ".R2.fastq"
    single_file = args.output + ".single.fastq"

    # Text-mode gzip handles ('rt'/'wt') so compressed and plain input are
    # handled identically below.  (The original opened gzip OUTPUT files in
    # binary mode and then wrote str to them, which raises a TypeError.)
    if args.input_file.endswith('.gz'):
        inp = gzip.open(args.input_file, 'rt')
        read1_out_fh = gzip.open(read1_file, 'wt')
        read2_out_fh = gzip.open(read2_file, 'wt')
        singles_out_fh = gzip.open(single_file, 'wt')
    else:
        inp = open(args.input_file, 'r')
        read1_out_fh = open(read1_file, 'w')
        read2_out_fh = open(read2_file, 'w')
        singles_out_fh = open(single_file, 'w')

    line_count = 0
    last_base_name = None      # header ID (minus direction) of the previous record
    last_dir = None            # 1 or 2
    last_base_buffer = list()  # buffered lines of a /1 record awaiting its mate
    current_fh = None          # destination for non-header lines; None = buffer

    for line in inp:
        line_count += 1

        if re.match(r'^\s*$', line): continue

        if line_count % 4 == 1:
            # this should be a header line; try the Illumina spaced style first
            spaced_match = re.search(r'^\@(\S+) ([12])', line)
            if spaced_match:
                base = spaced_match.group(1)
                dir = spaced_match.group(2)
            else:
                # then the traditional /1 or /2 suffix style
                traditional_match = re.search(r'^\@(\S+)\/([12])', line)
                if traditional_match:
                    base = traditional_match.group(1)
                    dir = traditional_match.group(2)
                else:
                    raise Exception("ERROR: Expected this to be a header line, but format wasn't recognized: {0}".format(line))

            ## if this is /2, was /1 found?
            if int(dir) == 1:
                # if the last direction was 1, purge it as a singleton
                if last_dir == 1:
                    for buffer_line in last_base_buffer:
                        singles_out_fh.write(buffer_line)

                last_base_name = base
                last_dir = 1
                last_base_buffer = [line]
                current_fh = None
            elif int(dir) == 2:
                if last_base_name == base:
                    if last_dir == 1:
                        # purge the /1
                        for buffer_line in last_base_buffer:
                            read1_out_fh.write(buffer_line)

                        # then write the /2
                        read2_out_fh.write(line)
                        current_fh = read2_out_fh
                    else:
                        raise Exception("ERROR: Were there two {0}/2 reads in a row?".format(last_base_name))
                else:
                    # this must be a /2 where the /1 is missing
                    singles_out_fh.write(line)
                    current_fh = singles_out_fh

                last_base_name = base
                last_dir = 2
        else:
            # sequence/plus/quality lines follow their header's destination
            if current_fh is None:
                last_base_buffer.append(line)
            else:
                current_fh.write(line)

    # Fix: a trailing, unmated /1 record at EOF was previously dropped
    # silently; flush it as a singleton.
    if current_fh is None and last_base_buffer:
        for buffer_line in last_base_buffer:
            singles_out_fh.write(buffer_line)

    # fix: handles were never closed, which could leave output unflushed
    inp.close()
    read1_out_fh.close()
    read2_out_fh.close()
    singles_out_fh.close()
if __name__ == '__main__':
    # parse arguments once, then dispatch on the declared input type
    args = interface()

    if args.input_type == 'fastq':
        process_fastq(args)
    elif args.input_type == 'fasta':
        process_fasta(args)
    else:
        # unrecognized --input_type values are reported but not fatal (exit code 0)
        print ("Error! Input file format incorrect. Expected to be 'fastq' or 'fasta'");
| mit | 0eff5dec60a987764b273ef2cc8f896a | 31.716738 | 127 | 0.528007 | 3.723986 | false | false | false | false |
jorvis/biocode | sandbox/jorvis/generate_read_coverage_figure.py | 1 | 4414 | #!/usr/bin/env python3
"""
INPUT
Expected input file format (pileup):
Each line consists of 5 (or optionally 6) tab-separated columns:
1. Sequence identifier
2. Position in sequence (starting from 1)
3. Reference nucleotide at that position
4. Number of aligned reads covering that position (depth of coverage)
5. Bases at that position from aligned reads
6. Quality of those bases (OPTIONAL)
In my testing, this was generated like this:
samtools mpileup -f Trinity.fasta XUMTA_20131112.bowtie.sorted.mappedonly.bam > XUMTA_20131112.bowtie.sorted.mappedonly.mpileup
OUTPUT
The X and Y size of the resulting image is going to be a product of the --mol_size_limit and --mol_bin_size
parameters.
If you pass a value of 'plot' to the -o parameter it will invoke the interactive plot viewer rather
than writing an output file. (You can still save a file from within the viewer)
"""
import argparse
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
from biocode import utils
def main():
    """
    Command-line entry point.

    Reads a samtools mpileup file plus the reference FASTA it was generated
    against, computes each molecule's median depth of coverage, bins molecules
    by length, and renders a scatter plot (point radius = molecule count) of
    median coverage vs. molecule length.
    """
    parser = argparse.ArgumentParser( description='Generates a figure showing coverage/abundance vs. molecule size.')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input pileup file' )
    parser.add_argument('-f', '--fasta_file', type=str, required=True, help='Path to the FASTA file of reference molecules' )
    parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
    # NOTE(review): --mol_size_limit is accepted but never referenced below
    parser.add_argument('-s', '--mol_size_limit', type=int, required=False, default=5000, help='Results for molecules over this size will be grouped together' )
    parser.add_argument('-b', '--mol_bin_size', type=int, required=False, default=10, help='Set the binning resolution of the transcript size axis')
    args = parser.parse_args()

    ## first, we need a collection of the FASTA data and the molecule lengths
    molecules = utils.fasta_dict_from_file(args.fasta_file)

    ## data points for plotting
    # structure like this:
    # 500 = { 30 => 2 }
    # which means: There were 2 transcripts with median coverage of 30 and length between 500 and 500+mol_bin_size
    data_bins = defaultdict(lambda: defaultdict(int))

    current_molecule_id = None
    current_molecule_coverages = list()

    ## These files are usually huge. For scalability, operations performed within this
    # loop should be limited.
    # Assumes the pileup is grouped by molecule (samtools mpileup output is).
    for line in open(args.input_file):
        cols = line.split("\t")

        if current_molecule_id is None:
            # first molecule: initialize a zeroed per-position coverage vector
            current_molecule_id = cols[0]
            current_molecule_coverages = [0] * len(molecules[cols[0]]['s'])

        if cols[0] != current_molecule_id:
            # new molecule encountered: bin and record the finished one
            mol_length_bin = int(len(molecules[current_molecule_id]['s']) / args.mol_bin_size)
            median_size = np.median(current_molecule_coverages)
            data_bins[mol_length_bin][median_size] += 1
            print("DEBUG: molecule {0} appeared to be {1} bp in length with median coverage of {2}".format(current_molecule_id, len(molecules[current_molecule_id]['s']), median_size))
            # reset
            current_molecule_id = cols[0]
            current_molecule_coverages = [0] * len(molecules[cols[0]]['s'])

        try:
            # pileup positions are 1-based; coverage vector is 0-based
            current_molecule_coverages[int(cols[1]) - 1] = int(cols[3])
        except IndexError:
            print("ERROR: pileup file reports position {0} coverage but transcript {1} is only {2} bp in length".format(cols[1], current_molecule_id, len(molecules[cols[0]]['s'])) )

    # don't forget the last one
    mol_length_bin = int(len(molecules[cols[0]]['s']) / args.mol_bin_size)
    median_size = np.median(current_molecule_coverages)
    data_bins[mol_length_bin][median_size] += 1

    ## now generate the plot data - x,y positions and radii
    x = list()
    y = list()
    r = list()

    for bin_size in data_bins:
        for cov in data_bins[bin_size]:
            x.append(bin_size)
            y.append(cov)
            r.append(data_bins[bin_size][cov])

    plt.xlabel('Molecule length')
    plt.ylabel('Median depth of coverage')
    #plt.xlim(0,2000)
    #plt.ylim(0,500)
    plt.scatter(x, y, s=r, alpha=0.5)

    # 'plot' is a magic output value that opens the interactive viewer instead
    if args.output_file == 'plot':
        plt.show()
    else:
        plt.savefig(args.output_file)
if __name__ == '__main__':
main()
| mit | a3a490cbf1c31a2bfad542c009052131 | 34.312 | 183 | 0.662438 | 3.56831 | false | false | false | false |
jorvis/biocode | sandbox/jorvis/assign_igs_ids_from_evm_gff.py | 1 | 4035 | #!/usr/bin/env python3
"""
The annotation pipeline is pretty liberal about the input molecule identifiers. Once
we've reached the EVM step we assign IGS-convention identifiers. This script will
export the new GFF3 file as well as a plain text file mapping the old and new IDs.
INPUT
-----
jcf7180000271712 EVM gene 407 1013 . + . ID=evm.TU.jcf7180000271712.1;Name=EVM%20prediction%20jcf7180000271712.1
jcf7180000271712 EVM mRNA 407 1013 . + . ID=evm.model.jcf7180000271712.1;Parent=evm.TU.jcf7180000271712.1
jcf7180000271712 EVM exon 407 466 . + . ID=evm.model.jcf7180000271712.1.exon1;Parent=evm.model.jcf7180000271712.1
jcf7180000271712 EVM CDS 407 466 . + 0 ID=cds.evm.model.jcf7180000271712.1;Parent=evm.model.jcf7180000271712.1
jcf7180000271712 EVM exon 531 729 . + . ID=evm.model.jcf7180000271712.1.exon2;Parent=evm.model.jcf7180000271712.1
jcf7180000271712 EVM CDS 531 729 . + 0 ID=cds.evm.model.jcf7180000271712.1;Parent=evm.model.jcf7180000271712.1
jcf7180000271712 EVM exon 799 1013 . + . ID=evm.model.jcf7180000271712.1.exon3;Parent=evm.model.jcf7180000271712.1
jcf7180000271712 EVM CDS 799 1013 . + 2 ID=cds.evm.model.jcf7180000271712.1;Parent=evm.model.jcf7180000271712.1
ID naming convention expected from EVM (where {asm} is the input assembly ID):
gene: evm.TU.{asm}.1
mRNA: evm.model.{asm}.1
exon: evm.model.{asm}.1.exon1
exon: evm.model.{asm}.1.exon2
CDS : cds.evm.model.{asm}.1 (CDS properly share IDs across fragments)
"""
import argparse
from biocode import gff
next_id = { 'gene':1, 'mRNA':1, 'exon':1, 'CDS':1 }
def main():
    """
    Command-line entry point.

    Streams an EVM GFF3 file, replaces every ID/Parent value with a new
    IGS-convention identifier built from --id_prefix, and writes the rewritten
    GFF3 (plus an optional old->new ID map).  Minted IDs are cached in 'idmap'
    so each old ID maps to exactly one new ID.
    """
    parser = argparse.ArgumentParser( description='Put a description of your script here')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read' )
    parser.add_argument('-o', '--output_gff', type=str, required=True, help='Path to an output GFF file to be created with new IDs' )
    parser.add_argument('-p', '--id_prefix', type=str, required=True, help='Will be used as the base for all IDs generated' )
    parser.add_argument('-m', '--output_map', type=str, required=False, help='This will create a tab-delimited mapping of old and new IDs' )
    args = parser.parse_args()

    ofh = open(args.output_gff, 'w')

    if args.output_map is None:
        map_ofh = None
    else:
        map_ofh = open(args.output_map, 'w')

    # old ID -> newly-minted ID
    idmap = dict()

    for line in open(args.input_file):
        line = line.rstrip()
        cols = line.split("\t")

        # non-feature rows (comments, FASTA, etc.) pass through unchanged
        if len(cols) != 9:
            ofh.write(line + "\n")
            continue

        feat_id = gff.column_9_value(cols[8], 'ID')
        parent_id = gff.column_9_value(cols[8], 'Parent')

        if feat_id in idmap:
            new_feat_id = idmap[feat_id]
        else:
            new_feat_id = get_new_id(args.id_prefix, cols[2], feat_id, map_ofh)
            idmap[feat_id] = new_feat_id

        if parent_id is None:
            cols[8] = "ID={0}".format(new_feat_id)
        else:
            if parent_id in idmap:
                new_parent_id = idmap[parent_id]
            else:
                # NOTE(review): a not-yet-seen parent gets an ID minted with
                # the CHILD's feature type (cols[2]) -- for sorted EVM input
                # parents precede children so this shouldn't trigger; confirm
                new_parent_id = get_new_id(args.id_prefix, cols[2], parent_id, map_ofh)
                idmap[parent_id] = new_parent_id

            cols[8] = "ID={0};Parent={1}".format(new_feat_id, new_parent_id)

        ofh.write( "\t".join(cols) + "\n" )
def get_new_id(prefix, feat_type, old_id, ofh):
    """
    Mint the next IGS-convention identifier for a feature of *feat_type*,
    advancing the module-level 'next_id' counter for that type.  When *ofh*
    is an open file handle, the old->new mapping is also logged to it as a
    tab-delimited line.
    """
    counter = next_id[feat_type]
    next_id[feat_type] += 1

    new_id = "{0}.{1}.{2}.1".format(prefix, feat_type, counter)

    if ofh is not None:
        ofh.write("{0}\t{1}\n".format(old_id, new_id))

    return new_id
if __name__ == '__main__':
main()
| mit | 8bed4be0756859cb92dc6cc2eca3eda7 | 37.428571 | 153 | 0.590087 | 2.892473 | false | false | false | false |
jorvis/biocode | gff/replace_gff_type_column_value.py | 1 | 1553 | #!/usr/bin/env python3
"""
This simple script allows you to replace the type (3rd) column in a GFF with another value. For
example, WebApollo writes 'transcript' features rows, and many of our scripts assume these will
be 'mRNA' instead:
./replace_gff_type_column_value.py -i in.gff -it transcript -o out.gff -ot mRNA
Author: Joshua Orvis
"""
import argparse
def main():
    """
    Command-line entry point: stream a GFF3 file and rewrite the type (3rd)
    column of every row whose type equals --input_type to the --output_type
    value, passing all other lines through unchanged.
    """
    parser = argparse.ArgumentParser( description='Adds gene features for RNAs which lack them')

    ## output file to be written
    parser.add_argument('-i', '--input', type=str, required=True, help='Path to the input GFF3 file' )
    parser.add_argument('-it', '--input_type', type=str, required=True, help='Type of feature you want to replace' )
    parser.add_argument('-o', '--output', type=str, required=True, help='Output GFF3 file to write' )
    parser.add_argument('-ot', '--output_type', type=str, required=True, help='New feature type value' )
    args = parser.parse_args()

    ofh = open(args.output, 'wt')

    for line in open(args.input):
        # comment/pragma lines pass through verbatim (newline preserved)
        if line.startswith('#'):
            ofh.write(line)
            continue

        line = line.rstrip()
        cols = line.split("\t")

        # anything that isn't a 9-column feature row passes through unchanged
        if len(cols) != 9:
            ofh.write("{0}\n".format(line) )
            continue

        if cols[2] == args.input_type:
            cols[2] = args.output_type

        # re-joining the split columns reproduces the line exactly when no
        # replacement occurred
        ofh.write("{0}\n".format("\t".join(cols)) )

    ofh.close()
if __name__ == '__main__':
main()
| mit | 72e4d6735c13df4da918b4a7043c1baf | 26.245614 | 116 | 0.603348 | 3.505643 | false | false | false | false |
jorvis/biocode | genbank/convert_gff3_to_gbk.py | 1 | 7974 | #!/usr/bin/env python3
"""
Converts GFF3 representing gene models to Genbank flat-file format.
GFF3 specification:
http://www.sequenceontology.org/gff3.shtml
Genbank flat file specification:
https://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html
--molecule_type:
http://www.ncbi.nlm.nih.gov/Sequin/sequin.hlp.html#Molecule
--genbank_division:
http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html#GenBankDivisionB
MAJOR assumptions:
- Functional annotation is attached to a polypeptide feature
"""
import argparse
import os
import pickle
import sys
from biocode import utils, genbank, gff
from jinja2 import Environment, FileSystemLoader
from pkg_resources import Requirement, resource_filename
# If biocode is installed via a GitHub checkout, the path to the template will be referential
template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../data/genbank_flat_file_header.template')
# if referential discovery didn't work, this was probably a PyPi install. Use pkg_resources to find it instead.
if os.path.isfile(template_path):
template_dir = os.path.dirname(template_path)
else:
template_path = resource_filename(Requirement.parse("biocode"), "genbank_flat_file_header.template")
template_dir = "{0}/{1}".format(os.path.dirname(template_path), '/biocode/data')
# for now, these are the same
data_dir = template_dir
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
loader=FileSystemLoader(template_dir),
trim_blocks=False)
def main():
    """
    Command-line entry point.

    Converts GFF3 gene models into GenBank flat-file output: renders a header
    from the Jinja2 template, writes source/gene features per assembly, and
    optionally appends the ORIGIN sequence block.  Output goes to a single
    file/STDOUT, or to one file per assembly when --output_dir is given.
    """
    parser = argparse.ArgumentParser( description='Converts GFF3 into a GenBank flat file')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input GFF3 file to be read' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to a Genbank flat file to be created. Supersedes --output_dir if both are specified.' )
    parser.add_argument('-od', '--output_dir', type=str, required=False, help='Path to an output directory. If this option is specified then each input assembly will be written to a separate GenBank output file, named with the assembly_id.' )
    parser.add_argument('-g', '--genome_fasta', type=str, required=False, help='Optional. You must specify this unless the FASTA sequences for the molecules are embedded in the GFF')
    parser.add_argument('-go', '--go_index', type=str, required=False, default='', help='Pickled GO index (created by biocode/general/make_go_index.py). By default, reads from within the data directory within the biocode distribution')
    parser.add_argument('-mt', '--molecule_type', type=str, required=False, default='DNA', help='Molecule type' )
    parser.add_argument('-gbd', '--genbank_division', type=str, required=False, default='.', help='GenBank Division (3-letter abbreviation)' )
    parser.add_argument('-md', '--modification_date', type=str, required=False, default='DD-MMM-YYYY', help='The modification date for header in format like 21-JUN-1999' )
    parser.add_argument('-org', '--organism', type=str, required=False, default='.', help='Full organism name (including strain)' )
    parser.add_argument('-str', '--strain', type=str, required=False, help="Only the strain designation, which is written to the FEATURES.source element" )
    parser.add_argument('-d', '--definition', type=str, required=False, default='.', help='Brief description of sequence; includes information such as source organism, gene name/protein name, or some description of the sequence\'s function.' )
    parser.add_argument('-s', '--source', type=str, required=False, default='.', help='Free-format information including an abbreviated form of the organism name, sometimes followed by a molecule type.' )
    parser.add_argument('-t', '--taxon_id', type=int, required=False, help='NCBI taxon ID, if known' )
    parser.add_argument('-l', '--lineage', type=str, required=False, default='Unknown', help='Semicolon-delimited lineage of the organism e.g., "Eukaryota; Alveolata; Apicomplexa; Aconoidasida; Piroplasmida; Theileriidae; Theileria"' )
    parser.add_argument('-seq', '--include_sequence', action='store_true', help='Include sequence (if present) in the output GenBank flat file(s).' )
    parser.add_argument('-p', '--locus_id_prefix', required=False, default='', help='Prefix to add to the GenBank LOCUS id in the output GenBank flat file(s).' )
    args = parser.parse_args()

    # check that output directory exists
    if args.output_dir is not None:
        if not os.path.isdir(args.output_dir):
            sys.stderr.write("FATAL: the specified output directory (" + args.output_dir + ") does not exist\n");
            exit(1)

    # line-wrap lineage to stay below 79 character GenBank flat file width
    lineage = genbank.line_wrap_lineage_string(args.lineage)

    # resolve the pickled GO index: either user-supplied or the bundled default
    if args.go_index == '':
        go_index_path = "{0}/go.pickle".format(data_dir)
    else:
        go_index_path = args.go_index

    if os.path.isfile(go_index_path):
        go_index = pickle.load(open(go_index_path, 'rb'))
    else:
        raise Exception("ERROR: Expected to find a pickled GO index at the following path: {0}".format(go_index_path))

    (assemblies, features) = gff.get_gff3_features(args.input_file)

    ofh = sys.stdout
    if args.output_file is not None:
        if args.output_dir is None:
            ofh = open(args.output_file, 'wt')
        else:
            sys.stderr.write("WARN: both -o/--output_file and -od/--output_dir were passed so the former will be ignored\n")

    # deal with the FASTA file if the user passed one
    if args.genome_fasta is not None:
        process_assembly_fasta(assemblies, args.genome_fasta)

    for assembly_id in assemblies:
        locus_id = args.locus_id_prefix + assembly_id

        # per-assembly output files when --output_dir was specified
        if args.output_dir is not None:
            ofn = args.output_dir + "/" + locus_id + ".gbk"
            ofh = open(ofn, 'wt')

        assembly = assemblies[assembly_id]

        # values substituted into the flat-file header template
        context = { 'locus':locus_id, 'molecule_size':assembly.length, 'molecule_type':args.molecule_type,
                    'division':args.genbank_division, 'modification_date':args.modification_date,
                    'accession':'.', 'version':'.',
                    'source':args.source, 'definition':args.definition, 'organism':args.organism,
                    'lineage':lineage
        }
        header = TEMPLATE_ENVIRONMENT.get_template('genbank_flat_file_header.template').render(context)
        ofh.write(header)

        # FEATURES table: one 'source' feature spanning the whole molecule
        ofh.write("\nFEATURES             Location/Qualifiers\n")
        ofh.write("     source          1..{0}\n".format(assembly.length))
        ofh.write("                     /organism=\"{0}\"\n".format(args.organism))
        ofh.write("                     /mol_type=\"genomic DNA\"\n")

        if args.strain is not None:
            ofh.write("                     /strain=\"{0}\"\n".format(args.strain))

        if args.taxon_id is not None:
            ofh.write("                     /db_xref=\"taxon:{0}\"\n".format(args.taxon_id))

        for gene in assemblies[assembly_id].genes():
            genbank.print_biogene(gene=gene, fh=ofh, on=assembly, go_index=go_index)

        if args.include_sequence:
            ofh.write("ORIGIN\n")
            genbank.print_sequence(seq=assembly.residues, fh=ofh)

        # '//' is the GenBank record terminator
        ofh.write("//\n")

        # there may be multiple output files
        if args.output_dir is not None:
            ofh.close()

    # there is only one output file
    if args.output_dir is None:
        ofh.close()
def process_assembly_fasta(mols, fasta_file):
    """
    Attach residues (and their length) from *fasta_file* to each molecule in
    *mols* whose ID appears in the FASTA file.  Molecules without a matching
    FASTA entry are left untouched.
    """
    fasta_seqs = utils.fasta_dict_from_file(fasta_file)

    for mol_id in mols:
        # skip molecules for which the FASTA file provides no sequence
        if mol_id not in fasta_seqs:
            continue

        molecule = mols[mol_id]
        molecule.residues = fasta_seqs[mol_id]['s']
        molecule.length = len(molecule.residues)
if __name__ == '__main__':
main()
| mit | e0c29c21ec348411f27841c97561ac9f | 47.036145 | 243 | 0.664159 | 3.509683 | false | false | false | false |
jorvis/biocode | lib/biocode/annotation.py | 1 | 19857 | '''
Warning: this module requires Python 3.2 or higher
This is a set of classes to represent what I have most commonly needed when working on
dozens (eukaryotic) or hundreds (prokaryotic) of annotation projects. If you have
other attributes that you'd like to see supported, please add an 'issue' on the
biocode GitHub page.
'''
import re
class FunctionalAnnotation:
    """
    While recognizing that an enormous variety of attributes could go here in
    describing the functional annotation of a BioThing, I'm starting with those
    we most often encounter and need to be available in common output formats.
    These most common attributes are accessed by name, but the others are found
    as a dict in the 'other_attributes' property.

    Also, there's a place for having attributes like this abstracted, stored in
    ontologies, etc.  We've done all that before.  For now I'm going to try
    and hopefully enjoy the utility of having the most common properties always
    directly, and simply, available.

    Attributes:
        product_name (str or None): gene product name
        gene_symbol (str or None): gene symbol
        go_annotations (list of GOAnnotation)
        ec_numbers (list of ECAnnotation)
        dbxrefs (list of Dbxref)
        other_attributes (dict): free-form storage for anything else
    """
    def __init__( self, product_name=None, gene_symbol=None, go_annotations=None, ec_numbers=None, dbxrefs=None ):
        self.product_name = product_name
        self.gene_symbol = gene_symbol
        self.go_annotations = go_annotations
        self.ec_numbers = ec_numbers
        self.dbxrefs = dbxrefs
        self.other_attributes = dict()

        # default the list-valued attributes here rather than in the signature,
        # avoiding the shared mutable-default-argument pitfall
        if self.go_annotations is None:
            self.go_annotations = list()

        if self.ec_numbers is None:
            self.ec_numbers = list()

        if self.dbxrefs is None:
            self.dbxrefs = list()

    def __str__(self):
        """Return a multi-line, human-readable summary of this annotation."""
        representation = "Product name: {0}\nGene symbol : {1}\n".format(self.product_name, self.gene_symbol)

        if len(self.go_annotations) > 0:
            representation += "GO annotations:\n"
            for go_annot in self.go_annotations:
                representation += "\tGO:{0}\n".format(go_annot.go_id)
        else:
            representation += "GO annotations: None\n"

        if len(self.ec_numbers) > 0:
            representation += "EC numbers:\n"
            for ec in self.ec_numbers:
                representation += "\t{0}\n".format(ec.number)
        else:
            representation += "EC numbers: None\n"

        if len(self.dbxrefs) > 0:
            representation += "Dbxrefs:\n"
            for dbxref in self.dbxrefs:
                representation += "\t{0}:{1}\n".format(dbxref.db, dbxref.identifier)
        else:
            representation += "Dbxrefs: None\n"

        return representation

    def add_dbxref(self, dbxref):
        """
        Stores a Dbxref object within an annotation.  The thing passed can either be a
        Dbxref object or string like "sourcedb:identifier" and it will be automatically
        parsed.

        Raises:
            Exception: if a string is passed that isn't in 'source:identifier'
                form, or if the argument is neither a Dbxref nor a str.
        """
        if type(dbxref).__name__ == 'Dbxref':
            self.dbxrefs.append(dbxref)
        elif type(dbxref).__name__ == 'str':
            m = re.match(r"(.+)\:(.+)", dbxref)
            if m:
                self.dbxrefs.append(Dbxref(db=m.group(1), identifier=m.group(2)))
            else:
                raise Exception("ERROR: Annotation.add_dbxref(): If string passed, expected format was 'source:identifier'")
        else:
            raise Exception("ERROR: Annotation.add_dbxref expected a Dbxref object or string to be passed")

    def add_ec_number(self, ec_num):
        """
        Appends an ECAnnotation object to this annotation's list.

        TODO: Modify this to allow passing ECAnnotation object or string.
        Right now it expects an ECAnnotation object
        """
        self.ec_numbers.append(ec_num)

    def add_go_annotation(self, go):
        """
        Appends a GOAnnotation object to this annotation's list.

        TODO: Modify this to allow passing GOAnnotation object or string.
        Right now it expects an GOAnnotation object
        """
        self.go_annotations.append(go)

    def process_gene_symbol(self):
        """
        This method applies a series of rules based on our experience in annotation
        to gene symbols, attempting to correct things before they get submitted to
        GenBank.  This includes:

        - Removing anything after the first whitespace
        - DOES NOT change case.  Genbank demands that prok submissions all use lower
          case but allows organism-specific conventions to be followed for others.
          - https://www.ncbi.nlm.nih.gov/genbank/genomesubmit_annotation/

        This method returns the new gene symbol rather than overwriting the attribute.  For
        that, use set_processed_gene_symbol instead.  Returns None if no symbol is set.
        """
        new_gs = self.gene_symbol

        if not new_gs:
            return new_gs

        # take off everything after and including the first space, if any
        new_gs = new_gs.split(' ', 1)[0]

        return new_gs

    def process_product_name(self):
        """
        This method applies a series of rules based on years of annotation experience
        to gene product names, attempting to correct much of the mess of things which
        which get submitted to Genbank.  This includes:

        - Removing trailing periods
        - Nonredundifies: 'protein protein', 'family family', 'family protein family protein',
          'protein family protein' and similar
        - Changes 'superfamily' to 'family'
        - Any starting with 'ORF' or 'orf' get changed to 'conserved hypothetical protein'
        - Any products which contain 'homolog' get changed to CHP*
        - Any products with 'similar to' get changed to CHP
        - Any with 'DUF' or 'UPF' get changed to CHP
        - Any with 'ncharacteri' (captures US/british spellings of 'uncharacterized'), gets changed to CHP
        - Select words are changed to their american spellings
        - Changes any of (predicted|possible|potential|probable) to 'putative'
        - Change 'TTG start' to CHP
        - Change any starting with 'residues' to CHP
        - Removes 'C-terminus' and 'N-terminus' from end of product name
        - Strips embedded Dbxrefs from end of name:
           - Example: (2E,6E)-farnesyl diphosphate synthase {ECO:0000313|EMBL:OOP19401.1}
        - Then a long list of manual, specific name changes

        * CHP = conserved hypothetical protein

        It returns the new product name rather than overwriting the attribute.  For that,
        use set_processed_product_name().

        Note: NCBI submission rules: https://www.ncbi.nlm.nih.gov/genbank/asndisc.examples/
        Especially the section: SUSPECT_PRODUCT_NAMES
        These are NOT currently all implemented here
        """
        new_product = self.product_name
        default_product = 'conserved hypothetical protein'

        # remove/replace troublesome strings
        new_product = new_product.rstrip('.')
        new_product = new_product.replace('protein protein', 'protein')
        new_product = new_product.replace('superfamily', 'family')
        new_product = new_product.replace('family family', 'family')
        new_product = new_product.replace('family protein family protein', 'family protein')
        new_product = new_product.replace('family protein domain protein', 'family protein')
        new_product = new_product.replace('domain domain protein', 'domain protein')
        new_product = new_product.replace('protein family protein', 'family protein')
        new_product = new_product.replace('superfamily protein family protein', 'family protein')
        new_product = new_product.replace(' Protein-like family protein', '-like family protein')
        new_product = new_product.replace(' protein-like family protein', '-like family protein')

        # takes patterns like this off the end: {ECO:0000313|EMBL:OOP19401.1}
        m = re.match(r'(.+) \{.+\:.+\}', new_product)
        if m:
            new_product = m.group(1)

        if new_product.lower().startswith('orf'):
            return default_product

        if 'ncharacteri' in new_product.lower():
            return default_product

        # process some homolog-specific names
        if 'homolog' in new_product.lower():
            if 'shiA homolog' in new_product:
                new_product = 'shiA protein'
            elif 'virulence factor mviM homolog' in new_product:
                new_product = 'virulence factor mviM'
            elif 'protein phnA homolog' in new_product:
                new_product = 'phnA protein'
            elif 'protein seqA homolog' in new_product:
                new_product = 'seqA protein'
            else:
                return default_product

        if 'similar to' in new_product.lower():
            return default_product

        if 'DUF' in new_product or 'UPF' in new_product:
            return default_product

        # If it is any form of conserved hypothetical, return the proper version of that.
        if 'onserved hypothe' in new_product.lower():
            return default_product

        if 'unnamed' in new_product.lower():
            return default_product

        # Is the name *only* protein (with whitespace)
        if new_product.lower().lstrip().rstrip() == 'protein':
            return default_product

        # Some sources give products which start with the word 'residues'
        if new_product.lower().startswith('residues'):
            return default_product

        # Some proteins are simply named 'TTG start'
        if new_product.lower().startswith('ttg start'):
            return default_product

        # Correct a class of short bogus names we've often encountered
        m = re.match(r'^\w{1,2}\d{1,3}$', new_product)
        if m:
            return default_product

        m = re.match(r'gene \d+ protein', new_product)
        if m:
            return default_product

        m = re.match(r'(.*)\d*\s*[CN]\-terminus$', new_product)
        if m:
            # bugfix: this was m.groups(1), which returns a tuple and broke
            # every string operation below with an AttributeError
            new_product = m.group(1)

        # removes trailing symbols
        new_product = new_product.rstrip('.,-_:/')

        # Names can't end in family.  Example replacements:
        #   Raf kinase inhibitor-like protein, YbhB/YbcL family -> YbhB/YbcL family Raf kinase inhibitor-like protein
        #   phage major capsid protein, HK97 family -> HK97 family phage major capsid protein
        #   phage portal protein, lambda family -> lambda family phage portal protein
        if new_product.endswith(' family'):
            m = re.match('(.+), (.+ family)', new_product)
            if m:
                new_product = "{0} {1}".format(m.group(2), m.group(1))

        # If family still remains in the name twice, take out the first one
        #  Peptidase family S49 family protein -> Peptidase S49 family protein
        if new_product.count('family') > 1:
            m = re.match('(.+?) family (.+)', new_product)
            if m:
                new_product = "{0} {1}".format(m.group(1), m.group(2))

        # Americanize some words.  I'm up for arguments against these, but adding them
        #  because our previous software had them.
        # bugfix: str.replace() returns a new string; the results were
        # previously discarded, so these substitutions never took effect.
        new_product = new_product.replace('utilisation', 'utilization')
        new_product = new_product.replace('utilising', 'utilizing')
        new_product = new_product.replace('dimerisation', 'dimerization')
        new_product = new_product.replace('disulphide', 'disulfide')
        new_product = new_product.replace('sulphur', 'sulfur')
        new_product = new_product.replace('mobilisation', 'mobilization')

        # standardize several different forms of 'putative' to a single one
        new_product = re.sub("predicted", "putative", new_product, flags=re.I)
        new_product = re.sub("possible", "putative", new_product, flags=re.I)
        new_product = re.sub("potential", "putative", new_product, flags=re.I)
        new_product = re.sub("probable", "putative", new_product, flags=re.I)

        # Fix incorrect spellings which are getting transitively annotated
        new_product = re.sub("putaive", "putative", new_product, flags=re.I)

        # Replacements requiring word boundaries
        patt = re.compile(r'\b(?:%s)\b' % 'asparate')
        new_product = re.sub(patt, "aspartate", new_product)

        # Now a long series of manual name changes we've gathered over the years
        #  the key is the source, value is what it will be changed to.
        replacement_products = {
            'alr5027 protein': 'heme-binding protein HutZ',
            'arginine-tRNA-transferase, C terminus family protein': 'putative arginine-tRNA-transferase',
            'bacterial regulatory helix-turn-helix proteins, AraC family protein': 'transcriptional regulator, AraC family',
            'bacterial regulatory proteins, gntR family protein': 'transcriptional regulator, GntR family',
            'bacterial regulatory proteins, lacI family protein': 'transcriptional regulator, LacI family',
            'bacterial regulatory proteins, luxR family protein': 'transcriptional regulator, LuxR family',
            'bacterial regulatory proteins, tetR family protein': 'transcriptional regulator, TetR family',
            'bordetella uptake gene (bug) product family protein': 'bug family protein',
            'conserved protein with nucleoside triphosphate hydrolase domain': 'putative ATP-dependent endonuclease',
            'cyclic di-GMP binding protein VCA0042': 'cyclic di-GMP binding protein',
            'cytochrome b(C-terminal)/b6/petD family protein': 'cytochrome b family protein',
            'domain related to MnhB subunit of Na+/H+ antiporter family protein': 'Na+/H+ antiporter family protein',
            'FAD linked oxidases, C-terminal domain protein': 'FAD linked oxidase domain protein',
            'FGGY family of carbohydrate kinases, C-terminal domain protein': 'carbohydrate kinase, FGGY family',
            'gene 25-like lysozyme family protein': 'lysozyme family protein',
            'glutamate synthases, NADH/NADPH, small subunit domain protein': 'glutamate synthase, NADH/NADPH, small subunit',
            'glycogen/starch synthases, ADP-glucose type family protein': 'glycogen/starch synthase',
            'GSPII_E N-terminal domain protein': 'bacteriophage N4 adsorption protein B',
            'hydro-lases, Fe-S type, tartrate/fumarate subfamily, beta region domain protein': 'fumarate hydratase family protein',
            'invasion gene expression up-regulator, SirB family protein': 'invasion gene expression up-regulator',
            'K+ potassium transporter family protein': 'potassium uptake protein',
            'menC_gamma/gm+: o-succinylbenzoic acid (OSB) synthetase': 'o-succinylbenzoic acid (OSB) synthetase',
            'phage/plasmid replication , gene II/X family protein': 'phage/plasmid replication protein, gene II/X family',
            'phospholipase d active site motif family protein': 'phospholipase D family protein',
            'PIII': default_product,
            'putative 2-hydroxyacid dehydrogenase HI_1556': 'putative 2-hydroxyacid dehydrogenase',
            'SULFATE TRANSPORTER SULFATE TRANSPORTER FAMILY PROTEIN': 'sulfate permease family protein',
            'thiamin/thiamin pyrophosphate ABC transporter, thiamin/thiamin pyrophospate-binding protein': 'thiamin/thiamine pyrophosphate ABC transporter, thiamin/thiamine pyrophospate-binding protein',
            'traG-like , N-terminal region family protein': 'putative traG protein',
            'transcriptional activator of defense systems': 'multiple antibiotic resistance protein MarA',
            'transcriptional regulatory protein, C terminal family protein': 'putative transcriptional regulator',
            'transposase and inactivated derivative': 'putative transposase',
            'tripartite ATP-independent periplasmic transporters, DctQ component family protein': 'tripartite ATP-independent periplasmic transporter, DctQ family',
            'type IV secretory pathway VirD2 components': 'type IV secretory pathway protein',
            'zn-dependent hydrolase of the beta-lactamase fold': default_product,
        }

        for old in replacement_products:
            if new_product == old:
                new_product = replacement_products[old]
                break

        return new_product.rstrip().lstrip()

    def set_processed_gene_symbol(self):
        """
        See FunctionalAnnotation.process_gene_symbol() for full list of actions

        Overwrites self.gene_symbol with the processed value.
        """
        self.gene_symbol = self.process_gene_symbol()

    def set_processed_product_name(self):
        """
        See FunctionalAnnotation.process_product_name() for full list of actions

        Overwrites self.product_name with the processed value.
        """
        self.product_name = self.process_product_name()
class Dbxref:
    """
    A reference to an identifier in an external database, e.g.:

        SGD:S0006169
        KEGG:K06223

    The part before the colon is the 'db' and the part after it is the
    'identifier'.  These are meant for plain identifiers only, not for
    linking ontology terms.

    Future work: a "fromstring" @classmethod could parse a raw
    "db:identifier" string and delegate to this constructor.
    """
    def __init__(self, db=None, identifier=None):
        # both attributes are stored as-is; no validation is performed here
        self.db, self.identifier = db, identifier
class GOAnnotation:
    """
    A single Gene Ontology assignment on a functional annotation.

    Evidence codes are described here:
        http://www.geneontology.org/GO.evidence.shtml

    The attribute name 'with_from' stands in for GO's 'with/from' column,
    which isn't a legal Python identifier ('with' and 'from' are reserved).

    Only the numeric portion of the GO ID is kept: "GO:0005515" and
    "0005515" both normalize to "0005515".

    Raises:
        Exception: if no run of digits can be found in go_id.
    """
    def __init__(self, go_id=None, ev_code=None, with_from=None):
        self.go_id = go_id
        self.ev_code = ev_code
        self.with_from = with_from

        ## normalize: keep only the first run of digits from whatever was passed
        digits = re.search(r"\d+", self.go_id)
        if digits is None:
            raise Exception("ERROR: failed to extract numeric portion of ID from new GOAnnotation")
        self.go_id = digits.group(0)
class ECAnnotation:
    """
    A single Enzyme Commission number on a functional annotation.

    Details here:
        http://www.chem.qmul.ac.uk/iubmb/enzyme/

    The official level names ('class', 'subclass', ...) collide with reserved
    keywords, so the hierarchy levels are stored as class1/class2/class3/number:

        class1 = 1        = Oxidoreductases
        class2 = 1.10     = Acting on diphenols and related substances as donors
        class3 = 1.10.3   = With oxygen as acceptor
        number = 1.10.3.2 = laccase

    Unresolved levels may be written as a dash (e.g. "1.10.3.-").

    Currently does not have an index of EC terms to provide other attributes which will
    be added in the future, such as:

       accepted_name = laccase
       reaction = 4 benzenediol + O2 = 4 benzosemiquinone + 2 H2O
       systematic_name = benzenediol:oxygen oxidoreductase
       CAS_registry_number = 80498-15-3

    Raises:
        Exception: if 'number' is not in the recognized N.N.N.N format.
    """
    def __init__(self, number=None):
        self.number = number
        self.class1 = None
        self.class2 = None
        self.class3 = None

        # nested capture groups pull out every hierarchy level in one pass
        parts = re.search(r"(((([0-9\-]+)\.[0-9\-]+)\.[0-9\-]+)\.[a-z0-9\-]+)", self.number)
        if parts is None:
            raise Exception("ERROR: Attempt to add an EC number ({0}) in unrecognized format. Expected N.N.N.N (where N can be 0-9 or a dash)".format(self.number))

        self.class1 = parts.group(4)
        self.class2 = parts.group(3)
        self.class3 = parts.group(2)
        self.number = parts.group(1)

    def __repr__(self):
        # e.g. "EC:1.10.3.2"
        return "EC:{0}".format(self.number)
| mit | ce2bab2032b9c8cd9cdfdbf381303e41 | 45.503513 | 203 | 0.638415 | 3.787335 | false | false | false | false |
jorvis/biocode | lib/biocode/bed.py | 1 | 4091 | import sys
import biocode.utils
import biocode.things
def print_bed_from_assemblies(assemblies=None, ofh=None):
    """
    Utility function to write a BED file from a list() of biothings.Assembly objects.
    No headers are used so the output is compatible with other tools like bedToBigBed.

    WARNING: genes are modified in place.  Because one BED line describes a single
    transcript, any gene with more than one mRNA is split: each additional mRNA is
    moved onto a new Gene named "<gene_id>_<n>" and printed separately, and the
    original gene's coordinates are overwritten with those of its first mRNA.

    Args:
        assemblies: mapping of assembly id -> biothings.Assembly (iterated by key)
        ofh: open text handle to write to; anything that isn't a TextIOWrapper
            is replaced by STDOUT

    References:

    BED format descriptions:
    - http://genome.ucsc.edu/FAQ/FAQformat#format1
    - http://bedtools.readthedocs.io/en/latest/content/general-usage.html

    bedToBigBed:
    - http://hgdownload.soe.ucsc.edu/admin/exe/linux.x86_64/bedToBigBed

    Representing gene models in BED:
    - http://transvar.org/6111/gene_models.pdf
    """
    # NOTE(review): this duck-type check by class name is brittle -- a StringIO
    # or gzip handle would be silently replaced by STDOUT; confirm intent.
    if type(ofh).__name__ != 'TextIOWrapper':
        ofh = sys.stdout #TextIOWrapper

    for assembly_id in assemblies:
        current_assembly = assemblies[assembly_id]

        for gene in sorted(assemblies[assembly_id].genes()):
            rnas_found = 0
            mRNAs = gene.mRNAs()

            for mRNA in mRNAs:
                mRNA_loc = mRNA.location_on(current_assembly)
                rnas_found += 1

                if rnas_found > 1:
                    # second and later mRNAs each get their own standalone gene
                    # record, located at the mRNA's own coordinates
                    gene.remove_mRNA(mRNA)

                    print("INFO: splitting mRNA off gene {0}".format(gene.id))
                    new_gene = biocode.things.Gene(id="{0}_{1}".format(gene.id, rnas_found))
                    new_gene.locate_on(target=current_assembly, fmin=mRNA_loc.fmin, fmax=mRNA_loc.fmax, strand=mRNA_loc.strand)
                    new_gene.add_RNA(mRNA)
                    print_biogene(gene=new_gene, fh=ofh)

            if len(mRNAs) > 1:
                # collapse the original gene down to the span of its first mRNA
                gene_loc = gene.location_on(current_assembly)
                mRNA_loc = mRNAs[0].location_on(current_assembly)
                gene_loc.fmin = mRNA_loc.fmin
                gene_loc.fmax = mRNA_loc.fmax
                gene_loc.strand = mRNA_loc.strand

            print_biogene(gene=gene, fh=ofh)
def print_biogene(gene=None, fh=None, on=None):
    '''
    This method accepts a Gene object located on an Assembly object (from things.py) and prints
    the gene in BED format (one line per RNA on the gene).

    The 4th column of the BED output is to identify the gene.  By default, the locus identifier
    is used but if this isn't present we default to the gene's ID.

    Args:
        gene: biothings Gene object to print (required)
        fh: open handle to write the BED line(s) to
        on: molecule (Assembly) the gene is located on; auto-detected from the
            gene's own location when not passed

    Raises:
        Exception: if gene is None, or if any RNA has no location on 'on'.
    '''
    if gene is None:
        raise Exception( "ERROR: The print_biogene() function requires a biogene to be passed via the 'gene' argument" );

    ## we can auto-detect the molecule if the user didn't pass one
    #   and if there's only one.
    if on is None:
        on = gene.location().on

    gene_loc = gene.location_on( on )
    # BED column 4 (name): prefer the locus tag, fall back to the gene id
    locus_tag = gene.id if gene.locus_tag is None else gene.locus_tag
    gene_strand = '+' if gene_loc.strand == 1 else '-'

    for RNA in gene.RNAs():
        # columns 1-6: chrom, chromStart, chromEnd, name, score (fixed 0), strand
        fh.write("{0}\t{1}\t{2}\t{3}\t0\t{4}\t".format(
            on.id, gene_loc.fmin, gene_loc.fmax, locus_tag, gene_strand
        ))

        RNA_loc = RNA.location_on(on)
        if RNA_loc is None:
            raise Exception("ERROR: Expected RNA {0} to be located on {1} but it wasn't".format(RNA.id, on.id))

        exons = sorted(RNA.exons())
        # NOTE(review): reversing exons on the minus strand makes blockStarts
        # descend, but the BED spec requires blockStarts in ascending order
        # regardless of strand -- confirm downstream tools accept this.
        if RNA_loc.strand == -1:
            exons.reverse()

        # blockStarts are relative to the gene's chromStart; blockSizes are exon lengths
        block_starts = list()
        exon_lengths = list()
        for exon in exons:
            exon_loc = exon.location_on(on)
            block_starts.append(str(exon_loc.fmin - gene_loc.fmin))
            exon_lengths.append(str(exon_loc.fmax - exon_loc.fmin))

        if len(exons):
            # columns 7-12: thickStart, thickEnd, itemRgb (fixed 0), blockCount,
            # blockSizes, blockStarts
            thick_start = exons[0].location()
            thick_end = exons[-1].location()
            fh.write("{0}\t{1}\t0\t{2}\t{3},\t{4},\n".format(
                thick_start.fmin, thick_end.fmax, len(exons), ','.join(exon_lengths), ','.join(block_starts)))
        else:
            # Catch here for genes without exons (tRNAs, etc.)
            # NOTE(review): thickStart == thickEnd == fmin yields a zero-width
            # thick region (conventional for non-coding features) -- confirm
            # that gene_loc.fmin in the second slot is intentional, not fmax.
            fh.write("{0}\t{1}\t0\t{2}\t{3},\t0,\n".format(gene_loc.fmin, gene_loc.fmin, 1,
                                                           gene_loc.fmax - gene_loc.fmin))
| mit | d1a19b82fe7d70610724d6762f49c8a5 | 30.713178 | 127 | 0.56661 | 3.34232 | false | false | false | false |
jorvis/biocode | gff/convert_metagenemark_gff_to_gff3.py | 1 | 7302 | #!/usr/bin/env python3
import argparse
import re
from biocode import things
'''
This script converts the GFF-ish output of Metagenemark into legal GFF3 with full,
canonical gene models.
http://www.sequenceontology.org/gff3.shtml
EXPECTED INPUT example (the commented lines are ignored):
855 length:1510 GeneMark.hmm gene 1 852 . - 0 gene_id 1
##Protein 1
##MAAKYDDVQELLRKKADINARLSLLAYDGTPEIKNRGNGKYLYTRKRVSGKLTSTYVGVY
##SDDLYNLLLRNASESRQLRKELRAVVRQLANAGYSNSELSADLVNNIAFARANLKANIYD
##QAVLEGVATTFPQTEEIIDNGKVFGVSASDVQKILNLKHAWEFILDEDVVLSRSDYYMLS
##HIAKIVNEGFFLDGGRIRGVPVAIGGSTYIPPIPNEIDVKEKIRNIVEDEGDSENVEGRV
##RRKEPIDIAIELCVYCMKSQIFLDGNKRASVIFANHYLISHGIG
##end-Protein
35 length:9081 GeneMark.hmm gene 1 3378 . - 0 gene_id 2
35 length:9081 GeneMark.hmm gene 3974 4357 . - 0 gene_id 3
35 length:9081 GeneMark.hmm gene 4398 4652 . - 0 gene_id 4
35 length:9081 GeneMark.hmm gene 4792 9081 . - 0 gene_id 5
##Protein 2
##MNDKMQMHRLIEQKRIEGADKKPRYGMRKLTIGTVSCLLGFASLLAFTTPNFSQAAESGN
##TGGGTNSLITGTVPKENKSSSEEGEKLRAPQDVSGQLENLKIKLSGDNHENASLIHPLRP
##ADDNDESSDQQMKVNFSFDVDGSEIKEGDYFDLNLSNNLNLYGATSKKADIQTKLYVGND
##LVAKGVYDVENHRMRYTFTKKASEYGHFSQSVMEPVFIDAKGVPTNNKNVAVEASIGNHK
##SQKEVEVTYDLQPQAGQDNLNSNGTANLFDIDESTGTYKETIYVNNKQREQNNTRILIEN
EXPECTED INPUT (v3.25+)
In my tests of v 3.25 the use of 'gene' in the column above has been replaced with CDS instead.
The rest appears to be the same.
SRS019986_Baylor_scaffold_53 GeneMark.hmm CDS 3 512 -635.997977 + 0 gene_id=18, length=510, gene_score=-635.997977, rbs_score=-0.020000, rbs_spacer=-1, stop_enforced=N, start_codon=0, logodd=37.242660
SRS019986_Baylor_scaffold_53 GeneMark.hmm CDS 530 1501 -1202.233893 + 0 gene_id=19, length=972, gene_score=-1202.233893, rbs_score=-0.020000, rbs_spacer=-1, stop_enforced=N, start_codon=0, logodd=62.393607
SRS019986_Baylor_scaffold_53 GeneMark.hmm CDS 1603 2109 -608.550058 + 0 gene_id=20, length=507, gene_score=-608.550058, rbs_score=-0.020000, rbs_spacer=-1, stop_enforced=N, start_codon=0, logodd=52.060246
EXAMPLE OUTPUT:
855 GeneMark.hmm gene 1 852 . - . ID=HUZ239124.gene.1
855 GeneMark.hmm mRNA 1 852 . - . ID=HUZ239124.mRNA.1;Parent=HUZ239124.gene.1
855 GeneMark.hmm CDS 1 852 . - 0 ID=HUZ239124.CDS.1;Parent=HUZ239124.mRNA.1
855 GeneMark.hmm exon 1 852 . - . ID=HUZ239124.exon.1;Parent=HUZ239124.mRNA.1
855 GeneMark.hmm polypeptide 1 852 . - . ID=HUZ239124.polypeptide.1;Parent=HUZ239124.mRNA.1
'''
def main():
    """
    Convert Metagenemark GFF-ish output into canonical GFF3 gene models.

    Each input 'gene'/'CDS' row (version-dependent) becomes a full
    gene -> mRNA -> CDS/exon/polypeptide model written to the output file,
    followed by any comment lines collected since the previous row.
    Embedded '##Protein N' comment blocks are optionally captured into a
    protein FASTA file when --protein_fasta is given.
    """
    parser = argparse.ArgumentParser( description='Metagenemark GFF -> GFF3 conversion script')

    ## output file to be written
    parser.add_argument('-i', '--input', type=str, required=True, help='Path to a GFF file created by Metagenemark' )
    parser.add_argument('-o', '--output', type=str, required=True, help='Path to an output file to be created' )
    parser.add_argument('-p', '--prefix', type=str, required=True, help='Prefix to use in ID generation')
    parser.add_argument('-pf', '--protein_fasta', type=str, required=False, help='Optional protein FASTA to be written')
    args = parser.parse_args()

    assemblies = dict()
    current_assembly = None

    # key like 2 = SRS014890.polypeptide.2
    polypeptide_lookup = dict()
    writing_protein = False
    current_gene_comment_lines = list()

    fout = open(args.output, mode='wt', encoding='utf-8')
    fout.write("##gff-version 3\n")

    # bugfix: protein_out was previously only defined when --protein_fasta was
    # passed, so inputs containing ##Protein blocks raised NameError without
    # it.  All writes to it are now guarded.
    protein_out = None
    if args.protein_fasta is not None:
        protein_out = open(args.protein_fasta, mode='wt', encoding='utf-8')

    for line in open(args.input):
        if line.startswith("#"):
            if line.startswith("##FASTA"):
                current_gene_comment_lines.append("#{0}".format(line))

            elif line.startswith("##end-Protein"):
                writing_protein = False
                current_gene_comment_lines.append(line)

            # since we're already doing our own header, don't duplicate the old one
            elif line.startswith("##gff-version"):
                continue

            else:
                if line.startswith("##Protein "):
                    m = re.match(r"##Protein (\d+)", line)
                    if m:
                        writing_protein = True
                        if protein_out is not None:
                            protein_out.write(">{0}\n".format(polypeptide_lookup[m.group(1)]))
                    else:
                        raise Exception("ERROR: Expected line to match: ##Protein N")
                elif writing_protein == True:
                    # strip the leading '##' from embedded protein residue lines
                    if protein_out is not None:
                        protein_out.write(line[2:])

                current_gene_comment_lines.append(line)
        else:
            cols = line.split("\t")

            if len(cols) != 9:
                continue

            mol_id = cols[0]

            # molecule IDs containing whitespace are truncated at the first space
            mol_id_m = re.match(r'^(\S+) ', mol_id)
            if mol_id_m:
                mol_id = mol_id_m.group(1)

            feat_type = cols[2]

            ## we expect only gene types here
            if feat_type not in ['gene', 'CDS']:
                raise Exception("ERROR: expected only 'gene' or 'CDS' feature types as input (depending on metagenemark version).")

            m_gene = re.match(r'gene_id[ =](\d+)', cols[8])
            if m_gene:
                gene_num = m_gene.group(1)
            else:
                raise Exception("ERROR: expected 9th column to have gene ids like: gene_id 5")

            ## initialize this assembly if we haven't seen it yet
            if mol_id not in assemblies:
                assemblies[mol_id] = things.Assembly(id=mol_id)

            current_assembly = assemblies[mol_id]

            # build the full canonical model: gene -> mRNA -> CDS/exon/polypeptide,
            # all sharing the row's (0-based fmin, fmax) coordinates
            gene = things.Gene(id="{0}.gene.{1}".format(args.prefix, gene_num))
            gene.locate_on( target=current_assembly, fmin=int(cols[3]) - 1, fmax=int(cols[4]), strand=cols[6] )

            mRNA = things.mRNA(id="{0}.mRNA.{1}".format(args.prefix, gene_num), parent=gene.id)
            mRNA.locate_on( target=current_assembly, fmin=int(cols[3]) - 1, fmax=int(cols[4]), strand=cols[6] )
            gene.add_mRNA(mRNA)

            CDS = things.CDS(id="{0}.CDS.{1}".format(args.prefix, gene_num), parent=mRNA.id)
            CDS.locate_on( target=current_assembly, fmin=int(cols[3]) - 1, fmax=int(cols[4]), strand=cols[6], phase=int(cols[7]) )
            mRNA.add_CDS(CDS)

            exon = things.Exon(id="{0}.exon.{1}".format(args.prefix, gene_num), parent=mRNA.id)
            exon.locate_on( target=current_assembly, fmin=int(cols[3]) - 1, fmax=int(cols[4]), strand=cols[6] )
            mRNA.add_exon(exon)

            polypeptide_id = "{0}.polypeptide.{1}".format(args.prefix, gene_num)
            polypeptide = things.Polypeptide(id=polypeptide_id, parent=mRNA.id)
            polypeptide.locate_on( target=current_assembly, fmin=int(cols[3]) - 1, fmax=int(cols[4]), strand=cols[6] )
            mRNA.add_polypeptide(polypeptide)

            # remember the polypeptide ID so a later ##Protein N block can be
            # written out under it
            polypeptide_lookup[gene_num] = polypeptide_id

            # emit this gene followed by the comment lines collected with it
            gene.print_as(fh=fout, source='GeneMark.hmm', format='gff3')
            fout.write( "".join(current_gene_comment_lines) )
            current_gene_comment_lines = list()

    fout.close()
    if protein_out is not None:
        protein_out.close()
# Standard CLI entry point guard: run main() only when executed as a script,
# not when this file is imported as a module.
if __name__ == '__main__':
    main()
| mit | 1059223de303d474d5f9c553cfdef940 | 40.488636 | 237 | 0.625308 | 2.846784 | false | false | false | false |
ginkgobioworks/edge | src/edge/south_migrations/0004_auto__chg_field_genome_parent.py | 1 | 7758 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: alter Genome.parent FK to add on_delete=models.PROTECT (South-generated)."""
        # Changing field 'Genome.parent'
        db.alter_column(
            u"edge_genome",
            "parent_id",
            self.gf("django.db.models.fields.related.ForeignKey")(
                to=orm["edge.Genome"], null=True, on_delete=models.PROTECT
            ),
        )
    def backwards(self, orm):
        """Revert: restore Genome.parent FK without the on_delete=PROTECT argument."""
        # Changing field 'Genome.parent'
        db.alter_column(
            u"edge_genome",
            "parent_id",
            self.gf("django.db.models.fields.related.ForeignKey")(
                to=orm["edge.Genome"], null=True
            ),
        )
models = {
"edge.chunk": {
"Meta": {"object_name": "Chunk"},
"id": (
"django.db.models.fields.BigIntegerField",
[],
{"primary_key": "True"},
),
"initial_fragment": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Fragment']", "on_delete": "models.PROTECT"},
),
"sequence": ("django.db.models.fields.TextField", [], {"null": "True"}),
},
"edge.chunk_feature": {
"Meta": {"object_name": "Chunk_Feature"},
"chunk": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Chunk']", "on_delete": "models.PROTECT"},
),
"feature": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Feature']", "on_delete": "models.PROTECT"},
),
"feature_base_first": ("django.db.models.fields.IntegerField", [], {}),
"feature_base_last": ("django.db.models.fields.IntegerField", [], {}),
"id": (
"django.db.models.fields.BigIntegerField",
[],
{"primary_key": "True"},
),
},
"edge.edge": {
"Meta": {"object_name": "Edge"},
"fragment": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Fragment']", "on_delete": "models.PROTECT"},
),
"from_chunk": (
"django.db.models.fields.related.ForeignKey",
[],
{
"related_name": "'out_edges'",
"on_delete": "models.PROTECT",
"to": "orm['edge.Chunk']",
},
),
"id": (
"django.db.models.fields.BigIntegerField",
[],
{"primary_key": "True"},
),
"to_chunk": (
"django.db.models.fields.related.ForeignKey",
[],
{
"related_name": "'in_edges'",
"null": "True",
"on_delete": "models.PROTECT",
"to": "orm['edge.Chunk']",
},
),
},
"edge.feature": {
"Meta": {"object_name": "Feature"},
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"length": ("django.db.models.fields.IntegerField", [], {}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
"strand": ("django.db.models.fields.IntegerField", [], {"null": "True"}),
"type": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
},
"edge.fragment": {
"Meta": {"object_name": "Fragment"},
"circular": ("django.db.models.fields.BooleanField", [], {}),
"created_on": (
"django.db.models.fields.DateTimeField",
[],
{"auto_now_add": "True", "null": "True", "blank": "True"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "256"}),
"parent": (
"django.db.models.fields.related.ForeignKey",
[],
{
"to": "orm['edge.Fragment']",
"null": "True",
"on_delete": "models.PROTECT",
},
),
"start_chunk": (
"django.db.models.fields.related.ForeignKey",
[],
{
"to": "orm['edge.Chunk']",
"null": "True",
"on_delete": "models.PROTECT",
},
),
},
"edge.fragment_chunk_location": {
"Meta": {
"unique_together": "(('fragment', 'chunk'),)",
"object_name": "Fragment_Chunk_Location",
"index_together": "(('fragment', 'base_last'), ('fragment', 'base_first'))",
},
"base_first": ("django.db.models.fields.IntegerField", [], {}),
"base_last": ("django.db.models.fields.IntegerField", [], {}),
"chunk": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Chunk']", "on_delete": "models.PROTECT"},
),
"fragment": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Fragment']", "on_delete": "models.PROTECT"},
),
"id": (
"django.db.models.fields.BigIntegerField",
[],
{"primary_key": "True"},
),
},
"edge.genome": {
"Meta": {"object_name": "Genome"},
"created_on": (
"django.db.models.fields.DateTimeField",
[],
{"auto_now_add": "True", "null": "True", "blank": "True"},
),
"fragments": (
"django.db.models.fields.related.ManyToManyField",
[],
{
"to": "orm['edge.Fragment']",
"through": "orm['edge.Genome_Fragment']",
"symmetrical": "False",
},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "256"}),
"notes": (
"django.db.models.fields.TextField",
[],
{"null": "True", "blank": "True"},
),
"parent": (
"django.db.models.fields.related.ForeignKey",
[],
{
"to": "orm['edge.Genome']",
"null": "True",
"on_delete": "models.PROTECT",
},
),
},
"edge.genome_fragment": {
"Meta": {"object_name": "Genome_Fragment"},
"fragment": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Fragment']"},
),
"genome": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['edge.Genome']"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"inherited": ("django.db.models.fields.BooleanField", [], {}),
},
}
complete_apps = ["edge"]
| mit | cc591a673ae249a27dc4f526864cbd78 | 36.119617 | 92 | 0.412349 | 4.420513 | false | false | false | false |
ginkgobioworks/edge | src/edge/ssr/crelox.py | 1 | 2387 | from edge.ssr import rc, lower_no_whitespace, Reaction, Integration, Excision, Inversion, RMCE
class Sites(object):
    """
    lox recombination site sequences recognized by Cre.

    Each 34 bp literal is written as three space-separated parts and is
    normalized to a lowercase, whitespace-free string by lower_no_whitespace().
    NOTE(review): the grouping appears to be 13 bp arm / 8 bp spacer / 13 bp
    arm, with sites of different spacers (lox5171, lox2272, loxm2*) being
    heterospecific -- confirm against the Cre/lox literature.
    """
    # wild-type site plus its left-arm (lox71) / right-arm (lox66) mutants and
    # the double arm mutant (lox72)
    loxP = lower_no_whitespace("ATAACTTCGTATA GCATACAT TATACGAAGTTAT")
    lox66 = lower_no_whitespace("ATAACTTCGTATA GCATACAT TATACGAACGGTA")
    lox71 = lower_no_whitespace("TACCGTTCGTATA GCATACAT TATACGAAGTTAT")
    lox72 = lower_no_whitespace("TACCGTTCGTATA GCATACAT TATACGAACGGTA")
    # spacer variants (lowercase letters mark spacer changes vs. loxP)
    lox5171 = lower_no_whitespace("ATAACTTCGTATA GtAcACAT TATACGAAGTTAT")
    lox2272 = lower_no_whitespace("ATAACTTCGTATA GgATACtT TATACGAAGTTAT")
    # m2 spacer variant plus its own 66/71/72 arm mutants
    loxm2 = lower_no_whitespace("ATAACTTCGTATA TGGTTTCT TATACGAAGTTAT")
    loxm2_66 = lower_no_whitespace("ATAACTTCGTATA TGGTTTCT TATACGAACGGTA")
    loxm2_71 = lower_no_whitespace("TACCGTTCGTATA TGGTTTCT TATACGAAGTTAT")
    loxm2_72 = lower_no_whitespace("TACCGTTCGTATA TGGTTTCT TATACGAACGGTA")
class CreLoxReaction(Reaction):
    """
    The set of Cre-mediated recombination events supported between lox sites.

    Each entry pairs the lox site(s) required on the substrate(s) with the
    site(s) left behind, expressed via the generic Integration / Excision /
    Inversion / RMCE event types imported from edge.ssr.
    """

    @staticmethod
    def allowed():
        """Return the list of recombination event templates Cre may apply."""
        return [
            # RMCE (cassette exchange) between heterospecific site pairs
            RMCE(Sites.loxP, Sites.lox2272, Sites.loxP, Sites.lox2272, Sites.loxP, Sites.lox2272),
            RMCE(Sites.lox2272, Sites.loxP, Sites.lox2272, Sites.loxP, Sites.lox2272, Sites.loxP),

            # RMCE with arm-mutant 66/71 pairs yielding the double-mutant lox72
            RMCE(Sites.lox66, Sites.lox2272,
                 Sites.lox71, Sites.lox2272,
                 Sites.lox72, Sites.lox2272),
            RMCE(Sites.lox2272, Sites.lox66,
                 Sites.lox2272, Sites.lox71,
                 Sites.lox2272, Sites.lox72),
            # NOTE(review): this entry produces (loxP, lox72) where the
            # analogous one below produces (loxm2_72, lox72) -- confirm the
            # loxP product is intentional and not a typo for loxm2_72.
            RMCE(Sites.loxm2_71, Sites.lox66,
                 Sites.loxm2_66, Sites.lox71,
                 Sites.loxP, Sites.lox72),
            RMCE(Sites.loxm2_66, Sites.lox66,
                 Sites.loxm2_71, Sites.lox71,
                 Sites.loxm2_72, Sites.lox72),

            # integration of a circular donor into a genomic site
            Integration(Sites.lox66, Sites.lox71, Sites.lox72, Sites.loxP),
            Integration(Sites.lox71, Sites.lox66, Sites.loxP, Sites.lox72),

            # excision between two directly-repeated sites
            Excision(Sites.loxP, Sites.loxP, Sites.loxP),
            Excision(Sites.lox66, Sites.loxP, Sites.loxP),
            Excision(Sites.loxP, Sites.lox66, Sites.lox66),
            Excision(Sites.lox71, Sites.loxP, Sites.lox71),
            Excision(Sites.loxP, Sites.lox71, Sites.loxP),

            # inversion between two inverted (rc) sites
            Inversion(Sites.loxP, rc(Sites.loxP), Sites.loxP, rc(Sites.loxP)),
            Inversion(Sites.lox66, rc(Sites.loxP), Sites.loxP, rc(Sites.lox66)),
            Inversion(Sites.lox71, rc(Sites.loxP), Sites.lox71, rc(Sites.loxP)),
        ]
| mit | 643ac372e6a6edbc223d416490329235 | 45.803922 | 98 | 0.653121 | 2.520591 | false | false | false | false |
ginkgobioworks/edge | src/edge/tests/test_importer.py | 1 | 50909 | # flake8: noqa
import os
import tempfile
import unittest.mock as mock
from django.test import TestCase
from edge import import_gff
from edge.models import Genome
class ImporterTest(TestCase):
    """Tests for importing GFF3 files (with an embedded ``##FASTA`` section)
    into Genome objects: fragment creation and feature annotation.

    Fixture data that was previously duplicated across tests is shared via
    class constants, and the write-temp-file/import/unlink boilerplate is
    factored into helpers.  GFF feature columns are tab-separated, written
    here with explicit ``\\t`` escapes.
    """

    # Two 80 bp lines used as the FASTA body of every 160 bp test chromosome.
    SEQ_160 = (
        "CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC\n"
        "ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT\n"
    )

    # Two-chromosome GFF document shared by the first two tests.
    TWO_CHROMOSOME_GFF = (
        "##gff-version 3\n"
        "chrI\tTest\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1\n"
        "chrI\tTest\tcds\t30\t80\t.\t-\t.\tID=i2;Name=f2\n"
        "chrI\tTest\trbs\t20\t28\t.\t+\t.\tID=i3\n"
        "chrII\tTest\tgene\t40\t60\t.\t-\t.\tID=f4;gene=g4\n"
        "chrII\tTest\tgene\t20\t80\t.\t+\t.\tID=i5;Name=f5\n"
        "###\n"
        "##FASTA\n"
        ">chrI\n" + SEQ_160 + ">chrII\n" + SEQ_160
    )

    # 1540 bp (22 x 70) sequence used by the Staphylococcus-derived tests.
    STAPH_SEQ = (
        "ATGTCAGAGAAAGAAATTTGGGATAAAGTTTTAGAAATTGCCCAGGAAAGAATTTCAAACACTAGTTATC\n"
        "AAACGTTCATAAAAGATACGCAACTCTACTCACTTAAAAATGACGAAGCCATTATATTAGTAAGTCTGCC\n"
        "TTTCAATGCGAGTTGGCTTAATCAGCGATATTCAGAAATTATGCAGGCTATTATTTATGATGTCATCGGT\n"
        "TATGAAGTGAAACCACATTTTATTTCTGAAGATGAACTTGCATCCTACAACAATGTAAATACACAAGAAG\n"
        "TTCAAGAACCCCAAGTACAACATTCTTCTATAGATGATAAGACTTGGGGAAAAGAACAATTTAATATGCA\n"
        "CAATACATTCGATACATTTGTCATTGGACCTGGTAACCGTTTCCCACATGCTGCAAGTTTAGCTGTTGCA\n"
        "GAAGCACCGGCAGAAGCTTATAATCCATTATTTATATATGGAGGCGTAGGTCTAGGTAAAACACATTTAA\n"
        "TGCATGCAATTGGGCACCATGTTCTTAGCAACAAACCTAATGCTAAAGTCATTTACACTTCTAGTGAGAA\n"
        "ATTCACAAACGAATTTATTAAATCAATACGTGATAATGAAACTGAAGCATTTCGTGAAAAGTATCGTAAA\n"
        "ATTGATGTTTTATTAATTGATGATATTCAATTCATTCAAAATAAAGAACAAACGCAAGAAGAGTTCTTCC\n"
        "ATACTTTTAATGAATTACATCAAAATAATAAGCAAATCGTTATTTCAAGTGATCGTCCACCAAAAGAAAT\n"
        "TGCTAAGCTGGAAGACCGTCTTCGCTCTCGTTTTGAGTGGGGACTAATAGTTGATATCACGCCACCTGAT\n"
        "TACGAAACAAGAATGGCAATATTACAAAAGAAAATTGAAGAAGAAAATCTTGATATTCCGCCAGAAGCTT\n"
        "TGAATTACATCGCCAACCAAATTCAATCAAATATTCGTGAACTTGAAGGCGCATTAACTCGACTTTTAGC\n"
        "GTACTCCAAATTACAAGGAAAACCTATTACAACCGAACTCACTGCAGAAGCGTTAAAAGATATCATTCAG\n"
        "TCACCTAAGTCTAAAAAGATTACAATTCAAGATATCCAAAAGGTAGTTGGTCAGTATTATAGTGTAAGAA\n"
        "TTGAAGATTTTAGTGCCAAAAAACGTACAAAGTCAATTGCTTACCCACGACAAATAGCTATGTATCTATC\n"
        "TAGAGAATTAACTGATTTTTCATTACCTAAGATAGGTGAAGAATTTGGAGGTCGCGATCATACAACAGTT\n"
        "ATCCATGCCCATGAAAAGATTGCAAATGATATCAAGTCTGATCCTACATTTAAGCAAGAAGTAGAAAACT\n"
        "TAGAAAAAGAAATTAGAAATCAGTAATGGGAAATAAATCCATATTGAATAAATTGAAAATTAGCAGATTG\n"
        "TAAACTGTCTTTTAGGGAATAATTACTATGTAAATCAATTCCTAAACACTTAAAAAGATGGGGAGTTGTC\n"
        "CCTCTGAATAACAATAGCAATTGTGGATAATGTGAAAAAATAATACACAACATACACAGTTTATCCACAT\n"
    )

    SPECIES_LINE = (
        "##species https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=1282\n"
    )

    GENE_1_LINE = (
        "1\t.\tgene\t1\t1356\t.\t+\t.\tID=gene-AZS-SE_43_000001;Name=dnaA;gbkey=Gene;"
        "gene=dnaA;gene_biotype=protein_coding;locus_tag=AZS-SE_43_000001\n"
    )

    @staticmethod
    def _region_line(end, dbxref=False):
        """Return a GFF 'region' feature row covering bases 1..`end` of sequence '1'."""
        tag = "Dbxref=taxon:1282;" if dbxref else ""
        return (
            "1\tLocal\tregion\t1\t%d\t.\t+\t.\tID=1:1..2424096;%sIs_circular=true;"
            "Name=ANONYMOUS;gbkey=Src;genome=chromosome;mol_type=genomic DNA;"
            "strain=AZS-SE_43\n" % (end, tag)
        )

    def _single_chromosome_gff(self, feature_lines):
        """Build a one-chromosome (chrI, 160 bp) GFF document from raw feature rows."""
        return (
            "##gff-version 3\n"
            "chrI\tTest\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1\n"
            + "".join(line + "\n" for line in feature_lines)
            + "###\n##FASTA\n>chrI\n"
            + self.SEQ_160
        )

    def _write_gff(self, data):
        """Write GFF text to a temp file and return its path; caller must unlink."""
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
            f.write(data)
        return f.name

    def _import(self, data, name="Foo"):
        """Import GFF text as a new genome named `name`, cleaning up the temp file."""
        path = self._write_gff(data)
        try:
            return Genome.import_gff(name, path)
        finally:
            os.unlink(path)

    def _fragment(self, genome, name):
        """Return the indexed fragment of `genome` with the given name."""
        return [
            fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == name
        ][0]

    def _assert_annotation(self, annotation, first, last, name, strand):
        """Check an annotation's base span, feature name, and strand."""
        self.assertEqual(annotation.base_first, first)
        self.assertEqual(annotation.base_last, last)
        self.assertEqual(annotation.feature.name, name)
        self.assertEqual(annotation.feature.strand, strand)

    def _assert_staph_import(self, data, n_annotations):
        """Import `data` and verify the single 1540 bp fragment named '1'."""
        genome = self._import(data)
        self.assertEqual(genome.fragments.count(), 1)
        frag = self._fragment(genome, "1")
        self.assertEqual(len(frag.sequence), 1540)
        self.assertEqual(len(frag.annotations()), n_annotations)

    def test_import_gff_procedure_creates_genome_and_annotations(self):
        path = self._write_gff(self.TWO_CHROMOSOME_GFF)
        try:
            self.assertEqual(Genome.objects.filter(name="TestGenome").count(), 0)
            import_gff("TestGenome", path)
            self.assertEqual(Genome.objects.filter(name="TestGenome").count(), 1)
            # importing again with the same name does not work
            self.assertRaises(Exception, import_gff, "TestGenome", path)
            # can import again with a different name
            self.assertEqual(Genome.objects.filter(name="TestGenome2").count(), 0)
            import_gff("TestGenome2", path)
            self.assertEqual(Genome.objects.filter(name="TestGenome2").count(), 1)
        finally:
            os.unlink(path)
        genome = Genome.objects.get(name="TestGenome")
        # created one fragment for each sequence in the GFF file
        self.assertCountEqual(
            [fr.name for fr in genome.fragments.all()], ["chrI", "chrII"]
        )
        chrI = self._fragment(genome, "chrI")
        self.assertEqual(len(chrI.sequence), 160)
        self.assertEqual(len(chrI.annotations()), 2)
        chrII = self._fragment(genome, "chrII")
        self.assertEqual(len(chrII.sequence), 160)
        self.assertEqual(len(chrII.annotations()), 2)

    def test_import_gff_creates_fragments_and_annotate_features(self):
        genome = self._import(self.TWO_CHROMOSOME_GFF)
        # created one fragment for each sequence in the GFF file
        self.assertCountEqual(
            [fr.name for fr in genome.fragments.all()], ["chrI", "chrII"]
        )
        chrI = self._fragment(genome, "chrI")
        self.assertEqual(len(chrI.sequence), 160)
        # the whole-sequence "chromosome" feature is skipped
        self.assertEqual(len(chrI.annotations()), 2)
        self._assert_annotation(chrI.annotations()[0], 20, 28, "i3", 1)  # no Name: ID used
        self._assert_annotation(chrI.annotations()[1], 30, 80, "f2", -1)
        chrII = self._fragment(genome, "chrII")
        self.assertEqual(len(chrII.sequence), 160)
        # consecutive annotations merged even though they span multiple chunks
        self.assertEqual(len(chrII.annotations()), 2)
        self._assert_annotation(chrII.annotations()[0], 20, 80, "f5", 1)
        self._assert_annotation(chrII.annotations()[1], 40, 60, "g4", -1)  # gene attr used

    def test_import_feature_starting_at_first_base(self):
        genome = self._import(self._single_chromosome_gff([
            "chrI\tTest\tcds\t1\t80\t.\t-\t.\tID=i2;Name=f2",
            "chrI\tTest\trbs\t20\t28\t.\t+\t.\tID=i3",
        ]))
        chrI = self._fragment(genome, "chrI")
        self.assertEqual(len(chrI.sequence), 160)
        # whole-sequence annotation skipped
        self.assertEqual(len(chrI.annotations()), 2)
        self._assert_annotation(chrI.annotations()[0], 1, 80, "f2", -1)
        self._assert_annotation(chrI.annotations()[1], 20, 28, "i3", 1)  # no Name: ID used

    def test_import_partially_overlapping_features(self):
        genome = self._import(self._single_chromosome_gff([
            "chrI\tTest\tcds\t19\t21\t.\t-\t.\tID=i2;Name=f2",
            "chrI\tTest\trbs\t20\t28\t.\t+\t.\tID=i3",
        ]))
        chrI = self._fragment(genome, "chrI")
        self.assertEqual(len(chrI.sequence), 160)
        # whole-sequence annotation skipped
        self.assertEqual(len(chrI.annotations()), 2)
        self._assert_annotation(chrI.annotations()[1], 20, 28, "i3", 1)  # no Name: ID used
        self._assert_annotation(chrI.annotations()[0], 19, 21, "f2", -1)

    def test_import_overlapping_features(self):
        genome = self._import(self._single_chromosome_gff([
            "chrI\tTest\tcds\t20\t28\t.\t-\t.\tID=i2;Name=f2",
            "chrI\tTest\trbs\t20\t28\t.\t+\t.\tID=i3",
        ]))
        chrI = self._fragment(genome, "chrI")
        self.assertEqual(len(chrI.sequence), 160)
        # whole-sequence annotation skipped
        self.assertEqual(len(chrI.annotations()), 2)
        self._assert_annotation(chrI.annotations()[1], 20, 28, "i3", 1)  # no Name: ID used
        self._assert_annotation(chrI.annotations()[0], 20, 28, "f2", -1)

    def test_import_feature_ending_at_last_base(self):
        genome = self._import(self._single_chromosome_gff([
            "chrI\tTest\tcds\t20\t28\t.\t-\t.\tID=i2;Name=f2",
            "chrI\tTest\trbs\t20\t160\t.\t+\t.\tID=i3",
        ]))
        chrI = self._fragment(genome, "chrI")
        self.assertEqual(len(chrI.sequence), 160)
        # whole-sequence annotation skipped
        self.assertEqual(len(chrI.annotations()), 2)
        self._assert_annotation(chrI.annotations()[1], 20, 160, "i3", 1)  # no Name: ID used
        self._assert_annotation(chrI.annotations()[0], 20, 28, "f2", -1)

    def test_import_supports_lcl_fasta_header(self):
        data = (
            "##gff-version 3\n"
            "##sequence-region 1 1540\n"
            + self.SPECIES_LINE
            + self._region_line(1540)
            + self.GENE_1_LINE
            + "##FASTA\n"
            ">lcl|1 Staphylococcus epidermidis strain AZS-SE_43 chromosome, complete genome\n"
            + self.STAPH_SEQ
        )
        self._assert_staph_import(data, 1)

    def test_import_ignores_bad_sequence_region_line(self):
        data = (
            "##gff-version 3\n"
            "##sequence-region 1 1540\n"  # malformed: only two fields
            + self.SPECIES_LINE
            + self._region_line(1540, dbxref=True)
            + self.GENE_1_LINE
            + "##FASTA\n>1\n"
            + self.STAPH_SEQ
        )
        self._assert_staph_import(data, 1)

    def test_ignores_first_whole_region_annotation_that_went_beyond_fasta_sequence_length(self):
        # there are 1540 bps, but 1541 is used in the first annotation
        data = (
            "##gff-version 3\n"
            + self.SPECIES_LINE
            + self._region_line(1541, dbxref=True)
            + self.GENE_1_LINE
            + "##FASTA\n>1\n"
            + self.STAPH_SEQ
        )
        self._assert_staph_import(data, 1)

    def test_ignores_annotations_at_end_of_gff_that_go_beyond_fasta_sequence(self):
        # there are 1540 bps, but 1541 is used in the last annotation
        gene2 = (
            "1\t.\tgene\t1357\t1541\t.\t+\t.\tID=gene-AZS-SE_43_000002;Name=dnaA;"
            "gbkey=Gene;gene=dnaA;gene_biotype=protein_coding;"
            "locus_tag=AZS-SE_43_000002\n"
        )
        data = (
            "##gff-version 3\n"
            + self.SPECIES_LINE
            + self._region_line(1540, dbxref=True)
            + self.GENE_1_LINE
            + gene2
            + "##FASTA\n>1\n"
            + self.STAPH_SEQ
        )
        self._assert_staph_import(data, 3)
class QualifierTest(TestCase):
    """Tests for how GFF attribute columns map onto feature names and qualifiers."""

    def import_with_qualifiers(self, qualifiers, phase="."):
        """Import a one-feature GFF whose cds row carries `qualifiers` and `phase`.

        The resulting genome is stored on ``self.genome``.
        """
        data = """##gff-version 3
chrI\tTest\tcds\t30\t80\t.\t-\t%s\t%s
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
""" % (
            phase,
            qualifiers,
        )
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
            f.write(data)
        self.genome = Genome.import_gff("Foo", f.name)
        os.unlink(f.name)

    def _chrI(self):
        """Return the indexed chrI fragment of the imported genome."""
        return [
            f.indexed_fragment()
            for f in self.genome.fragments.all()
            if f.name == "chrI"
        ][0]

    def _assert_feature_name(self, qualifiers, expected):
        """Import with `qualifiers` and check the single feature's derived name."""
        self.import_with_qualifiers(qualifiers)
        chrI = self._chrI()
        self.assertEqual(len(chrI.annotations()), 1)
        self.assertEqual(chrI.annotations()[0].feature.name, expected)

    def test_stores_phase(self):
        # with phase "." no phase qualifier is recorded
        self.import_with_qualifiers("name=g2")
        chrI = self._chrI()
        self.assertEqual(len(chrI.annotations()), 1)
        self.assertEqual(chrI.annotations()[0].feature.name, "g2")
        self.assertEqual(
            chrI.annotations()[0].feature.qualifiers, dict(name=["g2"], source=["Test"])
        )
        # a numeric phase is kept as a (stringified) qualifier
        self.import_with_qualifiers("name=g2", phase=2)
        chrI = self._chrI()
        self.assertEqual(len(chrI.annotations()), 1)
        self.assertEqual(chrI.annotations()[0].feature.name, "g2")
        self.assertEqual(
            chrI.annotations()[0].feature.qualifiers,
            dict(name=["g2"], phase=["2"], source=["Test"]),
        )

    def test_keeps_qualifiers(self):
        self.import_with_qualifiers("name=g2;locus_tag=b0002;aliases=a,b,c")
        chrI = self._chrI()
        self.assertEqual(len(chrI.annotations()), 1)
        self.assertEqual(chrI.annotations()[0].feature.name, "g2")
        self.assertEqual(
            chrI.annotations()[0].feature.qualifiers,
            dict(
                name=["g2"],
                locus_tag=["b0002"],
                aliases=["a", "b", "c"],
                source=["Test"],
            ),
        )

    def test_uses_name_qualifier_as_name_over_gene_qualifier(self):
        self._assert_feature_name("ID=i2;gene=g2;Name=f2", "f2")

    def test_uses_name_qualifier_as_name_over_locus_tag_qualifier(self):
        self._assert_feature_name("ID=i2;Name=f2;locus_tag=l2", "f2")

    def test_uses_locus_qualifier_as_name_as_name_over_id(self):
        self._assert_feature_name("ID=i2;locus_tag=l2", "l2")

    def test_uses_id_as_name_if_nothing_else_available(self):
        self._assert_feature_name("ID=i2", "i2")

    def test_uses_feature_type_as_name_if_no_id(self):
        self._assert_feature_name("", "cds")
class JoinImporterTest(TestCase):
    def test_import_gff_CDS_subfragments(self):
        """A joined CDS and its child rows each become annotations on chrI."""
        data = """##gff-version 3
chrI\tTest\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1
chrI\tTest\tgene\t30\t80\t.\t+\t.\tID=i2g;Name=f2g
chrI\tTest\tCDS\t30\t80\t.\t+\t.\tID=i2;Name=f2
chrI\tTest\tCDS\t30\t41\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t50\t55\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t60\t80\t.\t+\t.\tParent=i2
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
            f.write(data)
            f.close()
        genome = Genome.import_gff("Foo", f.name)
        os.unlink(f.name)

        # one fragment per sequence in the GFF file
        self.assertCountEqual([fr.name for fr in genome.fragments.all()], ["chrI"])
        chrI = [
            fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
        ][0]
        self.assertEqual(len(chrI.sequence), 160)

        annotations = chrI.annotations()
        self.assertEqual(len(annotations), 4)
        expected = [
            (30, 80, "f2g", "gene"),
            (30, 41, "f2", "CDS"),
            (50, 55, "f2", "CDS"),
            (60, 80, "f2", "CDS"),
        ]
        for annotation, (first, last, name, ftype) in zip(annotations, expected):
            self.assertEqual(annotation.base_first, first)
            self.assertEqual(annotation.base_last, last)
            self.assertEqual(annotation.feature.name, name)
            self.assertEqual(annotation.feature.type, ftype)
def test_import_gff_CDS_subfragments_overlap(self):
data = """##gff-version 3
chrI\tTest\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1
chrI\tTest\tgene\t30\t80\t.\t+\t.\tID=i2g;Name=f2g
chrI\tTest\tCDS\t30\t80\t.\t+\t.\tID=i2;Name=f2
chrI\tTest\tCDS\t30\t41\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t41\t50\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t56\t61\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t60\t80\t.\t+\t.\tParent=i2
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
f.write(data)
f.close()
genome = Genome.import_gff("Foo", f.name)
os.unlink(f.name)
# created one fragment for each sequence in GFF file
self.assertCountEqual(
[fr.name for fr in genome.fragments.all()], ["chrI"]
)
chrI = [
fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
][0]
self.assertEqual(len(chrI.sequence), 160)
self.assertEqual(len(chrI.annotations()), 5)
self.assertEqual(chrI.annotations()[0].base_first, 30)
self.assertEqual(chrI.annotations()[0].base_last, 80)
self.assertEqual(chrI.annotations()[0].feature.name, 'f2g')
self.assertEqual(chrI.annotations()[0].feature.type, 'gene')
self.assertNotEqual(chrI.annotations()[0].feature.id, chrI.annotations()[1].feature.id)
self.assertEqual(chrI.annotations()[1].base_first, 30)
self.assertEqual(chrI.annotations()[1].base_last, 41)
self.assertEqual(chrI.annotations()[1].feature.name, 'f2')
self.assertEqual(chrI.annotations()[1].feature.type, 'CDS')
self.assertEqual(chrI.annotations()[2].base_first, 41)
self.assertEqual(chrI.annotations()[2].base_last, 50)
self.assertEqual(chrI.annotations()[2].feature.name, 'f2')
self.assertEqual(chrI.annotations()[2].feature.type, 'CDS')
self.assertEqual(chrI.annotations()[3].base_first, 56)
self.assertEqual(chrI.annotations()[3].base_last, 61)
self.assertEqual(chrI.annotations()[3].feature.name, 'f2')
self.assertEqual(chrI.annotations()[3].feature.type, 'CDS')
self.assertEqual(chrI.annotations()[4].base_first, 60)
self.assertEqual(chrI.annotations()[4].base_last, 80)
self.assertEqual(chrI.annotations()[4].feature.name, 'f2')
self.assertEqual(chrI.annotations()[4].feature.type, 'CDS')
self.assertEqual(chrI.annotations()[1].feature.id, chrI.annotations()[2].feature.id)
self.assertEqual(chrI.annotations()[2].feature.id, chrI.annotations()[3].feature.id)
self.assertEqual(chrI.annotations()[3].feature.id, chrI.annotations()[4].feature.id)
@mock.patch("edge.models.fragment.Annotation.from_chunk_feature_and_location_array")
def test_import_gff_CDS_subfragments_overlap_check_chunks(self, cf_fcl_mock):
data = """##gff-version 3
chrI\tTest\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1
chrI\tTest\tgene\t30\t80\t.\t+\t.\tID=i2g;Name=f2g
chrI\tTest\tCDS\t30\t80\t.\t+\t.\tID=i2;Name=f2
chrI\tTest\tCDS\t30\t41\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t41\t50\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t56\t61\t.\t+\t.\tParent=i2
chrI\tTest\tCDS\t60\t80\t.\t+\t.\tParent=i2
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
f.write(data)
f.close()
genome = Genome.import_gff("Foo", f.name)
os.unlink(f.name)
# created one fragment for each sequence in GFF file
self.assertCountEqual(
[fr.name for fr in genome.fragments.all()], ["chrI"]
)
chrI = [
fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
][0]
chrI.annotations()
gene_cfs = []
cds_cfs = []
args, _ = cf_fcl_mock.call_args
for cf, fcl in args[0]:
if cf.feature.type == 'gene':
gene_cfs.append(cf)
elif cf.feature.type == 'CDS':
cds_cfs.append(cf)
gene_chunk_starts = sorted([cf.fcl_base_first for cf in gene_cfs])
gene_chunk_ends = sorted([cf.fcl_base_last for cf in gene_cfs])
cds_chunk_starts = sorted([cf.fcl_base_first for cf in cds_cfs])
cds_chunk_ends = sorted([cf.fcl_base_last for cf in cds_cfs])
self.assertEqual(len(chrI.sequence), 160)
self.assertEqual(gene_chunk_starts, [30, 41, 42, 51, 56, 60, 62])
self.assertEqual(gene_chunk_ends, [40, 41, 50, 55, 59, 61, 80])
self.assertEqual(cds_chunk_starts, [30, 41, 41, 42, 56, 60, 60, 62])
self.assertEqual(cds_chunk_ends, [40, 41, 41, 50, 59, 61, 61, 80])
def test_import_gff_CDS_subfragments_SGD_CDS(self):
data = """##gff-version 3
chrI\tSGD\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1
chrI\tSGD\tgene\t20\t60\t.\t+\t.\tID=A1;Name=A1;gene=gene_A1
chrI\tSGD\tCDS\t20\t37\t.\t+\t0\tParent=A1_mRNA;Name=A1_CDS;orf_classification=Verified
chrI\tSGD\tintron\t38\t39\t.\t+\t.\tParent=A1_mRNA;Name=A1_intron;orf_classification=Verified
chrI\tSGD\tCDS\t40\t60\t.\t+\t0\tParent=A1_mRNA;Name=A1_CDS;orf_classification=Verified
chrI\tSGD\tmRNA\t20\t60\t.\t+\t.\tID=A1_mRNA;Name=A1_mRNA;Parent=A1
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
f.write(data)
f.close()
genome = Genome.import_gff("Foo", f.name)
os.unlink(f.name)
# created one fragment for each sequence in GFF file
self.assertCountEqual(
[fr.name for fr in genome.fragments.all()], ["chrI"]
)
chrI = [
fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
][0]
self.assertEqual(len(chrI.sequence), 160)
self.assertEqual(len(chrI.annotations()), 5)
self.assertEqual(chrI.annotations()[0].base_first, 20)
self.assertEqual(chrI.annotations()[0].base_last, 60)
self.assertEqual(chrI.annotations()[0].feature.name, 'A1')
self.assertEqual(chrI.annotations()[0].feature.type, 'gene')
self.assertNotEqual(chrI.annotations()[0].feature.id, chrI.annotations()[1].feature.id)
self.assertNotEqual(chrI.annotations()[0].feature.id, chrI.annotations()[2].feature.id)
self.assertNotEqual(chrI.annotations()[0].feature.id, chrI.annotations()[3].feature.id)
self.assertEqual(chrI.annotations()[1].base_first, 20)
self.assertEqual(chrI.annotations()[1].base_last, 37)
self.assertEqual(chrI.annotations()[1].feature.name, 'A1_CDS')
self.assertEqual(chrI.annotations()[1].feature.type, 'CDS')
self.assertNotEqual(chrI.annotations()[1].feature.id, chrI.annotations()[2].feature.id)
self.assertNotEqual(chrI.annotations()[1].feature.id, chrI.annotations()[3].feature.id)
self.assertEqual(chrI.annotations()[2].base_first, 20)
self.assertEqual(chrI.annotations()[2].base_last, 60)
self.assertEqual(chrI.annotations()[2].feature.name, 'A1_mRNA')
self.assertEqual(chrI.annotations()[2].feature.type, 'mRNA')
self.assertNotEqual(chrI.annotations()[2].feature.id, chrI.annotations()[3].feature.id)
self.assertEqual(chrI.annotations()[3].base_first, 38)
self.assertEqual(chrI.annotations()[3].base_last, 39)
self.assertEqual(chrI.annotations()[3].feature.name, 'A1_intron')
self.assertEqual(chrI.annotations()[3].feature.type, 'intron')
self.assertEqual(chrI.annotations()[4].base_first, 40)
self.assertEqual(chrI.annotations()[4].base_last, 60)
self.assertEqual(chrI.annotations()[4].feature.name, 'A1_CDS')
self.assertEqual(chrI.annotations()[4].feature.type, 'CDS')
self.assertEqual(chrI.annotations()[1].feature.id, chrI.annotations()[4].feature.id)
def test_import_subfeatures_simple_reverse_coordinates(self):
data = """##gff-version 3
chrI\tTest\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1
chrI\tTest\tgene\t20\t65\t.\t-\t.\tID=i2;Name=f2
chrI\tTest\tcds\t20\t28\t.\t-\t.\tParent=i2;Name=f2_cds
chrI\tTest\tcds\t58\t65\t.\t-\t.\tParent=i2;Name=f2_cds
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
f.write(data)
f.close()
genome = Genome.import_gff("Foo", f.name)
os.unlink(f.name)
# verify chrI fragment
chrI = [
fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
][0]
self.assertEqual(len(chrI.sequence), 160)
self.assertEqual(len(chrI.annotations()), 3)
self.assertEqual(chrI.annotations()[0].base_first, 20)
self.assertEqual(chrI.annotations()[0].base_last, 65)
self.assertEqual(chrI.annotations()[0].feature_base_first, 1)
self.assertEqual(chrI.annotations()[0].feature_base_last, 46)
self.assertEqual(chrI.annotations()[0].feature.name, "f2")
self.assertEquals(chrI.annotations()[0].feature.strand, -1)
self.assertEqual(chrI.annotations()[1].base_first, 20)
self.assertEqual(chrI.annotations()[1].base_last, 28)
self.assertEqual(chrI.annotations()[1].feature_base_first, 9)
self.assertEqual(chrI.annotations()[1].feature_base_last, 17)
self.assertEqual(chrI.annotations()[1].feature.name, "f2_cds")
self.assertEquals(chrI.annotations()[1].feature.strand, -1)
self.assertEqual(chrI.annotations()[2].base_first, 58)
self.assertEqual(chrI.annotations()[2].base_last, 65)
self.assertEqual(chrI.annotations()[2].feature_base_first, 1)
self.assertEqual(chrI.annotations()[2].feature_base_last, 8)
self.assertEqual(chrI.annotations()[2].feature.name, "f2_cds")
self.assertEquals(chrI.annotations()[2].feature.strand, -1)
def test_import_subfeatures_overlap_reverse_coordinates(self):
data = """##gff-version 3
chrI\tTest\tchromosome\t1\t160\t.\t.\t.\tID=i1;Name=f1
chrI\tTest\tgene\t20\t65\t.\t-\t.\tID=i2;Name=f2
chrI\tTest\tcds\t20\t28\t.\t-\t.\tParent=i2;Name=f2_cds
chrI\tTest\tintron\t29\t40\t.\t-\t.\tParent=i2;Name=f2_intron
chrI\tTest\tcds\t41\t60\t.\t-\t.\tParent=i2;Name=f2_cds
chrI\tTest\tcds\t58\t65\t.\t-\t.\tParent=i2;Name=f2_cds
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
f.write(data)
f.close()
genome = Genome.import_gff("Foo", f.name)
os.unlink(f.name)
# verify chrI fragment
chrI = [
fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
][0]
self.assertEqual(len(chrI.sequence), 160)
self.assertEqual(len(chrI.annotations()), 5)
self.assertEqual(chrI.annotations()[0].base_first, 20)
self.assertEqual(chrI.annotations()[0].base_last, 65)
self.assertEqual(chrI.annotations()[0].feature_base_first, 1)
self.assertEqual(chrI.annotations()[0].feature_base_last, 46)
self.assertEqual(chrI.annotations()[0].feature.name, "f2")
self.assertEquals(chrI.annotations()[0].feature.strand, -1)
self.assertEqual(chrI.annotations()[1].base_first, 20)
self.assertEqual(chrI.annotations()[1].base_last, 28)
self.assertEqual(chrI.annotations()[1].feature_base_first, 29)
self.assertEqual(chrI.annotations()[1].feature_base_last, 37)
self.assertEqual(chrI.annotations()[1].feature.name, "f2_cds")
self.assertEquals(chrI.annotations()[1].feature.strand, -1)
self.assertEqual(chrI.annotations()[2].base_first, 29)
self.assertEqual(chrI.annotations()[2].base_last, 40)
self.assertEqual(chrI.annotations()[2].feature_base_first, 1)
self.assertEqual(chrI.annotations()[2].feature_base_last, 12)
self.assertEqual(chrI.annotations()[2].feature.name, "f2_intron")
self.assertEquals(chrI.annotations()[2].feature.strand, -1)
self.assertEqual(chrI.annotations()[3].base_first, 41)
self.assertEqual(chrI.annotations()[3].base_last, 60)
self.assertEqual(chrI.annotations()[3].feature_base_first, 9)
self.assertEqual(chrI.annotations()[3].feature_base_last, 28)
self.assertEqual(chrI.annotations()[3].feature.name, "f2_cds")
self.assertEquals(chrI.annotations()[3].feature.strand, -1)
self.assertEqual(chrI.annotations()[4].base_first, 58)
self.assertEqual(chrI.annotations()[4].base_last, 65)
self.assertEqual(chrI.annotations()[4].feature_base_first, 1)
self.assertEqual(chrI.annotations()[4].feature_base_last, 8)
self.assertEqual(chrI.annotations()[4].feature.name, "f2_cds")
self.assertEquals(chrI.annotations()[4].feature.strand, -1)
class CircularImporterTest(TestCase):
    """GFF import behavior for circular fragments whose features cross the origin."""

    def test_circular_wrap_around_import(self):
        """A feature spanning the origin (150..10 on a 160 bp circle) is split
        into two annotations whose feature-relative coordinates wrap around."""
        data = """##gff-version 3
chrI\tTest\tregion\t1\t160\t.\t.\t.\tID=i1;Name=f1;Is_circular=True
chrI\tTest\tgene\t150\t10\t.\t+\t.\tID=i2g;Name=f2g
chrI\tTest\tCDS\t150\t160\t.\t+\t.\tName=i2gCDS;Parent=i2g
chrI\tTest\tCDS\t1\t10\t.\t+\t.\tName=i2gCDS;Parent=i2g
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as gff_file:
            gff_file.write(data)
            gff_file.close()
            genome = Genome.import_gff("Foo", gff_file.name)
            os.unlink(gff_file.name)
        # One fragment should exist per sequence in the GFF file.
        fragment = [
            fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
        ][0]
        self.assertEqual(len(fragment.sequence), 160)
        annotations = fragment.annotations()
        self.assertEqual(len(annotations), 4)
        # (base_first, base_last, name, type, feature_base_first, feature_base_last)
        expected = [
            (1, 10, 'f2g', 'gene', 12, 21),
            (1, 10, 'i2gCDS', 'CDS', 12, 21),
            (150, 160, 'f2g', 'gene', 1, 11),
            (150, 160, 'i2gCDS', 'CDS', 1, 11),
        ]
        for annotation, (first, last, name, ftype, fb_first, fb_last) in zip(
            annotations, expected
        ):
            self.assertEqual(annotation.base_first, first)
            self.assertEqual(annotation.base_last, last)
            self.assertEqual(annotation.feature.name, name)
            self.assertEqual(annotation.feature.type, ftype)
            self.assertEqual(annotation.feature_base_first, fb_first)
            self.assertEqual(annotation.feature_base_last, fb_last)

    def test_circular_over_end_import(self):
        """A feature whose end coordinate runs past the sequence length
        (150..170 on a 160 bp circle) wraps to the start of the fragment."""
        data = """##gff-version 3
chrI\tTest\tregion\t1\t160\t.\t.\t.\tID=i1;Name=f1;Is_circular=True
chrI\tTest\tgene\t150\t170\t.\t+\t.\tID=i2g;Name=f2g
###
##FASTA
>chrI
CCACACCACACCCACACACCCACACACCACACCACACACCACACCACACCCACACACACACATCCTAACACTACCCTAAC
ACAGCCCTAATCTAACCCTGGCCAACCTGTCTCTCAACTTACCCTCCATTACCCTGCCTCCACTCGTTACCCTGTCCCAT
"""
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as gff_file:
            gff_file.write(data)
            gff_file.close()
            genome = Genome.import_gff("Foo", gff_file.name)
            os.unlink(gff_file.name)
        # One fragment should exist per sequence in the GFF file.
        fragment = [
            fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "chrI"
        ][0]
        self.assertEqual(len(fragment.sequence), 160)
        annotations = fragment.annotations()
        self.assertEqual(len(annotations), 2)
        # (base_first, base_last, name, type, feature_base_first, feature_base_last)
        expected = [
            (1, 10, 'f2g', 'gene', 12, 21),
            (150, 160, 'f2g', 'gene', 1, 11),
        ]
        for annotation, (first, last, name, ftype, fb_first, fb_last) in zip(
            annotations, expected
        ):
            self.assertEqual(annotation.base_first, first)
            self.assertEqual(annotation.base_last, last)
            self.assertEqual(annotation.feature.name, name)
            self.assertEqual(annotation.feature.type, ftype)
            self.assertEqual(annotation.feature_base_first, fb_first)
            self.assertEqual(annotation.feature_base_last, fb_last)
class SpecialCaseImport(TestCase):
    """Imports exercising unusual but real-world GFF records."""

    def test_rbs_slippage_annotation_import(self):
        """Two CDS rows sharing one ID (ribosomal slippage / programmed
        frameshift) on the minus strand import as one gene plus two CDS
        annotations with complementary feature-relative coordinates."""
        data = """##gff-version 3
#!gff-spec-version 1.21
#!processor NCBI annotwriter
##sequence-region 1 1 210
##species https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=1282
1\tLocal\tregion\t1\t210\t.\t+\t.\tID=1:1..210;Dbxref=taxon:1282;Is_circular=true;Name=ANONYMOUS;\
gbkey=Src;genome=chromosome;mol_type=genomic DNA;strain=AZS-SE_43
1\t.\tgene\t19\t181\t.\t-\t.\tID=gene-AZS-SE_43_001895;Name=prfB;gbkey=Gene;gene=prfB;\
gene_biotype=protein_coding;locus_tag=AZS-SE_43_001895
1\tProtein Homology\tCDS\t101\t181\t.\t-\t0\tID=cds-AZS-SE_43_001895;Parent=gene-AZS-SE_43_001895;\
Name=extdb:AZS-SE_43_001895;Note=programmed frameshift;exception=ribosomal slippage;gbkey=CDS;\
gene=prfB;inference=COORDINATES: similar to AA sequence:RefSeq:WP_010959142.1;locus_tag=AZS-SE_43_001895;\
product=peptide chain release factor 2;protein_id=extdb:AZS-SE_43_001895;transl_table=11
1\tProtein Homology\tCDS\t19\t99\t.\t-\t0\tID=cds-AZS-SE_43_001895;Parent=gene-AZS-SE_43_001895;\
Name=extdb:AZS-SE_43_001895;Note=programmed frameshift;exception=ribosomal slippage;gbkey=CDS;\
gene=prfB;inference=COORDINATES: similar to AA sequence:RefSeq:WP_010959142.1;locus_tag=AZS-SE_43_001895;\
product=peptide chain release factor 2;protein_id=extdb:AZS-SE_43_001895;transl_table=11
##FASTA
>1
ATGTCAGAGAAAGAAATTTGGGATAAAGTTTTAGAAATTGCCCAGGAAAGAATTTCAAACACTAGTTATC
AAACGTTCATAAAAGATACGCAACTCTACTCACTTAAAAATGACGAAGCCATTATATTAGTAAGTCTGCC
TTTCAATGCGAGTTGGCTTAATCAGCGATATTCAGAAATTATGCAGGCTATTATTTATGATGTCATCGGT
"""
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
            f.write(data)
            f.close()
            genome = Genome.import_gff("Foo", f.name)
            os.unlink(f.name)
        contig_1 = [
            fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "1"
        ][0]
        self.assertEqual(len(contig_1.sequence), 210)
        # Query annotations once instead of re-fetching per assertion.
        anns = contig_1.annotations()
        self.assertEqual(len(anns), 3)
        # prfB gene covers the whole 19..181 span.
        self.assertEqual(anns[0].base_first, 19)
        self.assertEqual(anns[0].base_last, 181)
        self.assertEqual(anns[0].feature_base_first, 1)
        self.assertEqual(anns[0].feature_base_last, 163)
        self.assertEqual(anns[0].feature.name, "prfB")
        self.assertEqual(anns[0].feature.strand, -1)
        # Post-slippage CDS chunk (feature bases 82..162).
        self.assertEqual(anns[1].base_first, 19)
        self.assertEqual(anns[1].base_last, 99)
        self.assertEqual(anns[1].feature_base_first, 82)
        self.assertEqual(anns[1].feature_base_last, 162)
        self.assertEqual(anns[1].feature.name, "extdb:AZS-SE_43_001895")
        self.assertEqual(anns[1].feature.strand, -1)
        # Pre-slippage CDS chunk (feature bases 1..81).
        self.assertEqual(anns[2].base_first, 101)
        self.assertEqual(anns[2].base_last, 181)
        self.assertEqual(anns[2].feature_base_first, 1)
        self.assertEqual(anns[2].feature_base_last, 81)
        self.assertEqual(anns[2].feature.name, "extdb:AZS-SE_43_001895")
        self.assertEqual(anns[2].feature.strand, -1)

    def test_full_rbs_slippage_annotation_import(self):
        """The full ribosomal-slippage fixture imports every annotation."""
        genome2 = Genome.import_gff("Foo1", "edge/tests/fixtures/AZS-ribosomal.gff")
        only_contig = [
            fr.indexed_fragment() for fr in genome2.fragments.all() if fr.name == "1"
        ][0]
        anns = only_contig.annotations()
        self.assertEqual(len(anns), 4565)

    def test_none_strand_import(self):
        """Records with strand '.' (no strand) still import as annotations."""
        data = """##gff-version 3
##source-version geneious 2022.0.1
##sequence-region sHU0003.g1 1 140
sHU0003.g1 Geneious region 1 140 . + 0 Is_circular=true
sHU0003.g1 Geneious terminator 5 22 . . . Name=tonB
##FASTA
>1
ATGTCAGAGAAAGAAATTTGGGATAAAGTTTTAGAAATTGCCCAGGAAAGAATTTCAAACACTAGTTATC
AAACGTTCATAAAAGATACGCAACTCTACTCACTTAAAAATGACGAAGCCATTATATTAGTAAGTCTGCC"""
        with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f:
            f.write(data)
            f.close()
            genome = Genome.import_gff("Foo", f.name)
            os.unlink(f.name)
        only_contig = [
            fr.indexed_fragment() for fr in genome.fragments.all() if fr.name == "sHU0003.g1"
        ][0]
        anns = only_contig.annotations()
        self.assertEqual(len(anns), 1)
ginkgobioworks/edge | src/edge/models/genome_updater.py | 1 | 2981 | from contextlib import contextmanager
from edge.models.fragment import Fragment
class Genome_Updater(object):
    """
    Mixin with helpers for updating genome.
    """
    @contextmanager
    def annotate_fragment_by_name(self, name):
        # Context manager that yields the single indexed fragment with the
        # given name so the caller can add annotations to it in place.
        # Raises if the name does not resolve to exactly one fragment.
        f = [x for x in self.fragments.all() if x.name == name]
        if len(f) != 1:
            raise Exception("Zero or more than one fragments have name %s" % (name,))
        u = f[0].indexed_fragment()
        yield u
    @contextmanager
    def annotate_fragment_by_fragment_id(self, fragment_id):
        # Same as annotate_fragment_by_name, but resolves the fragment by ID.
        f = [x for x in self.fragments.all() if x.id == fragment_id]
        if len(f) != 1:
            raise Exception(
                "Zero or more than one fragments have ID %s" % (fragment_id,)
            )
        u = f[0].indexed_fragment()
        yield u
    @contextmanager
    def update_fragment_by_name(self, name, new_name=None):
        # Context manager yielding an updatable copy of the named fragment.
        # Requires a parent genome: updates are recorded on a derived genome,
        # never on a shared ancestor.
        if self.parent is None:
            raise Exception(
                "Cannot update fragment without a parent genome. Try editing instead."
            )
        f = [x for x in self.fragments.filter(name=name)]
        if len(f) != 1:
            raise Exception("Zero or more than one fragments have name %s" % (name,))
        # Keep the original name unless the caller supplies a new one.
        new_name = name if new_name is None else new_name
        u = f[0].indexed_fragment().update(new_name)
        yield u
        # Reached only if the caller's with-block exits without raising:
        # swap the updated fragment into this genome.
        self._add_updated_fragment(u)
    @contextmanager
    def update_fragment_by_fragment_id(
        self, fragment_id, new_fragment=True
    ):
        # Like update_fragment_by_name, but by ID. When new_fragment is False,
        # the existing fragment is yielded and mutated directly (no child
        # fragment is created and nothing is swapped in afterwards).
        if self.parent is None:
            raise Exception(
                "Cannot update fragment without a parent genome. Try editing instead."
            )
        f = [x for x in self.fragments.filter(id=fragment_id)]
        if len(f) != 1:
            raise Exception(
                "Zero or more than one fragments have ID %s" % (fragment_id,)
            )
        new_name = f[0].name if new_name is None else new_name
        u = f[0].indexed_fragment()
        if new_fragment is True:
            u = u.update(new_name)
        yield u
        if new_fragment is True:
            self._add_updated_fragment(u)
    def add_fragment(self, name, sequence, circular=False, dirn='.'):
        # Create a brand-new (non-inherited) fragment and attach it to this
        # genome. Rejects empty sequences.
        if len(sequence) == 0:
            raise Exception("Cannot create a fragment of length zero")
        new_fragment = Fragment.create_with_sequence(
            name=name, sequence=sequence, circular=circular, dirn=dirn
        )
        self.genome_fragment_set.create(fragment=new_fragment, inherited=False)
        return new_fragment
    def _add_updated_fragment(self, fragment):
        # Replace the genome's reference to the fragment's parent with the
        # updated child fragment, marking it as no longer inherited.
        existing_fragment_ids = [f.id for f in self.fragments.all()]
        if fragment.parent_id in existing_fragment_ids:
            gf = self.genome_fragment_set.get(fragment=fragment.parent)
            gf.fragment = fragment
            gf.inherited = False
            gf.save()
        else:
            raise Exception("Fragment parent not part of the genome")
| mit | 867eb981de64484cd0d6663587627a88 | 36.2625 | 86 | 0.588393 | 4.01752 | false | false | false | false |
ginkgobioworks/edge | src/edge/tests/test_views.py | 1 | 43035 | import json
import re
from django import forms
from django.test import TestCase
from django.urls import reverse
import unittest.mock as mock
from edge.models import Genome, Fragment, Genome_Fragment
class GenomeListTest(TestCase):
    """Tests for the genome list endpoint: listing, creating, deriving child
    genomes, filtering by fragment/name, and pagination."""

    def test_empty_db(self):
        """An empty database yields an empty genome list."""
        url = reverse("genome_list")
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(json.loads(res.content), [])

    def test_add_genome(self):
        """POSTing a genome returns 201 with a URI-addressable representation."""
        url = reverse("genome_list")
        res = self.client.post(
            url,
            data=json.dumps(dict(name="foo", notes="bar")),
            content_type="application/json",
        )
        self.assertEqual(res.status_code, 201)
        uri = json.loads(res.content)["uri"]
        m = re.match(r"^/edge/genomes/(\d+)/$", uri)
        # Assert the URI shape before dereferencing the match object.
        self.assertIsNotNone(m)
        genome_id = int(m.group(1))
        self.assertEqual(
            json.loads(res.content),
            {
                "fragments": [],
                "id": genome_id,
                "name": "foo",
                "notes": "bar",
                "parent_id": None,
                "parent_name": "",
                "uri": uri,
            },
        )

    def test_derives_new_genome_after_adding_fragment(self):
        """Adding a fragment derives a child genome carrying that fragment."""
        g1 = Genome(name="Foo")
        g1.save()
        url = reverse("derive-genome-with-new-fragments", kwargs={"genome_id": g1.id})
        res = self.client.post(
            url,
            data=json.dumps(
                [{"name": "test-fragment", "sequence": "AGCTAGCTTCGATCGA"}]
            ),
            content_type="application/json",
        )
        self.assertEqual(res.status_code, 201)
        child = Genome.objects.get(parent=g1.id)
        self.assertNotEqual(child.id, g1.id)
        fragment = child.fragments.first()
        self.assertEqual(
            json.loads(res.content),
            {
                "fragments": [
                    {
                        "id": fragment.id,
                        "uri": "/edge/fragments/{}/".format(fragment.id),
                        "name": fragment.name,
                        "circular": fragment.circular,
                        "parent_id": None,
                        "length": 16,
                    }
                ],
                "id": child.id,
                "name": child.name,
                "notes": None,
                "parent_id": g1.id,
                "parent_name": g1.name,
                "uri": "/edge/genomes/{}/".format(child.id),
            },
        )

    def test_doesnt_derives_new_genome_on_invalid_fragment(self):
        """A validation error on any fragment aborts the whole derivation."""
        g1 = Genome(name="Foo")
        g1.save()
        url = reverse("derive-genome-with-new-fragments", kwargs={"genome_id": g1.id})
        with self.assertRaises(forms.ValidationError) as exception:
            self.client.post(
                url,
                data=json.dumps(
                    [
                        {"name": "valid-fragment", "sequence": "AGCTAGCTTCGATCGA"},
                        {"name": "invalid-fragment"},
                    ]
                ),
                content_type="application/json",
            )
        self.assertIn("sequence", exception.exception.error_dict)
        # Ensure that when an error is hit, no child genome was derived from the initially
        # valid fragments
        self.assertEqual(Genome.objects.filter(parent=g1.id).count(), 0)

    def test_derives_new_genome_with_multiple_fragments(self):
        """Multiple fragments (including circular ones) derive one child genome."""
        g1 = Genome(name="Foo")
        g1.save()
        url = reverse("derive-genome-with-new-fragments", kwargs={"genome_id": g1.id})
        res = self.client.post(
            url,
            data=json.dumps(
                [
                    {"name": "test-fragment", "sequence": "AGCTAGCTTCGATCGA"},
                    {
                        "name": "circular-fragment",
                        "sequence": "AGCTAGCTTCGATCGAAGCTATTATATCGATA",
                        "circular": True,
                    },
                ]
            ),
            content_type="application/json",
        )
        self.assertEqual(res.status_code, 201)
        child = Genome.objects.get(parent=g1.id)
        self.assertNotEqual(child.id, g1.id)
        fragments = [
            {
                "id": fragment.id,
                "uri": "/edge/fragments/{}/".format(fragment.id),
                "name": fragment.name,
                "circular": fragment.circular,
                "parent_id": None,
                "length": fragment.indexed_fragment().length,
            }
            for fragment in child.fragments.all()
        ]
        self.assertEqual(
            json.loads(res.content),
            {
                "fragments": fragments,
                "id": child.id,
                "name": child.name,
                "notes": None,
                "parent_id": g1.id,
                "parent_name": g1.name,
                "uri": "/edge/genomes/{}/".format(child.id),
            },
        )

    def test_can_use_uri_from_add_genome_to_fetch_genome(self):
        """The URI returned on creation dereferences to the same payload."""
        url = reverse("genome_list")
        res = self.client.post(
            url,
            data=json.dumps(dict(name="foo", notes="bar")),
            content_type="application/json",
        )
        re2 = self.client.get(json.loads(res.content)["uri"])
        self.assertEqual(re2.status_code, 200)
        self.assertEqual(json.loads(res.content), json.loads(re2.content))

    def test_finds_genomes_with_specified_fragment_ids(self):
        """The f parameter matches genomes containing exactly the given fragments."""
        g1 = Genome(name="Foo")
        g1.save()
        g2 = Genome(name="Bar")
        g2.save()
        f1 = Fragment(circular=True, name="FooF1")
        f1.save()
        f2 = Fragment(circular=True, name="FooF2")
        f2.save()
        f3 = Fragment(circular=True, name="FooF3", parent=f2)
        f3.save()
        Genome_Fragment(genome=g1, fragment=f1, inherited=False).save()
        Genome_Fragment(genome=g1, fragment=f2, inherited=False).save()
        Genome_Fragment(genome=g2, fragment=f1, inherited=True).save()
        Genome_Fragment(genome=g2, fragment=f3, inherited=False).save()
        # no filter, return both genomes
        url = reverse("genome_list")
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertCountEqual([g["id"] for g in d], [g1.id, g2.id])
        # looking for f1 and f2
        res = self.client.get("%s?f=%d&f=%d" % (url, f1.id, f2.id))
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertCountEqual([g["id"] for g in d], [g1.id])
        # looking for f1 and f3
        res = self.client.get("%s?f=%d&f=%d" % (url, f1.id, f3.id))
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertCountEqual([g["id"] for g in d], [g2.id])
        # looking for f2 and f3
        res = self.client.get("%s?f=%d&f=%d" % (url, f2.id, f3.id))
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(d, [])
        # looking for f1 alone: no genome has exactly that fragment set
        res = self.client.get("%s?f=%d" % (url, f1.id))
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(d, [])
        # bad input, return []
        res = self.client.get("%s?f=[1,2,3]" % url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(d, [])

    def test_finds_genomes_with_name(self):
        """The q parameter matches genomes by both name substring and ID."""
        a = Genome(name="Foo")
        a.save()
        Genome(name="Bar %s" % a.id).save()
        # no filter, return both genomes
        url = reverse("genome_list")
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertCountEqual([g["name"] for g in d], ["Foo", "Bar %s" % a.id])
        # finds genome by ID and query
        res = self.client.get("%s?q=%s" % (url, a.id))
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertCountEqual([g["name"] for g in d], ["Foo", "Bar %s" % a.id])
        # finds one
        res = self.client.get("%s?q=oo" % url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertCountEqual([g["name"] for g in d], ["Foo"])
        # finds none
        res = self.client.get("%s?q=ooo" % url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertCountEqual([g["name"] for g in d], [])

    def test_genome_list_paginates(self):
        """The p (page) and s (page size) parameters limit results."""
        Genome(name="Foo").save()
        Genome(name="Bar").save()
        Genome(name="Far").save()
        Genome(name="Baz").save()
        url = reverse("genome_list")
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(len(d), 4)
        res = self.client.get("%s?s=1" % url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(len(d), 3)
        res = self.client.get("%s?s=1&p=2" % url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(len(d), 2)
        res = self.client.get("%s?p=2" % url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(len(d), 2)

    def test_genome_list_does_not_return_inactive_genomes(self):
        """Genomes flagged inactive are excluded from the listing."""
        Genome(name="Foo", active=False).save()
        Genome(name="Bar", active=False).save()
        Genome(name="Far").save()
        Genome(name="Baz").save()
        url = reverse("genome_list")
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        d = json.loads(res.content)
        self.assertEqual(len(d), 2)
        self.assertEqual(b"Foo" in res.content, False)
        self.assertEqual(b"Bar" in res.content, False)
        self.assertEqual(b"Far" in res.content, True)
        self.assertEqual(b"Baz" in res.content, True)
class GenomeTest(TestCase):
    """Tests for a single genome endpoint and its fragments collection."""

    def setUp(self):
        """Create one genome through the API; record its URI and ID."""
        url = reverse("genome_list")
        res = self.client.post(
            url,
            data=json.dumps(dict(name="foo", notes="bar")),
            content_type="application/json",
        )
        uri = json.loads(res.content)["uri"]
        m = re.match(r"^/edge/genomes/(\d+)/$", uri)
        self.assertNotEqual(m, None)
        self.genome_uri = uri
        self.genome_id = int(m.group(1))

    def test_get_genome(self):
        """GET on the genome URI returns its full representation."""
        res = self.client.get(self.genome_uri)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(
            json.loads(res.content),
            {
                "fragments": [],
                "id": self.genome_id,
                "name": "foo",
                "notes": "bar",
                "parent_id": None,
                "parent_name": "",
                "uri": self.genome_uri,
            },
        )

    def test_returns_404_if_genome_does_not_exist(self):
        """An unknown genome ID yields HTTP 404."""
        url = reverse("genome", kwargs=dict(genome_id=98765))
        res = self.client.get(url)
        self.assertEqual(res.status_code, 404)

    def test_add_non_circular_fragment(self):
        """POSTing a linear fragment returns 201 with circular=False."""
        data = dict(name="chrI", sequence="AGCTAGCTTCGATCGA")
        res = self.client.post(
            self.genome_uri + "fragments/",
            data=json.dumps(data),
            content_type="application/json",
        )
        self.assertEqual(res.status_code, 201)
        uri = json.loads(res.content)["uri"]
        m = re.match(r"^/edge/fragments/(\d+)/$", uri)
        self.assertIsNotNone(m)
        fragment_id = int(m.group(1))
        self.assertEqual(
            json.loads(res.content),
            {
                "circular": False,
                "id": fragment_id,
                "length": len(data["sequence"]),
                "name": data["name"],
                "uri": uri,
                "parent_id": None,
            },
        )

    def test_add_circular_fragment(self):
        """POSTing a circular fragment returns 201 with circular=True."""
        data = dict(name="chrI", sequence="AGCTAGCTTCGATCGA", circular=True)
        res = self.client.post(
            self.genome_uri + "fragments/",
            data=json.dumps(data),
            content_type="application/json",
        )
        self.assertEqual(res.status_code, 201)
        uri = json.loads(res.content)["uri"]
        m = re.match(r"^/edge/fragments/(\d+)/$", uri)
        self.assertIsNotNone(m)
        fragment_id = int(m.group(1))
        self.assertEqual(
            json.loads(res.content),
            {
                "circular": True,
                "id": fragment_id,
                "length": len(data["sequence"]),
                "name": data["name"],
                "uri": uri,
                "parent_id": None,
            },
        )

    def test_get_genome_returns_fragments(self):
        """Fragments added to a genome appear in the genome representation."""
        data = dict(name="chrI", sequence="AGCTAGCTTCGATCGA", circular=True)
        res = self.client.post(
            self.genome_uri + "fragments/",
            data=json.dumps(data),
            content_type="application/json",
        )
        uri = json.loads(res.content)["uri"]
        m = re.match(r"^/edge/fragments/(\d+)/$", uri)
        self.assertIsNotNone(m)
        fragment_id = int(m.group(1))
        res = self.client.get(self.genome_uri)
        self.assertEqual(
            json.loads(res.content)["fragments"],
            [
                {
                    "circular": True,
                    "id": fragment_id,
                    "length": len(data["sequence"]),
                    "name": data["name"],
                    "uri": uri,
                    "parent_id": None,
                }
            ],
        )

    def test_can_use_uri_from_add_fragment_to_fetch_fragment(self):
        """The URI returned on fragment creation dereferences to the same payload."""
        data = dict(name="chrI", sequence="AGCTAGCTTCGATCGA")
        res = self.client.post(
            self.genome_uri + "fragments/",
            data=json.dumps(data),
            content_type="application/json",
        )
        re2 = self.client.get(json.loads(res.content)["uri"])
        self.assertEqual(re2.status_code, 200)
        self.assertEqual(json.loads(res.content), json.loads(re2.content))
class FragmentTest(TestCase):
def setUp(self):
url = reverse("fragment_list")
self.sequence = "AGCTAGCTTCGATCGA"
self.name = "foo"
res = self.client.post(
url,
data=json.dumps(dict(name=self.name, sequence=self.sequence)),
content_type="application/json",
)
uri = json.loads(res.content)["uri"]
m = re.match(r"^/edge/fragments/(\d+)/$", uri)
self.fragment_id = int(m.group(1))
self.uri = uri
def test_get_all_user_defined_fragments(self):
url = reverse("fragment_list")
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"id": self.fragment_id,
"name": self.name,
"length": len(self.sequence),
"uri": self.uri,
"circular": False,
"parent_id": None,
}
],
)
def test_does_not_return_genomic_fragments(self):
from edge.models import Genome, Genome_Fragment
url = reverse("fragment_list")
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), 1)
g = Genome(name="Foo")
g.save()
Genome_Fragment(genome=g, fragment_id=self.fragment_id, inherited=False).save()
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), 0)
def test_does_not_return_inactive_fragments(self):
from edge.models import Fragment
url = reverse("fragment_list")
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), 1)
fragment = Fragment.objects.get(pk=self.fragment_id)
fragment.active = False
fragment.save()
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), 0)
def test_get_fragment(self):
res = self.client.get(self.uri)
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
{
"id": self.fragment_id,
"name": self.name,
"length": len(self.sequence),
"uri": self.uri,
"circular": False,
"parent_id": None,
},
)
def test_get_fragment_sequence(self):
url = reverse("fragment_sequence", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
{
"sequence": self.sequence,
"base_first": 1,
"base_last": len(self.sequence),
},
)
def test_get_fragment_sequence_by_position(self):
url = reverse("fragment_sequence", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.get("%s?f=3&l=10" % url)
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
{"sequence": self.sequence[2:10], "base_first": 3, "base_last": 10},
)
def test_returns_404_if_fragment_does_not_exist(self):
url = reverse("fragment", kwargs=dict(fragment_id=98765))
res = self.client.get(url)
self.assertEquals(res.status_code, 404)
def test_add_annotation_on_forward_strand(self):
data = dict(base_first=2, base_last=9, name="proC", type="promoter", strand=1)
url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
self.assertEquals(res.status_code, 201)
self.assertEquals(json.loads(res.content), {})
res = self.client.get(self.uri + "annotations/")
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 2,
"base_last": 9,
"strand": 1,
"feature_base_first": 1,
"feature_base_last": 8,
"feature": {
"id": mock.ANY,
"length": 8,
"name": "proC",
"type": "promoter",
"qualifiers": {}
},
"name": "proC",
"type": "promoter",
"feature_full_length": 8,
"qualifiers": {},
}
],
)
def test_add_annotation_with_qualifiers(self):
data = dict(
base_first=2,
base_last=9,
name="proC",
type="promoter",
strand=1,
qualifiers=dict(gene="PROC"),
)
url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
self.assertEquals(res.status_code, 201)
self.assertEquals(json.loads(res.content), {})
res = self.client.get(self.uri + "annotations/")
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 2,
"base_last": 9,
"strand": 1,
"feature_base_first": 1,
"feature_base_last": 8,
"feature": {
"id": mock.ANY,
"length": 8,
"name": "proC",
"type": "promoter",
"qualifiers": {"gene": "PROC"}
},
"name": "proC",
"type": "promoter",
"qualifiers": {"gene": "PROC"},
"feature_full_length": 8,
}
],
)
def test_add_annotation_on_reverse_strand(self):
data = dict(base_first=3, base_last=10, name="proC", type="promoter", strand=-1)
url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
self.assertEquals(res.status_code, 201)
self.assertEquals(json.loads(res.content), {})
res = self.client.get(self.uri + "annotations/")
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 3,
"base_last": 10,
"strand": -1,
"feature_base_first": 1,
"feature_base_last": 8,
"feature": {
"id": mock.ANY,
"length": 8,
"name": "proC",
"type": "promoter",
"qualifiers": {}
},
"name": "proC",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 8,
}
],
)
def test_annotate_multiple_chunks_with_qualifiers(self):
data = dict(
bases=((2, 4), (6, 9)),
name="proC",
type="promoter",
strand=1,
qualifiers=dict(gene="PROC"),
)
url = reverse(
"fragment_annotate_chunks", kwargs=dict(fragment_id=self.fragment_id)
)
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
self.assertEquals(res.status_code, 201)
self.assertEquals(json.loads(res.content), {})
res = self.client.get(self.uri + "annotations/")
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 2,
"base_last": 4,
"strand": 1,
"feature_base_first": 1,
"feature_base_last": 3,
"feature": {
"id": mock.ANY,
"length": 7,
"name": "proC",
"type": "promoter",
"qualifiers": {"gene": "PROC"}
},
"name": "proC",
"type": "promoter",
"qualifiers": {"gene": "PROC"},
"feature_full_length": 7,
},
{
"base_first": 6,
"base_last": 9,
"strand": 1,
"feature_base_first": 4,
"feature_base_last": 7,
"feature": {
"id": mock.ANY,
"length": 7,
"name": "proC",
"type": "promoter",
"qualifiers": {"gene": "PROC"}
},
"name": "proC",
"type": "promoter",
"qualifiers": {"gene": "PROC"},
"feature_full_length": 7,
},
],
)
def test_get_multiple_annotations_that_are_overlapping(self):
data = dict(base_first=2, base_last=9, name="proC", type="promoter", strand=1)
url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
data = dict(base_first=3, base_last=10, name="proD", type="promoter", strand=-1)
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
res = self.client.get(self.uri + "annotations/")
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 2,
"base_last": 9,
"strand": 1,
"feature_base_first": 1,
"feature_base_last": 8,
"feature": {
"id": mock.ANY,
"length": 8,
"name": "proC",
"type": "promoter",
"qualifiers": {}
},
"name": "proC",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 8,
},
{
"base_first": 3,
"base_last": 10,
"strand": -1,
"feature_base_first": 1,
"feature_base_last": 8,
"feature": {
"id": mock.ANY,
"length": 8,
"name": "proD",
"type": "promoter",
"qualifiers": {}
},
"name": "proD",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 8,
},
],
)
def test_get_annotations_by_region(self):
data = dict(base_first=2, base_last=8, name="proC", type="promoter", strand=1)
url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
data = dict(
base_first=10, base_last=13, name="proD", type="promoter", strand=-1
)
res = self.client.post(
url, data=json.dumps(data), content_type="application/json"
)
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 2,
"base_last": 8,
"strand": 1,
"feature_base_first": 1,
"feature_base_last": 7,
"feature": {
"id": mock.ANY,
"length": 7,
"name": "proC",
"type": "promoter",
"qualifiers": {}
},
"name": "proC",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 7,
},
{
"base_first": 10,
"base_last": 13,
"strand": -1,
"feature_base_first": 1,
"feature_base_last": 4,
"feature": {
"id": mock.ANY,
"length": 4,
"name": "proD",
"type": "promoter",
"qualifiers": {}
},
"name": "proD",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 4,
},
],
)
res = self.client.get("%s?f=8&l=14" % url)
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 2,
"base_last": 8,
"feature": {
"id": mock.ANY,
"length": 7,
"name": "proC",
"type": "promoter",
"qualifiers": {}
},
"strand": 1,
"feature_base_first": 1,
"feature_base_last": 7,
"name": "proC",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 7,
},
{
"base_first": 10,
"base_last": 13,
"strand": -1,
"feature_base_first": 1,
"feature_base_last": 4,
"feature": {
"id": mock.ANY,
"length": 4,
"name": "proD",
"type": "promoter",
"qualifiers": {}
},
"name": "proD",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 4,
},
],
)
res = self.client.get("%s?f=9&l=14" % url)
self.assertEquals(res.status_code, 200)
self.assertEquals(
json.loads(res.content),
[
{
"base_first": 10,
"base_last": 13,
"strand": -1,
"feature_base_first": 1,
"feature_base_last": 4,
"feature": {
"id": mock.ANY,
"length": 4,
"name": "proD",
"type": "promoter",
"qualifiers": {}
},
"name": "proD",
"type": "promoter",
"qualifiers": {},
"feature_full_length": 4,
}
],
)
def test_limit_max_annotations_to_fetch(self):
from edge.models import Fragment
import random
fragment = Fragment.objects.get(pk=self.fragment_id)
fragment = fragment.indexed_fragment()
flen = fragment.length
nannotations = 10
# create some annotations
for n in range(0, nannotations):
bf = random.randint(1, flen)
bl = random.randint(bf, flen)
fragment.annotate(bf, bl, "Feature %s" % (n,), "Feature", 1)
url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), nannotations)
# limit number of annotations
res = self.client.get("%s?m=1" % url)
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), 1)
res = self.client.get("%s?m=%s" % (url, nannotations,))
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), nannotations)
res = self.client.get("%s?m=0" % url)
self.assertEquals(res.status_code, 200)
self.assertEquals(len(json.loads(res.content)), 0)
class GenomeAnnotationsTest(TestCase):
    """Tests for the genome-wide annotation search endpoint."""

    def setUp(self):
        # Create a genome with a single 16 bp fragment and remember both IDs.
        url = reverse("genome_list")
        res = self.client.post(
            url,
            data=json.dumps(dict(name="foo", notes="bar")),
            content_type="application/json",
        )
        self.genome_id = json.loads(res.content)["id"]
        self.genome_uri = json.loads(res.content)["uri"]
        data = dict(name="chrI", sequence="AGCTAGCTTCGATCGA")
        url = reverse("genome_fragments", kwargs=dict(genome_id=self.genome_id))
        res = self.client.post(
            url, data=json.dumps(data), content_type="application/json"
        )
        self.fragment_uri = json.loads(res.content)["uri"]
        self.fragment_data = json.loads(res.content)
        # Extract the numeric fragment id from its URI.
        m = re.match(r"^/edge/fragments/(\d+)/$", self.fragment_uri)
        self.fragment_id = int(m.group(1))

    def test_returns_errors_if_genome_not_indexed_and_creates_indexed_genome(self):
        """Searching an un-indexed genome reports an error and triggers indexing."""
        genome = Genome.objects.get(pk=self.genome_id)
        for fr in genome.fragments.all():
            fr.fragment_chunk_location_set.all().delete()
        # not indexed before call
        # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12)
        self.assertEqual(genome.has_location_index, False)
        # gets 200 with error
        url = reverse("genome_annotations", kwargs=dict(genome_id=self.genome_id))
        res = self.client.get("%s?q=proC" % url)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(
            "Missing genome indices" in json.loads(res.content)["error"], True
        )
        # in test mode, the .delay call via celery is inlined
        genome = Genome.objects.get(pk=self.genome_id)
        self.assertEqual(genome.has_location_index, True)

    def test_find_annotation(self):
        """Annotations can be found genome-wide by name with the ``q`` parameter."""
        data = dict(base_first=2, base_last=9, name="proC", type="promoter", strand=1)
        url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
        res = self.client.post(
            url, data=json.dumps(data), content_type="application/json"
        )
        url = reverse("genome_annotations", kwargs=dict(genome_id=self.genome_id))
        res = self.client.get("%s?q=proC" % url)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(
            json.loads(res.content),
            [
                [
                    self.fragment_data,
                    [
                        {
                            "base_first": 2,
                            "base_last": 9,
                            "feature": {
                                "id": mock.ANY,
                                "length": 8,
                                "name": "proC",
                                "type": "promoter",
                                "qualifiers": {}
                            },
                            "strand": 1,
                            "feature_base_first": 1,
                            "feature_base_last": 8,
                            "name": "proC",
                            "type": "promoter",
                            "qualifiers": {},
                            "feature_full_length": 8,
                        }
                    ],
                ]
            ],
        )

    def test_find_annotation_by_qualifier_field(self):
        """The ``field`` parameter searches a specific qualifier rather than the name."""
        data = dict(
            base_first=2,
            base_last=4,
            name="Some annotation",
            type="promoter",
            strand=1,
            qualifiers=dict(product=["Foobar"]),
        )
        url = reverse("fragment_annotations", kwargs=dict(fragment_id=self.fragment_id))
        res = self.client.post(
            url, data=json.dumps(data), content_type="application/json"
        )
        data = dict(
            base_first=5,
            base_last=7,
            name="Another annotation",
            type="promoter",
            strand=1,
            qualifiers=dict(product=["Atg20p"]),
        )
        res = self.client.post(
            url, data=json.dumps(data), content_type="application/json"
        )
        url = reverse("genome_annotations", kwargs=dict(genome_id=self.genome_id))
        res = self.client.get("%s?q=Atg20p&field=product" % url)
        self.assertEqual(res.status_code, 200)
        # (removed leftover debug print of the response payload)
        self.assertEqual(
            json.loads(res.content),
            [
                [
                    self.fragment_data,
                    [
                        {
                            "base_first": 5,
                            "base_last": 7,
                            "feature": {
                                "id": mock.ANY,
                                "length": 3,
                                "name": "Another annotation",
                                "type": "promoter",
                                "qualifiers": {"product": ["Atg20p"]}
                            },
                            "strand": 1,
                            "feature_base_first": 1,
                            "feature_base_last": 3,
                            "name": "Another annotation",
                            "type": "promoter",
                            "qualifiers": {"product": ["Atg20p"]},
                            "feature_full_length": 3,
                        }
                    ],
                ]
            ],
        )
class GenomeImportTest(TestCase):
    """Tests for the GFF genome import endpoint."""

    def test_import_works(self):
        """Uploading a GFF fixture imports exactly one genome."""
        with open("edge/tests/fixtures/ecoli-mg1655-simple.gff") as fp:
            url = reverse("import")
            res = self.client.post(url, {"name": "ecoli", "attachment": fp})
            # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12)
            self.assertEqual(len(json.loads(res.content)["imported_genomes"]), 1)
class GenomeDiffView(TestCase):
    """Tests for the genome coordinate-diff endpoint between a parent and child genome."""

    def setUp(self):
        # Build a parent genome with one 17 bp fragment, then derive a child
        # genome by inserting 6 bases at position 3 and removing 4 bases at
        # position 10 (positions in the updated coordinate system).
        genome = Genome.create("Foo")
        s = "atggcatattcgcagct"
        f1 = genome.add_fragment("chrI", s)
        g1 = genome.indexed_genome()
        g_u = g1.update()
        with g_u.update_fragment_by_name("chrI") as f:
            f.insert_bases(3, "gataca")
            f.remove_bases(10, 4)
        g2 = g_u.indexed_genome()
        f2 = g2.fragments.all()[0]
        self.parent_genome_id = g1.id
        self.child_genome_id = g2.id
        self.parent_fragment_id = f1.id
        self.child_fragment_id = f2.id

    def test_gets_location_diff(self):
        # expects 200 with the list of changed coordinate ranges between parent and child
        url = reverse(
            "genome-coordinate-diff",
            kwargs=dict(
                child_genome_id=self.child_genome_id,
                parent_genome_id=self.parent_genome_id
            )
        )
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        # One entry for the insertion at parent position 3, one for the removal
        # spanning parent positions 4-8.
        self.assertEqual(
            json.loads(res.content),
            [
                {'parent_fragment_name': 'chrI', 'parent_fragment_id': self.parent_fragment_id,
                 'parent_starts_at': 3, 'parent_ends_before': 3,
                 'child_fragment_name': 'chrI', 'child_fragment_id': self.child_fragment_id,
                 'child_starts_at': 3, 'child_ends_before': 9},
                {'parent_fragment_name': 'chrI', 'parent_fragment_id': self.parent_fragment_id,
                 'parent_starts_at': 4, 'parent_ends_before': 8,
                 'child_fragment_name': 'chrI', 'child_fragment_id': self.child_fragment_id,
                 'child_starts_at': 10, 'child_ends_before': 10}
            ]
        )

    def test_fails_location_diff(self):
        # gets 404 because no child genome
        url = reverse(
            "genome-coordinate-diff",
            kwargs=dict(
                child_genome_id=self.child_genome_id + 100,
                parent_genome_id=self.parent_genome_id
            )
        )
        res = self.client.get(url)
        self.assertEqual(res.status_code, 404)
        # gets 200 with error: the genome exists but is not a lineage parent
        url = reverse(
            "genome-coordinate-diff",
            kwargs=dict(
                child_genome_id=self.child_genome_id,
                parent_genome_id=self.parent_genome_id + 100
            )
        )
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(
            "Genome input is not a parent by lineage",
            json.loads(res.content)["error"]
        )
class GenomeBlastPcrAPIView(TestCase):
    """Tests that the BLAST and PCR endpoints delegate to ``blast_genome`` correctly."""

    def setUp(self):
        # Embed a distinctive 53 bp sequence between two 2 kb runs of 'a', and
        # derive PCR primers from its first and last 15 bases.
        genome = Genome.create("Foo")
        upstream = "a" * 2000
        s = "ttttatggcatattcgcagctactatctagcactacgatcatctagcgatttt"
        downstream = "a" * 2000
        genome.add_fragment("chrI", upstream + s + downstream)
        g = genome.indexed_genome()
        self.s = s
        self.primers = [s[:15], s[-15:]]
        self.genome = g

    @mock.patch("edge.blast.blast_genome")
    def test_blast_api_calls_blast_genome(self, mocked_blast_genome):
        """BLAST endpoint forwards the query with the default word size of 11."""
        url = reverse(
            "genome_blast",
            kwargs=dict(genome_id=self.genome.id)
        )
        self.client.post(
            url,
            data=dict(program='blastn', query=self.s),
            content_type="application/json"
        )
        mocked_blast_genome.assert_called_once_with(self.genome, 'blastn', self.s, word_size=11)

    @mock.patch("edge.blast.blast_genome")
    def test_blast_api_calls_blast_genome_word_size_6(self, mocked_blast_genome):
        """BLAST endpoint honors an explicit ``word_size`` parameter."""
        url = reverse(
            "genome_blast",
            kwargs=dict(genome_id=self.genome.id)
        )
        self.client.post(
            url,
            data=dict(program='blastn', query=self.s, word_size=6),
            content_type="application/json"
        )
        # (removed leftover debug print of call_args_list)
        mocked_blast_genome.assert_called_once_with(self.genome, 'blastn', self.s, word_size=6)

    @mock.patch("edge.pcr.blast_genome")
    def test_pcr_api_calls_blast_genome(self, mocked_blast_genome):
        """PCR endpoint BLASTs both primers with the default word size of 11."""
        url = reverse(
            "genome_pcr",
            kwargs=dict(genome_id=self.genome.id)
        )
        self.client.post(
            url,
            data=dict(primers=self.primers),
            content_type="application/json"
        )
        mocked_blast_genome.assert_has_calls(
            [
                mock.call(self.genome, 'blastn', self.primers[0], word_size=11),
                mock.call(self.genome, 'blastn', self.primers[1], word_size=11)
            ], any_order=True
        )

    @mock.patch("edge.pcr.blast_genome")
    def test_pcr_api_calls_blast_genome_word_size_6(self, mocked_blast_genome):
        """PCR endpoint honors an explicit ``blast_word_size`` parameter."""
        url = reverse(
            "genome_pcr",
            kwargs=dict(genome_id=self.genome.id)
        )
        self.client.post(
            url,
            data=dict(primers=self.primers, blast_word_size=6),
            content_type="application/json"
        )
        # (removed leftover debug print of call_args_list)
        mocked_blast_genome.assert_has_calls(
            [
                mock.call(self.genome, 'blastn', self.primers[0], word_size=6),
                mock.call(self.genome, 'blastn', self.primers[1], word_size=6)
            ], any_order=True
        )
| mit | 20686ba6ba47478a6ed489fdde8928c1 | 35.012552 | 96 | 0.473405 | 3.961978 | false | false | false | false |
ginkgobioworks/edge | src/edge/blastdb.py | 1 | 4077 | import re
import os
import os.path
import shutil
import uuid
import tempfile
import subprocess
from django.conf import settings
from edge.blast import BLAST_DB, default_genome_db_name
from edge.blast import Blast_Accession
from edge.models import Fragment, Genome
from edge.utils import make_required_dirs
def fragment_fasta_fn(fragment):
    """Return the sharded filesystem path of the nucleotide FASTA file for *fragment*."""
    # Two-level sharding by fragment id (1024 buckets per level) keeps any one
    # directory from accumulating too many files.
    shard_outer = fragment.id % 1024
    shard_inner = (fragment.id >> 10) % 1024
    return "%s/fragment/%s/%s/edge-fragment-%s-nucl.fa" % (
        settings.NCBI_DATA_DIR,
        shard_outer,
        shard_inner,
        fragment.id,
    )
def does_blast_db_have_all_fragments(fragments, dbname):
    """Return True iff the BLAST db at *dbname* indexes every fragment.

    The ``.nsd`` sequence-dictionary file written by ``makeblastdb`` contains
    two lines per sequence, so a complete db has exactly ``2 * len(fragments)``
    lines.
    """
    # Context manager guarantees the handle is closed even if readlines() fails.
    with open(dbname + ".nsd") as f:
        lines = f.readlines()
    print("verifying: expecting %s fragments, got %s lines in nsd" % (len(fragments), len(lines)))
    return len(fragments) * 2 == len(lines)
def build_fragment_fasta(fragment, refresh=False):
    """Write (or reuse) the FASTA file for *fragment* and return its path.

    The file is only (re)written when missing or when *refresh* is true.
    Circular fragments have their sequence doubled — presumably so BLAST can
    find hits spanning the origin; confirm against the BLAST query code.
    """
    fn = fragment_fasta_fn(fragment)
    make_required_dirs(fn)
    if not os.path.isfile(fn) or refresh:  # have not built this fasta or need refresh
        print("building %s" % fn)
        # this may take awhile, so do this first, so user interrupt does
        # not create an empty file
        sequence = fragment.indexed_fragment().sequence
        # be really lenient, convert any unknown bp to N
        sequence = re.sub(r"[^agctnAGCTN]", "n", sequence)
        if fragment.circular is True:
            sequence = sequence + sequence
        # writing first to a temp file, rename to an expected file location,
        # prevents possible race condition of accessing/writing to the same
        # file (which may or may not be a problem on a NAS, i don't know).
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmpf:
            tmpf.write(
                ">gnl|edge|%s %s\n%s\n"
                % (Blast_Accession.make(fragment), fragment.name, sequence)
            )
        # i think this will write atomically to dest, and copies over different fs
        shutil.move(tmpf.name, fn)
    return fn
def build_db(fragments, dbname, refresh=True, attempt=0):
    """Build a BLAST nucleotide database over *fragments*, returning its name.

    Concatenates per-fragment FASTA files, runs ``makeblastdb`` into a
    uniquely-suffixed database name, and verifies the result via the ``.nsd``
    line count; on verification failure it retries up to 5 times.
    Returns None when *fragments* is empty.
    """
    if len(fragments) == 0:
        return None
    fns = []
    # Sort by id so the concatenated FASTA (and thus the db) is deterministic.
    for fragment in sorted(fragments, key=lambda f: f.id):
        fn = build_fragment_fasta(fragment, refresh)
        fns.append(fn)
    print("concat fasta files for %s" % dbname)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
        fafile = f.name
        for fn in fns:
            with open(fn) as inf:
                for line in inf:
                    f.write(line)
    # the following prevents concurrent blastdb builds corrupting each other
    orig_dbname = dbname
    unique_suffix = str(uuid.uuid4())
    dbname = "%s_%s" % (dbname, unique_suffix)
    print("building blast db %s" % dbname)
    make_required_dirs(dbname)
    cmd = "%s/makeblastdb -in %s -out %s " % (settings.NCBI_BIN_DIR, fafile, dbname)
    cmd += "-title edge -dbtype nucl -parse_seqids -input_type fasta"
    r = subprocess.check_output(cmd.split(" "))
    if b"Adding sequences from FASTA" not in r:
        print(r)
    os.unlink(fafile)
    # Recurse with the original (un-suffixed) name so each retry gets a fresh suffix.
    if not does_blast_db_have_all_fragments(fragments, dbname) and attempt < 5:
        print("does not have all fragments, retry (attempts: %s)" % (attempt + 1,))
        return build_db(fragments, orig_dbname, refresh=refresh, attempt=(attempt + 1))
    return dbname
def build_genome_db(genome, refresh=False):
    """Build (or reuse) the BLAST database for *genome* and return its name."""
    if genome.blastdb is not None and not refresh:
        # A db name is already recorded and no rebuild was requested.
        print("already built genome blast db for %s" % genome.id)
        return genome.blastdb
    fragment_list = list(genome.fragments.all())
    new_dbname = build_db(fragment_list, default_genome_db_name(genome), refresh=refresh)
    genome.blastdb = new_dbname
    genome.save()
    return new_dbname
def check_and_build_genome_db(genome, refresh=False):
    """Ensure *genome* has a BLAST database, building one if absent or if *refresh* is set."""
    needs_build = refresh or not genome.blastdb
    if needs_build:
        build_genome_db(genome, refresh)
def build_all_genome_dbs(refresh=False):
    """Build (or verify) a BLAST database for every genome in the database."""
    for one_genome in Genome.objects.all():
        build_genome_db(one_genome, refresh=refresh)
def build_all_db():
    """Build the single combined BLAST database over all fragments."""
    every_fragment = Fragment.objects.all()
    build_db(every_fragment, BLAST_DB)
| mit | 19fcb40adf7342d520b17db17db6e992 | 31.616 | 98 | 0.644837 | 3.604775 | false | false | false | false |
westpa/westpa | tests/refs/odld_system.py | 2 | 3853 | import numpy as np
from numpy.random import RandomState
from westpa.core.binning import RectilinearBinMapper
from westpa.core.propagators import WESTPropagator
from westpa.core.systems import WESTSystem
# Module-level constants shared by the propagator and system classes below.
PI = np.pi
pcoord_len = 21  # number of progress-coordinate points stored per segment
pcoord_dtype = np.float32  # storage dtype for progress-coordinate values
class ODLDPropagator(WESTPropagator):
    """Propagator for the 1D over-damped Langevin dynamics (ODLD) test system.

    Integrates ``x_{t+1} = x_t - (sigma^2/2) * grad(x_t) + N(0, sigma)`` with a
    fixed-seed RandomState so runs are reproducible, optionally reflecting
    walkers at ``reflect_at``.
    """

    def __init__(self, rc=None):
        super().__init__(rc)
        self.coord_len = pcoord_len
        self.coord_dtype = pcoord_dtype
        self.coord_ndim = 1
        # All basis/initial states start at x = 8.0.
        self.initial_pcoord = np.array([8.0], dtype=self.coord_dtype)
        # Noise scale: sqrt(0.001).
        self.sigma = 0.001 ** (0.5)
        # Potential parameters (A: period, B: amplitude, C: decay, x0: offset).
        self.A = 2
        self.B = 10
        self.C = 0.5
        self.x0 = 1
        # Fixed seed keeps trajectories deterministic across runs.
        self.prng = RandomState(seed=8675309)
        # Implement a reflecting boundary at this x value
        # (or None, for no reflection)
        self.reflect_at = 10.0

    def get_pcoord(self, state):
        '''Get the progress coordinate of the given basis or initial state.'''
        state.pcoord = self.initial_pcoord.copy()

    def gen_istate(self, basis_state, initial_state):
        """Generate an initial state at the fixed starting coordinate and mark it prepared."""
        initial_state.pcoord = self.initial_pcoord.copy()
        initial_state.istate_status = initial_state.ISTATE_STATUS_PREPARED
        return initial_state

    def propagate(self, segments):
        """Propagate all *segments* for ``coord_len - 1`` Langevin steps in lockstep."""
        A, B, C, x0 = self.A, self.B, self.C, self.x0
        n_segs = len(segments)
        # coords[i, t, 0] is the position of segment i at step t.
        coords = np.empty((n_segs, self.coord_len, self.coord_ndim), dtype=self.coord_dtype)
        for iseg, segment in enumerate(segments):
            coords[iseg, 0] = segment.pcoord[0]
        # Hoist loop invariants out of the time-step loop.
        twopi_by_A = 2 * PI / A
        half_B = B / 2
        sigma = self.sigma
        gradfactor = self.sigma * self.sigma / 2
        coord_len = self.coord_len
        reflect_at = self.reflect_at
        # Random displacements are recorded per step for later analysis.
        all_displacements = np.zeros((n_segs, self.coord_len, self.coord_ndim), dtype=self.coord_dtype)
        for istep in range(1, coord_len):
            x = coords[:, istep - 1, 0]
            xarg = twopi_by_A * (x - x0)
            eCx = np.exp(C * x)
            eCx_less_one = eCx - 1.0
            all_displacements[:, istep, 0] = displacements = self.prng.normal(scale=sigma, size=(n_segs,))
            # Analytic gradient of the ODLD potential at x.
            grad = half_B / (eCx_less_one * eCx_less_one) * (twopi_by_A * eCx_less_one * np.sin(xarg) + C * eCx * np.cos(xarg))
            # Over-damped Langevin step: drift down the gradient plus noise.
            newx = x - gradfactor * grad + displacements
            if reflect_at is not None:
                # Anything that has moved beyond reflect_at must move back that much
                # boolean array of what to reflect
                to_reflect = newx > reflect_at
                # how far the things to reflect are beyond our boundary
                reflect_by = newx[to_reflect] - reflect_at
                # subtract twice how far they exceed the boundary by
                # puts them the same distance from the boundary, on the other side
                newx[to_reflect] -= 2 * reflect_by
            coords[:, istep, 0] = newx
        # Write trajectories back onto the segment objects.
        for iseg, segment in enumerate(segments):
            segment.pcoord[...] = coords[iseg, :]
            segment.data['displacement'] = all_displacements[iseg]
            segment.status = segment.SEG_STATUS_COMPLETE
        return segments
class ODLDSystem(WESTSystem):
    """WEST system definition for the 1D ODLD test potential."""

    def initialize(self):
        """Set progress-coordinate metadata, the bin mapper, and per-bin walker counts."""
        self.pcoord_ndim = 1
        self.pcoord_dtype = pcoord_dtype
        self.pcoord_len = pcoord_len
        # 101 uniform boundaries on [0, 10] -> 100 bins of width 0.1.
        # (Removed leftover debug print and commented-out alternative mapper.)
        self.bin_mapper = RectilinearBinMapper([list(np.arange(0.0, 10.1, 0.1))])
        self.bin_target_counts = np.empty((self.bin_mapper.nbins,), np.int_)
        self.bin_target_counts[...] = 10
| mit | 877f3332e2f37c70a33170ed790b718f | 34.027273 | 127 | 0.593563 | 3.335931 | false | false | false | false |
westpa/westpa | src/westpa/core/propagators/__init__.py | 2 | 2374 | import westpa
import itertools
def blocked_iter(blocksize, iterable, fillvalue=None):
    """Yield *blocksize*-length tuples from *iterable*, padding the last with *fillvalue*.

    Equivalent to the "grouper" recipe from the itertools documentation:
    the same iterator is passed *blocksize* times to ``zip_longest`` so each
    output tuple consumes *blocksize* consecutive items.
    """
    shared_iterator = iter(iterable)
    return itertools.zip_longest(*([shared_iterator] * blocksize), fillvalue=fillvalue)
class WESTPropagator:
    """Abstract base class for WEST propagation drivers."""

    def __init__(self, rc=None):
        # The basis and initial states needed by the current operation are
        # cached here as {state_id: state} mappings.  Keeping them on the
        # propagator avoids threading them through every routine that needs a
        # lookup by ID, and lets callers ship only the states actually needed
        # (rather than the full set) to remote workers.
        self.basis_states = {}
        self.initial_states = {}
        self.rc = rc or westpa.rc

    def prepare_iteration(self, n_iter, segments):
        """Hook for per-iteration setup; invoked by the work manager."""
        pass

    def finalize_iteration(self, n_iter, segments):
        """Hook for per-iteration cleanup; invoked by the work manager."""
        pass

    # Required WEST framework interface -- subclasses must implement these.

    def get_pcoord(self, state):
        """Get the progress coordinate of the given basis or initial state."""
        raise NotImplementedError

    def gen_istate(self, basis_state, initial_state):
        """Generate a new initial state from the given basis state."""
        raise NotImplementedError

    def propagate(self, segments):
        """Propagate one or more segments, including any per-iteration setup and teardown."""
        raise NotImplementedError

    def clear_basis_initial_states(self):
        """Drop all cached basis and initial states."""
        self.basis_states = {}
        self.initial_states = {}

    def update_basis_initial_states(self, basis_states, initial_states):
        """Merge the given states into the cached {state_id: state} mappings."""
        for basis_state in basis_states:
            self.basis_states[basis_state.state_id] = basis_state
        for initial_state in initial_states:
            self.initial_states[initial_state.state_id] = initial_state
| mit | 74a32d46362c991bd688a50ae78f366e | 42.163636 | 123 | 0.686184 | 4.347985 | false | false | false | false |
westpa/westpa | src/westpa/work_managers/core.py | 1 | 15061 | #
# This implementation is derived from the ``concurrent.futures``
# module of Python 3.2, by Brian Quinlan, (C) 2011 the Python Software
# Foundation. See http://docs.python.org/3/license.html for more information.
import logging
import signal
import threading
import uuid
from itertools import islice
from contextlib import contextmanager
import h5py
log = logging.getLogger(__name__)
class WorkManager:
    '''Base class for all work managers. At a minimum, work managers must provide a
    ``submit()`` function and a ``n_workers`` attribute (which may be a property),
    though most will also override ``startup()`` and ``shutdown()``.'''

    @classmethod
    def from_environ(cls, wmenv=None):
        # Subclasses construct themselves from work-manager environment settings.
        raise NotImplementedError

    @classmethod
    def add_wm_args(cls, parser, wmenv=None):
        # Subclasses may add their own command-line arguments; default is none.
        return

    def __repr__(self):
        return '<{classname} at 0x{id:x}>'.format(classname=self.__class__.__name__, id=id(self))

    def __init__(self):
        self._sigint_handler_installed = False
        self.prior_sigint_handler = None  # handler in effect before ours was installed
        self.running = False

    def __enter__(self):
        # Context-manager protocol: ``with wm:`` starts workers on entry ...
        self.startup()
        return self

    def __exit__(self, exc_type, exc_val, exc_traceback):
        # ... and shuts them down on exit; exceptions are not suppressed.
        self.shutdown()
        return False

    def sigint_handler(self, signum, frame):
        """Shut down cleanly on SIGINT, then defer to whatever handler was installed before."""
        self.shutdown()
        if self.prior_sigint_handler in (signal.SIG_IGN, None):
            pass
        elif self.prior_sigint_handler == signal.SIG_DFL:
            # Emulate the default SIGINT behavior.
            raise KeyboardInterrupt
        else:
            self.prior_sigint_handler(signum, frame)

    def install_sigint_handler(self):
        """Install :meth:`sigint_handler` as the SIGINT handler (idempotent)."""
        if not self._sigint_handler_installed:
            self._sigint_handler_installed = True
            self.prior_sigint_handler = signal.signal(signal.SIGINT, self.sigint_handler)

    def startup(self):
        '''Perform any necessary startup work, such as spawning clients.'''
        self.running = True

    def shutdown(self):
        '''Cleanly shut down any active workers.'''
        self.running = False

    def run(self):
        '''Run the worker loop (in clients only).'''
        pass

    def submit(self, fn, args=None, kwargs=None):
        '''Submit a task to the work manager, returning a `WMFuture` object representing the pending
        result. ``fn(*args,**kwargs)`` will be executed by a worker, and the return value assigned as the
        result of the returned future. The function ``fn`` and all arguments must be picklable; note
        particularly that off-path modules (like the system module and any active plugins) are not
        picklable unless pre-loaded in the worker process (i.e. prior to forking the master).'''
        raise NotImplementedError

    def submit_many(self, tasks):
        '''Submit a set of tasks to the work manager, returning a list of `WMFuture` objects representing
        pending results. Each entry in ``tasks`` should be a triple (fn, args, kwargs), which will result in
        fn(*args, **kwargs) being executed by a worker. The function ``fn`` and all arguments must be
        picklable; note particularly that off-path modules are not picklable unless pre-loaded in the worker
        process.'''
        return [self.submit(fn, args, kwargs) for (fn, args, kwargs) in tasks]

    def as_completed(self, futures):
        '''Return a generator which yields results from the given ``futures`` as they become
        available.'''
        pending = set(futures)
        # See which futures have results, and install a watcher on those that do not
        with WMFuture.all_acquired(pending):
            completed = {future for future in futures if future.done}
            pending -= completed
            # threshold=1: wake as soon as any one future completes.
            watcher = FutureWatcher(pending, threshold=1)
        # Yield available results immediately
        for future in completed:
            yield future
        del completed
        # Wait on any remaining results
        while pending:
            watcher.wait()
            completed = watcher.reset()
            for future in completed:
                yield future
                pending.remove(future)

    def submit_as_completed(self, task_generator, queue_size=None):
        '''Return a generator which yields results from a set of ``futures`` as they become
        available. Futures are generated by the ``task_generator``, which must return a triple of the form
        expected by ``submit``. The method also accepts an int ``queue_size`` that dictates the
        maximum number of Futures that should be pending at any given time. The default value of
        ``None`` submits all of the tasks at once.'''
        # islice(..., queue_size) primes at most queue_size tasks up front.
        futures = [self.submit(fn, args, kwargs) for (fn, args, kwargs) in islice(task_generator, queue_size)]
        pending = set(futures)
        with WMFuture.all_acquired(pending):
            watcher = FutureWatcher(pending, threshold=1)
        while pending:
            watcher.wait()
            completed = watcher.reset()
            # Refill the queue: submit one new task per completed future.
            new_futures = [self.submit(fn, args, kwargs) for (fn, args, kwargs) in islice(task_generator, len(completed))]
            pending.update(new_futures)
            with WMFuture.all_acquired(new_futures):
                watcher.add(new_futures)
            for future in completed:
                yield future
                pending.remove(future)

    def wait_any(self, futures):
        '''Wait on any of the given ``futures`` and return the first one which has a result available.
        If more than one result is or becomes available simultaneously, any completed future may be returned.'''
        pending = set(futures)
        with WMFuture.all_acquired(pending):
            completed = {future for future in futures if future.done}
            if completed:
                # If any futures are complete, then we don't need to do anything else
                return completed.pop()
            else:
                # Otherwise, we need to install a watcher
                watcher = FutureWatcher(futures, threshold=1)
        watcher.wait()
        completed = watcher.reset()
        return completed.pop()

    def wait_all(self, futures):
        '''A convenience function which waits on all the given ``futures`` in order. This function returns
        the same ``futures`` as submitted to the function as a list, indicating the order in which waits
        occurred.'''
        futures = list(futures)
        results = []
        for future in futures:
            # Accessing .result blocks until the future completes.
            results.append(future.result)
        return futures

    @property
    def is_master(self):
        '''True if this is the master process for task distribution. This is necessary, e.g., for
        MPI, where all processes start identically and then must branch depending on rank.'''
        return True
class FutureWatcher:
    '''A device to wait on multiple results and/or exceptions with only one lock.'''

    def __init__(self, futures, threshold=1):
        # event is set once at least `threshold` futures have signalled completion.
        self.event = threading.Event()
        self.lock = threading.RLock()
        self.threshold = threshold
        self.completed = []  # futures that have signalled since the last reset()
        for future in futures:
            future._add_watcher(self)

    def signal(self, future):
        '''Signal this watcher that the given future has results available. If this
        brings the number of available futures above signal_threshold, this watcher's
        event object will be signalled as well.'''
        with self.lock:
            self.completed.append(future)
            if len(self.completed) >= self.threshold:
                self.event.set()

    def wait(self):
        '''Wait on one or more futures.'''
        return self.event.wait()

    def reset(self):
        '''Reset this watcher's list of completed futures, returning the list of completed futures
        prior to resetting it.'''
        with self.lock:
            # Clear the event so the next wait() blocks until new completions arrive.
            self.event.clear()
            completed = self.completed
            self.completed = []
            return completed

    def add(self, futures):
        '''Add watchers to all futures in the iterable of futures.'''
        for future in futures:
            future._add_watcher(self)
class WMFuture:
    '''A "future", representing work which has been dispatched for completion asynchronously.'''

    @staticmethod
    @contextmanager
    def all_acquired(futures):
        '''Context manager to acquire all locks on the given ``futures``. Primarily for internal use.'''
        futures = list(futures)
        for future in futures:
            future._condition.acquire()
        try:
            yield  # to contents of "with" block
        finally:
            # Release even if the with-body raises, so futures are never left locked.
            for future in futures:
                future._condition.release()

    def __init__(self, task_id=None):
        self.task_id = task_id or uuid.uuid4()
        # Single condition variable guards all mutable state on this future.
        self._condition = threading.Condition()
        self._done = False
        self._result = None
        self._exception = None
        self._traceback = None
        # a set of Events representing who is waiting on results from this future
        # this set will be cleared after the result is updated and watchers are notified
        self._watchers = set()
        # a set of functions that will be called with this future as an argument when it is updated with a
        # result. This list will be cleared after the result is updated and all callbacks invoked
        self._update_callbacks = []

    def __repr__(self):
        return '<WMFuture 0x{id:x}: {self.task_id!s}>'.format(id=id(self), self=self)

    def __hash__(self):
        return hash(self.task_id)

    def _notify_watchers(self):
        '''Notify all watchers that this future has been updated, then deletes the list of update watchers.'''
        with self._condition:
            assert self._done
            for watcher in self._watchers:
                watcher.signal(self)
            self._watchers.clear()

    def _invoke_callbacks(self):
        '''Invoke all callbacks which have been registered on this future. Exceptions in callbacks
        will be logged and ignored.'''
        with self._condition:
            for callback in self._update_callbacks:
                try:
                    callback(self)
                except Exception:
                    # This may need to be a simple print to stderr, depending on the locking
                    # semantics of the logger.
                    log.exception('ignoring exception in result callback')
            del self._update_callbacks
            self._update_callbacks = []

    def _add_watcher(self, watcher):
        '''Add the given update watcher to the internal list of watchers. If a result is available,
        returns immediately without updating the list of watchers.'''
        with self._condition:
            if self._done:
                watcher.signal(self)
                return
            else:
                self._watchers.add(watcher)

    def _add_callback(self, callback):
        '''Add the given update callback to the internal list of callbacks. If a result is available,
        invokes the callback immediately without updating the list of callbacks.'''
        with self._condition:
            if self._done:
                try:
                    callback(self)
                except Exception:
                    log.exception('ignoring exception in result callback')
            else:
                self._update_callbacks.append(callback)

    def _set_result(self, result):
        '''Set the result of this future to the given value, invoke on-completion callbacks, and notify
        watchers.'''
        with self._condition:
            self._result = result
            self._done = True
            self._condition.notify_all()
            self._invoke_callbacks()
            self._notify_watchers()

    def _set_exception(self, exception, traceback=None):
        '''Set the exception of this future to the given value, invoke on-completion callbacks, and notify
        watchers.'''
        with self._condition:
            self._exception = exception
            self._traceback = traceback
            self._done = True
            self._condition.notify_all()
            self._invoke_callbacks()
            self._notify_watchers()

    def get_result(self, discard=True):
        '''Get the result associated with this future, blocking until it is available.
        If ``discard`` is true, then removes the reference to the result contained
        in this instance, so that a collection of futures need not turn into a cache of
        all associated results.'''
        with self._condition:
            if self._done:
                if self._exception:
                    # Remote tracebacks arrive as strings (possibly HDF5-typed);
                    # local ones are real traceback objects suitable for re-raise.
                    if isinstance(self._traceback, h5py.string_dtype(encoding='utf-8').type) or isinstance(self._traceback, str):
                        if self._traceback:
                            log.error('uncaught exception in remote function\n{}'.format(self._traceback))
                        raise self._exception
                    else:
                        raise self._exception.with_traceback(self._traceback)
            else:
                self._condition.wait()
                assert self._done
                if self._exception:
                    if isinstance(self._traceback, str):
                        log.error('uncaught exception in remote function\n{}'.format(self._traceback))
                        raise self._exception
                    else:
                        raise self._exception.with_traceback(self._traceback)
            result = self._result
            if discard:
                del self._result
            return result

    @property
    def result(self):
        return self.get_result(discard=False)

    def wait(self):
        '''Wait until this future has a result or exception available.'''
        with self._condition:
            if self._done:
                return
            else:
                self._condition.wait()
                assert self._done
                return

    def get_exception(self):
        '''Get the exception associated with this future, blocking until it is available.'''
        with self._condition:
            if self._done:
                return self._exception
            else:
                self._condition.wait()
                assert self._done
                return self._exception

    exception = property(get_exception, None, None, get_exception.__doc__)

    def get_traceback(self):
        '''Get the traceback object associated with this future, if any.'''
        with self._condition:
            # BUG FIX: this previously tested ``self._returned``, an attribute
            # that is never defined anywhere on this class, so calling
            # get_traceback() on a completed future raised AttributeError.
            if self._done:
                return self._traceback
            else:
                self._condition.wait()
                assert self._done
                return self._traceback

    traceback = property(get_traceback, None, None, get_traceback.__doc__)

    def is_done(self):
        'Indicates whether this future is done executing (may block if this future is being updated).'
        with self._condition:
            return self._done

    done = property(is_done, None, None, is_done.__doc__)
# end class WMFuture
| mit | 4c65d1ecfa862b71378977c024de0c28 | 37.032828 | 129 | 0.60421 | 4.702154 | false | false | false | false |
westpa/westpa | src/westpa/westext/weed/weed_driver.py | 1 | 8129 | import logging
import operator
import numpy as np
import westpa
from westpa.core.yamlcfg import check_bool
from westpa.core.kinetics import RateAverager
from westpa.westext.weed.ProbAdjustEquil import probAdjustEquil
from westpa.core._rc import bins_from_yaml_dict
# Smallest representable double-precision increment; used below as the tolerance
# when asserting that bin weights still sum to 1 after reweighting.
EPS = np.finfo(np.float64).eps
log = logging.getLogger(__name__)
class WEEDDriver:
    '''WESTPA plugin implementing WEED (weighted-ensemble equilibrium dynamics)
    reweighting: periodically estimates bin-to-bin rates over a trailing window
    of iterations and adjusts bin probabilities toward their equilibrium values.
    '''

    def __init__(self, sim_manager, plugin_config):
        '''Read plugin configuration and register the reweighting callback.

        Only the master work-manager process configures itself; workers return
        immediately.
        '''
        if not sim_manager.work_manager.is_master:
            return
        self.sim_manager = sim_manager
        self.data_manager = sim_manager.data_manager
        self.system = sim_manager.system
        self.work_manager = sim_manager.work_manager
        # Either config key enables reweighting.
        self.do_reweight = check_bool(
            plugin_config.get('do_equilibrium_reweighting', False) or plugin_config.get('do_reweighting', False)
        )
        # Window over which rates are averaged: a float in (0,1] is a fraction
        # of the completed iterations; an int is a fixed number of iterations.
        self.windowsize = 0.5
        self.windowtype = 'fraction'
        windowsize = plugin_config.get('window_size')
        if windowsize is not None:
            if isinstance(windowsize, float):
                self.windowsize = windowsize
                self.windowtype = 'fraction'
                if self.windowsize <= 0 or self.windowsize > 1:
                    raise ValueError('WEED parameter error -- fractional window size must be in (0,1]')
            elif isinstance(windowsize, int):
                self.windowsize = int(windowsize)
                self.windowtype = 'fixed'
            else:
                raise ValueError('WEED parameter error -- invalid window size {!r}'.format(windowsize))
        log.info('using window size of {!r} ({})'.format(self.windowsize, self.windowtype))
        self.max_windowsize = plugin_config.get('max_window_size')
        if self.max_windowsize is not None:
            log.info('Using max windowsize of {:d}'.format(self.max_windowsize))
        self.reweight_period = plugin_config.get('reweight_period', 0)
        self.priority = plugin_config.get('priority', 0)
        self.rate_calc_queue_size = plugin_config.get('rate_calc_queue_size', 1)
        self.rate_calc_n_blocks = plugin_config.get('rate_calc_n_blocks', 1)
        # Optional alternate bin mapper (YAML dict form is converted here);
        # None means "use the simulation's own mapper".
        bin_obj = plugin_config.get('bins', None)
        if isinstance(bin_obj, dict):
            bin_obj = bins_from_yaml_dict(bin_obj)
        self.bin_mapper = bin_obj
        if self.do_reweight:
            sim_manager.register_callback(sim_manager.prepare_new_iteration, self.prepare_new_iteration, self.priority)

    def get_rates(self, n_iter, mapper):
        '''Get rates and associated uncertainties as of n_iter, according to the window size the user
        has selected (self.windowsize).

        Returns the populated RateAverager; the effective window size actually
        used is stored in self.eff_windowsize.
        '''
        if self.windowtype == 'fraction':
            if self.max_windowsize is not None:
                eff_windowsize = min(self.max_windowsize, int(n_iter * self.windowsize))
            else:
                eff_windowsize = int(n_iter * self.windowsize)
        else:  # self.windowtype == 'fixed':
            eff_windowsize = min(n_iter, self.windowsize or 0)
        averager = RateAverager(mapper, self.system, self.data_manager, self.work_manager)
        averager.calculate(max(1, n_iter - eff_windowsize), n_iter + 1, self.rate_calc_n_blocks, self.rate_calc_queue_size)
        self.eff_windowsize = eff_windowsize
        return averager

    def prepare_new_iteration(self):
        '''Callback run before each iteration: if due, compute windowed rates,
        store them under the iteration's ``weed`` group, and reweight bins
        toward the estimated equilibrium populations.'''
        n_iter = self.sim_manager.n_iter
        we_driver = self.sim_manager.we_driver
        if we_driver.target_states and self.do_reweight:
            log.warning('equilibrium reweighting requested but target states (sinks) present; reweighting disabled')
            return
        if not self.do_reweight:
            # Reweighting not requested
            log.debug('Equilibrium reweighting not enabled')
            return
        with self.data_manager.lock:
            weed_global_group = self.data_manager.we_h5file.require_group('weed')
            last_reweighting = int(weed_global_group.attrs.get('last_reweighting', 0))
        if n_iter - last_reweighting < self.reweight_period:
            # Not time to reweight yet
            log.debug('not reweighting')
            return
        else:
            log.debug('reweighting')
        if self.bin_mapper is None:
            mapper = we_driver.bin_mapper
            bins = we_driver.next_iter_binning
            westpa.rc.pstatus('\nReweighting using the simulation bin mapper:\n{}'.format(mapper))
        else:
            # Re-bin the upcoming iteration's segments with the plugin's own mapper.
            mapper = self.bin_mapper
            bins = mapper.construct_bins()
            segments = [s for s in we_driver.next_iter_segments]
            pcoords = self.system.new_pcoord_array(len(segments))
            for iseg, segment in enumerate(segments):
                pcoords[iseg] = segment.pcoord[0]
            assignments = mapper.assign(pcoords)
            for (segment, assignment) in zip(segments, assignments):
                bins[assignment].add(segment)
            westpa.rc.pstatus('\nReweighting using a different bin mapper than simulation:\n{}'.format(mapper))
        n_bins = len(bins)
        # Create storage for ourselves
        with self.data_manager.lock:
            iter_group = self.data_manager.get_iter_group(n_iter)
            try:
                del iter_group['weed']
            except KeyError:
                pass
            weed_iter_group = iter_group.create_group('weed')
            avg_populations_ds = weed_iter_group.create_dataset('avg_populations', shape=(n_bins,), dtype=np.float64)
            unc_populations_ds = weed_iter_group.create_dataset('unc_populations', shape=(n_bins,), dtype=np.float64)
            avg_flux_ds = weed_iter_group.create_dataset('avg_fluxes', shape=(n_bins, n_bins), dtype=np.float64)
            unc_flux_ds = weed_iter_group.create_dataset('unc_fluxes', shape=(n_bins, n_bins), dtype=np.float64)
            avg_rates_ds = weed_iter_group.create_dataset('avg_rates', shape=(n_bins, n_bins), dtype=np.float64)
            unc_rates_ds = weed_iter_group.create_dataset('unc_rates', shape=(n_bins, n_bins), dtype=np.float64)
        averager = self.get_rates(n_iter, mapper)
        with self.data_manager.flushing_lock():
            avg_populations_ds[...] = averager.average_populations
            unc_populations_ds[...] = averager.stderr_populations
            avg_flux_ds[...] = averager.average_flux
            unc_flux_ds[...] = averager.stderr_flux
            avg_rates_ds[...] = averager.average_rate
            unc_rates_ds[...] = averager.stderr_rate
        binprobs = np.fromiter(map(operator.attrgetter('weight'), bins), dtype=np.float64, count=n_bins)
        orig_binprobs = binprobs.copy()
        westpa.rc.pstatus('Calculating equilibrium reweighting using window size of {:d}'.format(self.eff_windowsize))
        westpa.rc.pstatus('\nBin probabilities prior to reweighting:\n{!s}'.format(binprobs))
        westpa.rc.pflush()
        # Adjusts binprobs in place toward the equilibrium solution.
        probAdjustEquil(binprobs, averager.average_rate, averager.stderr_rate)
        # Check to see if reweighting has set non-zero bins to zero probability (should never happen)
        assert (~((orig_binprobs > 0) & (binprobs == 0))).all(), 'populated bin reweighted to zero probability'
        # Check to see if reweighting has set zero bins to nonzero probability (may happen)
        z2nz_mask = (orig_binprobs == 0) & (binprobs > 0)
        if (z2nz_mask).any():
            westpa.rc.pstatus('Reweighting would assign nonzero probability to an empty bin; not reweighting this iteration.')
            westpa.rc.pstatus('Empty bins assigned nonzero probability: {!s}.'.format(np.array_str(np.arange(n_bins)[z2nz_mask])))
        else:
            westpa.rc.pstatus('\nBin populations after reweighting:\n{!s}'.format(binprobs))
            for (target_bin, newprob) in zip(bins, binprobs):
                target_bin.reweight(newprob)
            weed_global_group.attrs['last_reweighting'] = n_iter
        # Total weight must still be 1 to within one EPS per walker.
        # (dtype=int here: the np.int alias was removed in NumPy 1.24.)
        assert (
            abs(1 - np.fromiter(map(operator.attrgetter('weight'), bins), dtype=np.float64, count=n_bins).sum())
            < EPS * np.fromiter(map(len, bins), dtype=int, count=n_bins).sum()
        )
        westpa.rc.pflush()
| mit | f4ec5aa878c715679c64fdf46ecc44c2 | 43.664835 | 130 | 0.624062 | 3.585796 | false | true | false | false |
westpa/westpa | src/westpa/cli/tools/w_assign.py | 1 | 25946 | import logging
import math
import os
import numpy as np
from numpy import index_exp
from westpa.core.data_manager import seg_id_dtype, weight_dtype
from westpa.core.binning import index_dtype, assign_and_label, accumulate_labeled_populations
from westpa.tools import WESTParallelTool, WESTDataReader, WESTDSSynthesizer, BinMappingComponent, ProgressIndicatorComponent
import westpa
from westpa.core import h5io
from westpa.core.h5io import WESTPAH5File
from westpa.core.extloader import get_object
# Module-level logger for this command-line tool.
log = logging.getLogger('w_assign')
# Changes to keep it alive...
def parse_pcoord_value(pc_str):
    '''Evaluate *pc_str* as a Python expression (with ``math``, ``numpy``/``np``,
    and ``inf`` available) and return it as a 2-D array of shape (1, ndim).

    A scalar becomes shape (1, 1) and a 1-D sequence becomes (1, n); anything
    with two or more dimensions is rejected.
    '''
    # NOTE: eval() of operator-supplied CLI text is intentional here; this must
    # never be exposed to untrusted input.
    eval_env = {'math': math, 'numpy': np, 'np': np, 'inf': float('inf')}
    value = np.array(eval(pc_str, eval_env))
    if value.ndim > 1:
        raise ValueError('too many dimensions')
    if value.ndim == 0:
        value.shape = (1, 1)
    else:  # value.ndim == 1
        value.shape = (1,) + value.shape
    return value
def _assign_label_pop(
    n_iter, lb, ub, mapper, nstates, state_map, last_labels, parent_id_dsspec, weight_dsspec, pcoord_dsspec, subsample
):
    '''Worker task: assign segments [lb, ub) of iteration *n_iter* to bins and
    macrostate labels, and accumulate their weight into a labeled population
    matrix.

    Returns (assignments, trajlabels, pops, lb, ub, statelabels) so the caller
    can splice the slice back into per-iteration arrays.
    '''
    nbins = len(state_map) - 1
    seg_slice = index_exp[lb:ub]
    parent_ids = parent_id_dsspec.get_iter_data(n_iter, seg_slice)
    weights = weight_dsspec.get_iter_data(n_iter, seg_slice)
    pcoords = pcoord_dsspec.get_iter_data(n_iter, seg_slice)
    assignments, trajlabels, statelabels = assign_and_label(
        lb, ub, parent_ids, mapper.assign, nstates, state_map, last_labels, pcoords, subsample
    )
    # Extra row/column hold the "unknown state"/"out of bin space" entries.
    pops = np.zeros((nstates + 1, nbins + 1), weight_dtype)
    accumulate_labeled_populations(weights, assignments, trajlabels, pops)
    return (assignments, trajlabels, pops, lb, ub, statelabels)
class WAssign(WESTParallelTool):
prog = 'w_assign'
description = '''\
Assign walkers to bins, producing a file (by default named "assign.h5")
which can be used in subsequent analysis.
For consistency in subsequent analysis operations, the entire dataset
must be assigned, even if only a subset of the data will be used. This
ensures that analyses that rely on tracing trajectories always know the
originating bin of each trajectory.
-----------------------------------------------------------------------------
Source data
-----------------------------------------------------------------------------
Source data is provided either by a user-specified function
(--construct-dataset) or a list of "data set specifications" (--dsspecs).
If neither is provided, the progress coordinate dataset ''pcoord'' is used.
To use a custom function to extract or calculate data whose probability
distribution will be calculated, specify the function in standard Python
MODULE.FUNCTION syntax as the argument to --construct-dataset. This function
will be called as function(n_iter,iter_group), where n_iter is the iteration
whose data are being considered and iter_group is the corresponding group
in the main WEST HDF5 file (west.h5). The function must return data which can
be indexed as [segment][timepoint][dimension].
To use a list of data set specifications, specify --dsspecs and then list the
desired datasets one-by-one (space-separated in most shells). These data set
specifications are formatted as NAME[,file=FILENAME,slice=SLICE], which will
use the dataset called NAME in the HDF5 file FILENAME (defaulting to the main
WEST HDF5 file west.h5), and slice it with the Python slice expression SLICE
(as in [0:2] to select the first two elements of the first axis of the
dataset). The ``slice`` option is most useful for selecting one column (or
more) from a multi-column dataset, such as arises when using a progress
coordinate of multiple dimensions.
-----------------------------------------------------------------------------
Specifying macrostates
-----------------------------------------------------------------------------
Optionally, kinetic macrostates may be defined in terms of sets of bins.
Each trajectory will be labeled with the kinetic macrostate it was most
recently in at each timepoint, for use in subsequent kinetic analysis.
This is required for all kinetics analysis (w_kintrace and w_kinmat).
There are three ways to specify macrostates:
1. States corresponding to single bins may be identified on the command
line using the --states option, which takes multiple arguments, one for
each state (separated by spaces in most shells). Each state is specified
as a coordinate tuple, with an optional label prepended, as in
``bound:1.0`` or ``unbound:(2.5,2.5)``. Unlabeled states are named
``stateN``, where N is the (zero-based) position in the list of states
supplied to --states.
2. States corresponding to multiple bins may use a YAML input file specified
with --states-from-file. This file defines a list of states, each with a
name and a list of coordinate tuples; bins containing these coordinates
will be mapped to the containing state. For instance, the following
file::
---
states:
- label: unbound
coords:
- [9.0, 1.0]
- [9.0, 2.0]
- label: bound
coords:
- [0.1, 0.0]
produces two macrostates: the first state is called "unbound" and
consists of bins containing the (2-dimensional) progress coordinate
values (9.0, 1.0) and (9.0, 2.0); the second state is called "bound"
and consists of the single bin containing the point (0.1, 0.0).
3. Arbitrary state definitions may be supplied by a user-defined function,
specified as --states-from-function=MODULE.FUNCTION. This function is
called with the bin mapper as an argument (``function(mapper)``) and must
return a list of dictionaries, one per state. Each dictionary must contain
a vector of coordinate tuples with key "coords"; the bins into which each
of these tuples falls define the state. An optional name for the state
(with key "label") may also be provided.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "assign.h5") contains the following
attributes datasets:
``nbins`` attribute
*(Integer)* Number of valid bins. Bin assignments range from 0 to
*nbins*-1, inclusive.
``nstates`` attribute
*(Integer)* Number of valid macrostates (may be zero if no such states are
specified). Trajectory ensemble assignments range from 0 to *nstates*-1,
inclusive, when states are defined.
``/assignments`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint assignments (bin indices).
``/npts`` [iteration]
*(Integer)* Number of timepoints in each iteration.
``/nsegs`` [iteration]
*(Integer)* Number of segments in each iteration.
``/labeled_populations`` [iterations][state][bin]
*(Floating-point)* Per-iteration and -timepoint bin populations, labeled
by most recently visited macrostate. The last state entry (*nstates-1*)
corresponds to trajectories initiated outside of a defined macrostate.
``/bin_labels`` [bin]
*(String)* Text labels of bins.
When macrostate assignments are given, the following additional datasets are
present:
``/trajlabels`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint trajectory labels, indicating the
macrostate which each trajectory last visited.
``/state_labels`` [state]
*(String)* Labels of states.
``/state_map`` [bin]
*(Integer)* Mapping of bin index to the macrostate containing that bin.
An entry will contain *nbins+1* if that bin does not fall into a
macrostate.
Datasets indexed by state and bin contain one more entry than the number of
valid states or bins. For *N* bins, axes indexed by bin are of size *N+1*, and
entry *N* (0-based indexing) corresponds to a walker outside of the defined bin
space (which will cause most mappers to raise an error). More importantly, for
*M* states (including the case *M=0* where no states are specified), axes
indexed by state are of size *M+1* and entry *M* refers to trajectories
initiated in a region not corresponding to a defined macrostate.
Thus, ``labeled_populations[:,:,:].sum(axis=1)[:,:-1]`` gives overall per-bin
populations, for all defined bins and
``labeled_populations[:,:,:].sum(axis=2)[:,:-1]`` gives overall
per-trajectory-ensemble populations for all defined states.
-----------------------------------------------------------------------------
Parallelization
-----------------------------------------------------------------------------
This tool supports parallelized binning, including reading/calculating input
data.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
    def __init__(self):
        '''Initialize tool components and default option values.'''
        super().__init__()
        # Parallel processing by default (this is not actually necessary, but it is
        # informative!)
        self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
        # Component objects handling input data, dataset synthesis, bin mapping,
        # and progress display.
        self.data_reader = WESTDataReader()
        self.dssynth = WESTDSSynthesizer(default_dsname='pcoord')
        self.binning = BinMappingComponent()
        self.progress = ProgressIndicatorComponent()
        # Filled in by process_args()/go().
        self.output_file = None
        self.output_filename = None
        self.states = []
        self.subsample = False
    def add_args(self, parser):
        '''Register this tool's command-line arguments on *parser*.'''
        # Component-supplied arguments (input file, bin mapper, dataset synthesis).
        self.data_reader.add_args(parser)
        self.binning.add_args(parser)
        self.dssynth.add_args(parser)
        # The three macrostate-definition mechanisms are mutually exclusive.
        sgroup = parser.add_argument_group('macrostate definitions').add_mutually_exclusive_group()
        sgroup.add_argument(
            '--states',
            nargs='+',
            metavar='STATEDEF',
            help='''Single-bin kinetic macrostate, specified by a coordinate tuple (e.g. '1.0' or '[1.0,1.0]'),
            optionally labeled (e.g. 'bound:[1.0,1.0]'). States corresponding to multiple bins
            must be specified with --states-from-file.''',
        )
        sgroup.add_argument(
            '--states-from-file',
            metavar='STATEFILE',
            help='''Load kinetic macrostates from the YAML file STATEFILE. See description
            above for the appropriate structure.''',
        )
        sgroup.add_argument(
            '--states-from-function',
            metavar='STATEFUNC',
            help='''Load kinetic macrostates from the function STATEFUNC, specified as
            module_name.func_name. This function is called with the bin mapper as an argument,
            and must return a list of dictionaries {'label': state_label, 'coords': 2d_array_like}
            one for each macrostate; the 'coords' entry must contain enough rows to identify all bins
            in the macrostate.''',
        )
        agroup = parser.add_argument_group('other options')
        agroup.add_argument(
            '-o', '--output', dest='output', default='assign.h5', help='''Store results in OUTPUT (default: %(default)s).'''
        )
        agroup.add_argument(
            '--subsample',
            dest='subsample',
            action='store_const',
            const=True,
            help='''Determines whether or not the data should be subsampled.
            This is rather useful for analysing steady state simulations.''',
        )
        agroup.add_argument(
            '--config-from-file',
            dest='config_from_file',
            action='store_true',
            help='''Load bins/macrostates from a scheme specified in west.cfg.''',
        )
        agroup.add_argument('--scheme-name', dest='scheme', help='''Name of scheme specified in west.cfg.''')
    def process_args(self, args):
        '''Validate parsed arguments and configure components, loading macrostate
        definitions from whichever source the user selected.'''
        self.progress.process_args(args)
        self.data_reader.process_args(args)
        # Necessary to open the file to get the current iteration
        # if we want to use the mapper in the file
        self.data_reader.open(mode='r+')
        self.n_iter = self.data_reader.current_iteration
        # If we decide to use this option for iteration selection:
        # getattr(args,'bins_from_h5file',None) or self.data_reader.current_iteration
        with self.data_reader:
            self.dssynth.h5filename = self.data_reader.we_h5filename
            self.dssynth.process_args(args)
            # When --config-from-file is given, the mapper comes from west.cfg
            # (via load_config_from_west) rather than the HDF5 file.
            if args.config_from_file is False:
                self.binning.set_we_h5file_info(self.n_iter, self.data_reader)
                self.binning.process_args(args)
        self.output_filename = args.output
        # Exactly one macrostate source may have been given (argparse enforces
        # mutual exclusion); having none at all is also allowed.
        if args.config_from_file:
            if not args.scheme:
                raise ValueError('A scheme must be specified.')
            else:
                self.load_config_from_west(args.scheme)
        elif args.states:
            self.parse_cmdline_states(args.states)
        elif args.states_from_file:
            self.load_state_file(args.states_from_file)
        elif args.states_from_function:
            self.load_states_from_function(get_object(args.states_from_function, path=['.']))
        # A single macrostate is meaningless for kinetics.
        if self.states and len(self.states) < 2:
            raise ValueError('zero, two, or more macrostates are required')
        # self.output_file = WESTPAH5File(args.output, 'w', creating_program=True)
        log.debug('state list: {!r}'.format(self.states))
        self.subsample = args.subsample if args.subsample is not None else False
def parse_cmdline_states(self, state_strings):
states = []
for istring, state_string in enumerate(state_strings):
try:
(label, coord_str) = state_string.split(':')
except ValueError:
label = 'state{}'.format(istring)
coord_str = state_string
coord = parse_pcoord_value(coord_str)
states.append({'label': label, 'coords': coord})
self.states = states
    def load_config_from_west(self, scheme):
        '''Load states, bins, and output location for analysis scheme *scheme*
        from the ``west.analysis`` section of the active west.cfg.'''
        try:
            config = westpa.rc.config['west']['analysis']
        except Exception:
            raise ValueError('There is no configuration file specified.')
        ystates = config['analysis_schemes'][scheme]['states']
        self.states_from_dict(ystates)
        # 'subsample' is optional in the config; keep the current value if absent.
        try:
            self.subsample = config['subsample']
        except Exception:
            pass
        from westpa.core._rc import bins_from_yaml_dict
        self.binning.mapper = bins_from_yaml_dict(config['analysis_schemes'][scheme]['bins'][0])
        # Per-scheme output directory <analysis_dir>/<scheme>; ignore errors if
        # the directories already exist.
        path = os.path.join(os.getcwd(), config['directory'], scheme)
        try:
            os.mkdir(config['directory'])
            os.mkdir(path)
        except Exception:
            pass
        self.output_filename = os.path.join(path, 'assign.h5')
def load_state_file(self, state_filename):
import yaml
ydict = yaml.load(open(state_filename, 'rt'), Loader=yaml.Loader)
ystates = ydict['states']
self.states_from_dict(ystates)
def states_from_dict(self, ystates):
states = []
for istate, ystate in enumerate(ystates):
state = {}
state['label'] = ystate.get('label', 'state{}'.format(istate))
# coords can be:
# - a scalar, in which case it is one bin, 1-D
# - a single list, which is rejected as ambiguous
# - a list of lists, which is a list of coordinate tuples
coords = np.array(ystate['coords'])
if coords.ndim == 0:
coords.shape = (1, 1)
elif coords.ndim == 1:
raise ValueError(
'list {!r} is ambiguous (list of 1-d coordinates, or single multi-d coordinate?)'.format(ystate['coords'])
)
elif coords.ndim > 2:
raise ValueError('coordinates must be 2-D')
state['coords'] = coords
states.append(state)
self.states = states
def load_states_from_function(self, statefunc):
states = statefunc(self.binning.mapper)
for istate, state in enumerate(states):
state.setdefault('label', 'state{}'.format(istate))
try:
state['coords'] = np.array(state['coords'])
except KeyError:
raise ValueError('state function {!r} returned a state {!r} without coordinates'.format(statefunc, state))
self.states = states
log.debug('loaded states: {!r}'.format(self.states))
    def assign_iteration(self, n_iter, nstates, nbins, state_map, last_labels):
        '''Method to encapsulate the segment slicing (into n_worker slices) and parallel job submission
        Submits job(s), waits on completion, splices them back together
        Returns: assignments, trajlabels, pops for this iteration'''
        futures = []
        iter_group = self.data_reader.get_iter_group(n_iter)
        nsegs, npts = iter_group['pcoord'].shape[:2]
        n_workers = self.work_manager.n_workers or 1
        assignments = np.empty((nsegs, npts), dtype=index_dtype)
        trajlabels = np.empty((nsegs, npts), dtype=index_dtype)
        statelabels = np.empty((nsegs, npts), dtype=index_dtype)
        pops = np.zeros((nstates + 1, nbins + 1), dtype=weight_dtype)
        # Submit jobs to work manager
        # Ceiling division: every segment lands in exactly one block.
        blocksize = nsegs // n_workers
        if nsegs % n_workers > 0:
            blocksize += 1
        def task_gen():
            # Yields one (_assign_label_pop, args, kwargs) task per segment block.
            if __debug__:
                checkset = set()
            for lb in range(0, nsegs, blocksize):
                ub = min(nsegs, lb + blocksize)
                if __debug__:
                    checkset.update(set(range(lb, ub)))
                args = ()
                kwargs = dict(
                    n_iter=n_iter,
                    lb=lb,
                    ub=ub,
                    mapper=self.binning.mapper,
                    nstates=nstates,
                    state_map=state_map,
                    last_labels=last_labels,
                    parent_id_dsspec=self.data_reader.parent_id_dsspec,
                    weight_dsspec=self.data_reader.weight_dsspec,
                    pcoord_dsspec=self.dssynth.dsspec,
                    subsample=self.subsample,
                )
                yield (_assign_label_pop, args, kwargs)
                # futures.append(self.work_manager.submit(_assign_label_pop,
                #                kwargs=)
            if __debug__:
                assert checkset == set(range(nsegs)), 'segments missing: {}'.format(set(range(nsegs)) - checkset)
        # Splice each completed slice back into the full per-iteration arrays;
        # populations are additive across slices.
        # for future in self.work_manager.as_completed(futures):
        for future in self.work_manager.submit_as_completed(task_gen(), queue_size=self.max_queue_len):
            assign_slice, traj_slice, slice_pops, lb, ub, state_slice = future.get_result(discard=True)
            assignments[lb:ub, :] = assign_slice
            trajlabels[lb:ub, :] = traj_slice
            statelabels[lb:ub, :] = state_slice
            pops += slice_pops
            del assign_slice, traj_slice, slice_pops, state_slice
        del futures
        return (assignments, trajlabels, pops, statelabels)
    def go(self):
        '''Main analysis: assign every iteration's segments to bins (and
        macrostates, if defined) and write the results to the output HDF5 file.'''
        # dsspecs are shipped to workers, so they must not hold open HDF5 handles.
        assert self.data_reader.parent_id_dsspec._h5file is None
        assert self.data_reader.weight_dsspec._h5file is None
        if hasattr(self.dssynth.dsspec, '_h5file'):
            assert self.dssynth.dsspec._h5file is None
        pi = self.progress.indicator
        pi.operation = 'Initializing'
        with pi, self.data_reader, WESTPAH5File(self.output_filename, 'w', creating_program=True) as self.output_file:
            assign = self.binning.mapper.assign
            # We always assign the entire simulation, so that no trajectory appears to start
            # in a transition region that doesn't get initialized in one.
            iter_start = 1
            iter_stop = self.data_reader.current_iteration
            h5io.stamp_iter_range(self.output_file, iter_start, iter_stop)
            nbins = self.binning.mapper.nbins
            self.output_file.attrs['nbins'] = nbins
            state_map = np.empty((self.binning.mapper.nbins + 1,), index_dtype)
            state_map[:] = 0  # state_id == nstates => unknown state
            # Recursive mappers produce a generator rather than a list of labels
            # so consume the entire generator into a list
            labels = [np.string_(label) for label in self.binning.mapper.labels]
            self.output_file.create_dataset('bin_labels', data=labels, compression=9)
            if self.states:
                # Map each bin containing a state-defining coordinate to that state.
                nstates = len(self.states)
                state_map[:] = nstates  # state_id == nstates => unknown state
                state_labels = [np.string_(state['label']) for state in self.states]
                for istate, sdict in enumerate(self.states):
                    assert state_labels[istate] == np.string_(sdict['label'])  # sanity check
                    state_assignments = assign(sdict['coords'])
                    for assignment in state_assignments:
                        state_map[assignment] = istate
                self.output_file.create_dataset('state_map', data=state_map, compression=9, shuffle=True)
                self.output_file['state_labels'] = state_labels  # + ['(unknown)']
            else:
                nstates = 0
            self.output_file.attrs['nstates'] = nstates
            # Stamp if this has been subsampled.
            self.output_file.attrs['subsampled'] = self.subsample
            iter_count = iter_stop - iter_start
            nsegs = np.empty((iter_count,), seg_id_dtype)
            npts = np.empty((iter_count,), seg_id_dtype)
            # scan for largest number of segments and largest number of points
            pi.new_operation('Scanning for segment and point counts', iter_stop - iter_start)
            for iiter, n_iter in enumerate(range(iter_start, iter_stop)):
                iter_group = self.data_reader.get_iter_group(n_iter)
                nsegs[iiter], npts[iiter] = iter_group['pcoord'].shape[0:2]
                pi.progress += 1
                del iter_group
            pi.new_operation('Preparing output')
            # create datasets
            self.output_file.create_dataset('nsegs', data=nsegs, shuffle=True, compression=9)
            self.output_file.create_dataset('npts', data=npts, shuffle=True, compression=9)
            max_nsegs = nsegs.max()
            max_npts = npts.max()
            # Datasets are sized for the largest iteration; unused entries keep
            # their fill value (nbins / nstates = "invalid/unknown").
            assignments_shape = (iter_count, max_nsegs, max_npts)
            assignments_dtype = np.min_scalar_type(nbins)
            assignments_ds = self.output_file.create_dataset(
                'assignments',
                dtype=assignments_dtype,
                shape=assignments_shape,
                compression=4,
                shuffle=True,
                chunks=h5io.calc_chunksize(assignments_shape, assignments_dtype),
                fillvalue=nbins,
            )
            if self.states:
                trajlabel_dtype = np.min_scalar_type(nstates)
                trajlabels_ds = self.output_file.create_dataset(
                    'trajlabels',
                    dtype=trajlabel_dtype,
                    shape=assignments_shape,
                    compression=4,
                    shuffle=True,
                    chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
                    fillvalue=nstates,
                )
                statelabels_ds = self.output_file.create_dataset(
                    'statelabels',
                    dtype=trajlabel_dtype,
                    shape=assignments_shape,
                    compression=4,
                    shuffle=True,
                    chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
                    fillvalue=nstates,
                )
            pops_shape = (iter_count, nstates + 1, nbins + 1)
            pops_ds = self.output_file.create_dataset(
                'labeled_populations',
                dtype=weight_dtype,
                shape=pops_shape,
                compression=4,
                shuffle=True,
                chunks=h5io.calc_chunksize(pops_shape, weight_dtype),
            )
            h5io.label_axes(pops_ds, [np.string_(i) for i in ['iteration', 'state', 'bin']])
            pi.new_operation('Assigning to bins', iter_stop - iter_start)
            last_labels = None  # mapping of seg_id to last macrostate inhabited
            for iiter, n_iter in enumerate(range(iter_start, iter_stop)):
                # get iteration info in this block
                if iiter == 0:
                    last_labels = np.empty((nsegs[iiter],), index_dtype)
                    last_labels[:] = nstates  # unknown state
                # Slices this iteration into n_workers groups of segments, submits them to wm, splices results back together
                assignments, trajlabels, pops, statelabels = self.assign_iteration(n_iter, nstates, nbins, state_map, last_labels)
                # Do stuff with this iteration's results
                last_labels = trajlabels[:, -1].copy()
                assignments_ds[iiter, 0 : nsegs[iiter], 0 : npts[iiter]] = assignments
                pops_ds[iiter] = pops
                if self.states:
                    trajlabels_ds[iiter, 0 : nsegs[iiter], 0 : npts[iiter]] = trajlabels
                    statelabels_ds[iiter, 0 : nsegs[iiter], 0 : npts[iiter]] = statelabels
                pi.progress += 1
                del assignments, trajlabels, pops, statelabels
            for dsname in 'assignments', 'npts', 'nsegs', 'labeled_populations', 'statelabels':
                h5io.stamp_iter_range(self.output_file[dsname], iter_start, iter_stop)
def entry_point():
    '''Console-script entry point: build and run the w_assign tool.'''
    WAssign().main()
if __name__ == '__main__':
    entry_point()
| mit | f5a0c13b06d245dd07cd1515aea91861 | 42.460637 | 130 | 0.598127 | 4.08856 | false | false | false | false |
westpa/westpa | src/westpa/trajtree/trajtree.py | 2 | 4499 | import collections
import numpy as np
import westpa
from westpa.tools.selected_segs import AllSegmentSelection
from . import _trajtree
from ._trajtree import _trajtree_base # @UnresolvedImport
# Lightweight handle identifying one segment (iteration number, segment ID) in the tree.
trajnode = collections.namedtuple('trajnode', ('n_iter', 'seg_id'))
class TrajTreeSet(_trajtree_base):
    '''A set of trajectory trees built from a segment selection, supporting
    depth-first traversal of whole trajectories with a visitor callback.'''

    def __init__(self, segsel=None, data_manager=None):
        '''Build the trajectory table from *segsel* (default: all segments)
        using *data_manager* (default: the active WESTPA data manager).'''
        self.data_manager = data_manager or westpa.rc.get_data_manager()
        self.segsel = segsel or AllSegmentSelection(data_manager=self.data_manager)
        self._build_table(self.segsel, self.data_manager)

    def __len__(self):
        return len(self.trajtable)

    def get_roots(self):
        '''Return the trajectory-table rows that have no parent (tree roots).'''
        return self.trajtable[self.trajtable['parent_offset'] == -1]

    # return [trajnode(root['n_iter'], root['seg_id']) for root in self._get_roots()]

    def get_root_indices(self):
        '''Return a 1-D array of trajectory-table indices of all root segments.'''
        # np.flatnonzero always yields a 1-D index array. The previous
        # np.squeeze(np.argwhere(...)) collapsed to a 0-d scalar when exactly
        # one root existed, which broke iteration (e.g. the deque(...) call in
        # trace_trajectories()).
        return np.flatnonzero(self.trajtable['parent_offset'] == -1)

    def trace_trajectories(self, visit, get_visitor_state=None, set_visitor_state=None, vargs=None, vkwargs=None):
        '''Depth-first traversal of every trajectory tree, calling
        ``visit(n_iter, seg_id, weight, has_children=..., *vargs, **vkwargs)``
        for each segment.

        If *get_visitor_state*/*set_visitor_state* are given (both or neither),
        visitor state is captured before descending into a subtree and restored
        when traversal returns to the parent level. A visitor may raise
        StopIteration to prune the subtree below the current segment.

        Returns the total number of segments visited.
        '''
        if (get_visitor_state or set_visitor_state) and not (get_visitor_state and set_visitor_state):
            raise ValueError('either both or neither of get_visitor_state and set_visitor_state must be specified')
        vargs = vargs or ()
        vkwargs = vkwargs or {}
        n_visits = 0
        trajtable = self.trajtable
        roots = collections.deque(self.get_root_indices())
        print('Examining {:d} roots'.format(len(roots)))
        state_stack = collections.deque([{'subtrees': roots, 'vstate': get_visitor_state() if get_visitor_state else None}])
        while state_stack:
            state = state_stack.pop()
            subtrees = state['subtrees']
            if set_visitor_state:
                set_visitor_state(state['vstate'])
            while subtrees:
                index = subtrees.popleft()
                node = trajtable[index]
                # Remember the remaining siblings (and current visitor state)
                # before descending into this node's children.
                state_stack.append({'subtrees': subtrees, 'vstate': get_visitor_state() if get_visitor_state else None})
                subtrees = collections.deque(self.get_child_indices(index))
                n_visits += 1
                try:
                    visit(node['n_iter'], node['seg_id'], node['weight'], has_children=(len(subtrees) > 0), *vargs, **vkwargs)
                except StopIteration:
                    # Visitor requested pruning: skip this node's children.
                    subtrees = collections.deque()
                    continue  # to next sibling
        return n_visits
class FakeTrajTreeSet(TrajTreeSet):
    """Hand-built two-tree fixture (roots: segments 1 and 11) for traversal tests.

    Bypasses ``TrajTreeSet.__init__`` entirely and fills ``trajtable``,
    ``childtable`` and ``iter_offsets`` with a small fixed topology.
    """

    def __init__(self):
        # _tt_dtype = np.dtype([('n_iter', np.uint32),
        #                       ('seg_id', np.int64),
        #                       ('parent_id', np.int64),
        #                       ('parent_offset', np.int64),  # offset of parent segment into this table
        #                       ('weight', np.float64)])      # weight of this segment

        # Rows are (n_iter, seg_id, parent_id, parent_offset, weight); the
        # trailing comment is the row's own index, referenced by childtable.
        self.trajtable = np.array(
            [
                (1, 1, -1, -1, 1.0),  # 0
                (1, 11, -1, -1, 1.0),  # 1
                (2, 2, 1, 0, 1.0),  # 2
                (2, 3, 1, 0, 1.0),  # 3
                (2, 4, 1, 0, 1.0),  # 4
                (2, 12, 11, 1, 1.0),  # 5
                (2, 13, 11, 1, 1.0),  # 6
                (3, 5, 2, 2, 1.0),  # 7
                (3, 6, 3, 3, 1.0),  # 8
                (3, 7, 4, 4, 1.0),  # 9
                (3, 8, 4, 4, 1.0),  # 10
                (3, 14, 12, 5, 1.0),  # 11
                (3, 15, 12, 5, 1.0),  # 12
                (4, 9, 5, 7, 1.0),  # 13
                (4, 10, 5, 7, 1.0),  # 14
            ],
            dtype=_trajtree._tt_dtype,
        )

        empty_array = np.array([])

        # childtable[i] lists the trajtable row indices of row i's children.
        self.childtable = np.array(
            [
                np.array([2, 3, 4]),  # segment 1
                np.array([5, 6]),  # segment 11
                np.array([7]),  # segment 2
                np.array([8]),  # segment 3
                np.array([9, 10]),  # segment 4
                np.array([11, 12]),  # segment 12
                empty_array,  # segment 13
                np.array([13, 14]),  # segment 5
                empty_array,  # segment 6
                empty_array,  # segment 7
                empty_array,  # segment 8
                empty_array,  # segment 14
                empty_array,  # segment 15
                empty_array,  # segment 9
                empty_array,  # segment 10
            ],
            dtype=np.object_,
        )

        # First trajtable row belonging to each iteration number.
        self.iter_offsets = {1: 0, 2: 2, 3: 7, 4: 13}
| mit | 4ead9d93108a424bce9ad17bc073b915 | 36.806723 | 126 | 0.493665 | 3.556522 | false | false | false | false |
westpa/westpa | examples/ex_fluxanl_plot.py | 2 | 1119 | import argparse
import h5py
from matplotlib import pyplot
parser = argparse.ArgumentParser(description='''Plot flux evolution obtained with w_fluxanl.''')
parser.add_argument('-s', '--state', default=0, help='''Create a plot for target state index STATE (default: %(default)s)''')
parser.add_argument('-i', '--input', default='fluxanl.h5', help='''Take data from INPUT (default: %(default)s)''')
parser.add_argument(
'-o', '--output', default='fluxevol.pdf', help='''Write plot of flux evolution to OUTPUT (default: %(default)s)'''
)
args = parser.parse_args()
h5file = h5py.File(args.input, 'r')
evol = h5file['flux_evol'][...]
evol_iters = h5file['flux_evol_iterations'][...]
pyplot.figure()
iters = evol_iters['iter_stop'] - 1
pyplot.plot(iters, evol['mean'], color='black')
pyplot.plot(iters, evol['ci_lb'], color='gray')
pyplot.plot(iters, evol['ci_ub'], color='gray')
pyplot.title(
r'Flux evolution for state {} at $\alpha=${} confidence interval'.format(args.state, h5file['flux_evol'].attrs['mcbs_alpha'])
)
pyplot.xlabel('Iteration')
pyplot.ylabel(r'Flux ($\tau^{-1}$)')
pyplot.savefig(args.output)
| mit | faf88f34df201495591c0d8b7ca46c3d | 40.444444 | 129 | 0.689008 | 3.09116 | false | false | false | false |
westpa/westpa | tests/test_tools/test_w_ipa.py | 2 | 2590 | import os
import shutil
from h5diff import H5Diff
import unittest
import pytest
import argparse
from unittest import mock
from westpa.cli.tools.w_ipa import entry_point
class Test_W_IPA(unittest.TestCase):
    """Integration test driving the w_ipa CLI against reference data."""

    test_name = 'W_IPA'

    def test_run_w_ipa(self):
        '''Testing if w_ipa runs as expected and the h5 files looks good.'''
        # Stage the reference config and data where w_ipa expects them.
        ref_dir = os.path.join(os.path.dirname(__file__), '../refs')
        shutil.copy2(os.path.join(ref_dir, 'west_ref.cfg'), './west.cfg')
        shutil.copy2(os.path.join(ref_dir, 'west_ref.h5'), './west.h5')
        # Run the tool through its console script (-a: analysis, -o: overwrite).
        os.system('w_ipa -ao')
        assert os.path.isfile('./ANALYSIS/TEST/assign.h5'), "The assign.h5 file was not generated."
        assert os.path.isfile('./ANALYSIS/TEST/direct.h5'), "The direct.h5 file was not generated."
        # Compare generated output against the stored reference file.
        diff = H5Diff(os.path.join(ref_dir, 'assign_ipa_ref.h5'), './ANALYSIS/TEST/assign.h5')
        # TODO: this is broken
        # diff = H5Diff('../refs/direct_ipa_ref.h5', './ANALYSIS/TEST/direct.h5')
        diff.check()

        # Clean up generated artifacts.
        shutil.rmtree('ANALYSIS')
        os.remove('west.h5')
        os.remove('west.cfg')
@pytest.mark.skip(reason="work-in-progress test that uses entry point")
class Test_W_IPA_new:
    """WIP variant of the w_ipa test that drives entry_point() directly (skipped)."""

    def test_run_w_ipa(self, ref_50iter):
        '''Testing if w_ipa runs as expected and the h5 files looks good.'''
        # Patch argparse so entry_point() sees a fully-populated namespace
        # instead of parsing sys.argv.
        with mock.patch(
            target='argparse.ArgumentParser.parse_args',
            return_value=argparse.Namespace(
                rcfile=self.cfg_filepath,
                verbosity='debug',
                work_manager=None,
                analysis_mode=True,
                max_queue_length=None,
                debug_mode=True,
                we_h5filename=self.h5_filepath,
                scheme='TEST',
                reanalyze=False,
                ignore_hash=False,
                plotting=False,
                construct_dataset=False,
                dsspecs=None,
                output='assign.h5',
                subsample=None,
                config_from_file=True,
            ),
        ):
            entry_point()
        assert os.path.isfile('./ANALYSIS/TEST/assign.h5'), "The assign.h5 file was not generated."
        assert os.path.isfile('./ANALYSIS/TEST/direct.h5'), "The direct.h5 file was not generated."
        diff = H5Diff('assign_ipa_ref.h5', './ANALYSIS/TEST/assign.h5')
        diff.check()
        # TODO: this is broken
        # diff2 = H5Diff('direct_ipa_ref.h5', './ANALYSIS/TEST/direct.h5')
        # diff2.check()

        # cleanup
        shutil.rmtree('ANALYSIS')
westpa/westpa | src/westpa/oldtools/aframe/mcbs.py | 2 | 5928 | '''
Tools for Monte Carlo bootstrap error analysis
'''
import logging
import math
import numpy as np
import westpa
from westpa.oldtools.aframe import AnalysisMixin
log = logging.getLogger(__name__)
class MCBSMixin(AnalysisMixin):
    """Analysis mixin providing Monte Carlo bootstrap (MCBS) options.

    Adds ``--confidence`` and ``--bssize`` command-line options and stores
    the derived significance level (``mcbs_alpha``), the synthetic data set
    count (``mcbs_nsets``), and a display string for the confidence level.
    """

    def __init__(self):
        super().__init__()
        # Significance level, alpha = 1 - confidence.
        self.mcbs_alpha = None
        # Number of synthetic (bootstrap) data sets to generate.
        self.mcbs_nsets = None
        # Confidence level formatted for status messages; precision scales
        # with alpha (e.g. '95' for alpha=0.05, '99.5' for alpha=0.005).
        self.mcbs_display_confidence = None

    def add_args(self, parser, upcall=True):
        """Add bootstrap options to ``parser``; chain to the superclass first if ``upcall``."""
        if upcall:
            try:
                upfunc = super().add_args
            except AttributeError:
                pass
            else:
                upfunc(parser)

        group = parser.add_argument_group('Monte Carlo bootstrap options')
        group.add_argument(
            '--confidence',
            dest='mcbs_confidence',
            type=float,
            default=0.95,
            metavar='P',
            help='''Construct a confidence interval of width P (default: 0.95=95%%).''',
        )
        group.add_argument(
            '--bssize',
            dest='mcbs_nsets',
            type=int,
            metavar='NSETS',
            help='''Use NSETS synthetic data sets to calculate confidence intervals (default:
                    calculated based on confidence level, but not less than 1000).''',
        )

    def process_args(self, args, upcall=True):
        """Digest parsed ``args``; chain to the superclass afterwards if ``upcall``."""
        self.mcbs_alpha = 1 - args.mcbs_confidence
        # BUG FIX: the --bssize value is stored as ``args.mcbs_nsets``
        # (dest='mcbs_nsets' above); the previous code read the nonexistent
        # ``args.mcbs_size`` and raised AttributeError whenever --bssize was
        # supplied.  Also use max() so the automatic choice is "not less than
        # 1000", as promised by the --bssize help text (min() capped it at
        # 1000 instead).
        self.mcbs_nsets = args.mcbs_nsets if args.mcbs_nsets else max(1000, calc_mcbs_nsets(self.mcbs_alpha))
        self.mcbs_display_confidence = '{:.{cp}f}'.format(
            100 * args.mcbs_confidence, cp=-int(math.floor(math.log10(self.mcbs_alpha))) - 2
        )
        westpa.rc.pstatus(
            'Using bootstrap of {:d} sets to calculate {:s}% confidence interval (alpha={:g}).'.format(
                self.mcbs_nsets, self.mcbs_display_confidence, self.mcbs_alpha
            )
        )
        if upcall:
            try:
                upfunc = super().process_args
            except AttributeError:
                pass
            else:
                upfunc(args)

    def calc_mcbs_nsets(self, alpha=None):
        """Bootstrap set count for ``alpha`` (defaults to ``self.mcbs_alpha``)."""
        alpha = alpha or self.mcbs_alpha
        return calc_mcbs_nsets(alpha)

    def calc_ci_bound_indices(self, n_sets=None, alpha=None):
        """Sorted-array indices of the CI bounds (defaults taken from ``self``)."""
        n_sets = n_sets or self.mcbs_nsets
        alpha = alpha or self.mcbs_alpha
        return calc_ci_bound_indices(n_sets, alpha)
ciinfo_dtype = np.dtype([('expectation', np.float64), ('ci_lower', np.float64), ('ci_upper', np.float64)])
def calc_mcbs_nsets(alpha):
    '''Return a bootstrap data set size appropriate for the given confidence level.'''
    # One order of magnitude more sets than the significance level resolves:
    # e.g. alpha=0.05 -> ceil(1.30)+1 = 3 -> 10**3 = 1000 sets.
    exponent = math.ceil(-math.log10(alpha)) + 1
    return int(10 ** exponent)
def calc_ci_bound_indices(n_sets, alpha):
    """Indices of the lower/upper CI bounds in a sorted array of ``n_sets`` estimates.

    Places alpha/2 of the probability mass in each tail.
    """
    lower_index = int(math.floor(n_sets * alpha / 2))
    upper_index = int(math.ceil(n_sets * (1 - alpha / 2)))
    return (lower_index, upper_index)
def bootstrap_ci_ll(estimator, data, alpha, n_sets, storage, sort, eargs=(), ekwargs={}, fhat=None):
    '''Low-level routine for calculating bootstrap error estimates. Arguments and return values are as those for
    ``bootstrap_ci``, except that no argument is optional except additional arguments for the estimator (``eargs``, ``ekwargs``).
    ``data`` must be an array (or subclass), and an additional array ``storage`` must be provided, which
    must be appropriately shaped and typed to hold ``n_sets`` results from ``estimator``. Further, if the
    value ``fhat`` of the estimator must be pre-calculated to allocate ``storage``, then its value may be
    passed; otherwise, ``estimator(data,*eargs,**kwargs)`` will be called to calculate it.'''

    if fhat is None:
        fhat = estimator(data, *eargs, **ekwargs)
    dlen = len(data)

    # Build n_sets synthetic data sets by resampling `data` with replacement
    # (along the first axis) and evaluate the estimator on each.
    for iset in range(n_sets):
        indices = np.random.randint(dlen, size=(dlen,))
        storage[iset] = estimator(data[indices], *eargs, **ekwargs)

    synth_sorted = sort(storage)
    # Tail indices delimiting the central (1-alpha) mass of the sorted
    # synthetic estimates.
    # NOTE(review): ubi can equal n_sets when alpha < 2/n_sets (ceil rounds
    # up), which would make synth_sorted[ubi] raise IndexError -- confirm
    # callers always pick n_sets via calc_mcbs_nsets().
    lbi = int(math.floor(n_sets * alpha / 2))
    ubi = int(math.ceil(n_sets * (1 - alpha / 2)))

    lb = synth_sorted[lbi]
    ub = synth_sorted[ubi]
    try:
        # (value, lower bound, upper bound, CI width, relative CI width,
        #  symmetrized error bar)
        return (fhat, lb, ub, ub - lb, abs((ub - lb) / fhat) if fhat else 0, max(ub - fhat, fhat - lb))
    finally:
        # Explicitly drop references to (potentially large) intermediates.
        # NOTE(review): `indices` is unbound when n_sets == 0, which would
        # turn this cleanup into a NameError.
        del fhat, lb, ub, indices
def bootstrap_ci(estimator, data, alpha, n_sets=None, sort=None, eargs=(), ekwargs={}):
    '''Perform a Monte Carlo bootstrap of a (1-alpha) confidence interval for the given ``estimator``.
    Returns (fhat, ci_lower, ci_upper), where fhat is the result of ``estimator(data, *eargs, **ekwargs)``,
    and ``ci_lower`` and ``ci_upper`` are the lower and upper bounds of the surrounding confidence
    interval, calculated by calling ``estimator(syndata, *eargs, **ekwargs)`` on each synthetic data
    set ``syndata``. If ``n_sets`` is provided, that is the number of synthetic data sets generated,
    otherwise an appropriate size is selected automatically (see ``calc_mcbs_nsets()``).

    ``sort``, if given, is applied to sort the results of calling ``estimator`` on each
    synthetic data set prior to obtaining the confidence interval. This function must sort
    on the last index. By default, results are sorted along the first axis.

    Individual entries in synthetic data sets are selected by the first index of ``data``, allowing this
    function to be used on arrays of multidimensional data.

    Returns (fhat, lb, ub, ub-lb, abs((ub-lb)/fhat), and max(ub-fhat,fhat-lb)) (that is, the estimated value, the
    lower and upper bounds of the confidence interval, the width of the confidence interval, the relative
    width of the confidence interval, and the symmetrized error bar of the confidence interval).'''

    data = np.asanyarray(data)
    fhat = np.squeeze(estimator(data, *eargs, **ekwargs))

    if sort is None:
        # BUG FIX: the default used to be ``sort=np.msort`` evaluated at
        # function-definition time; np.msort was removed in NumPy 2.0, which
        # made importing this module fail.  np.sort(a, axis=0) is the
        # documented drop-in equivalent, resolved lazily here instead.
        def sort(a):
            return np.sort(a, axis=0)

    # Normalize n_sets once instead of recomputing it again at the call below.
    n_sets = n_sets or calc_mcbs_nsets(alpha)
    fsynth = np.empty((n_sets,), dtype=fhat.dtype)
    try:
        return bootstrap_ci_ll(estimator, data, alpha, n_sets, fsynth, sort, eargs, ekwargs, fhat)
    finally:
        # Drop the (potentially large) scratch array promptly.
        del fsynth
| mit | cfd8561a4e3882c5bd1f576d738285be | 39.60274 | 129 | 0.628036 | 3.661519 | false | false | false | false |
westpa/westpa | src/westpa/tools/iter_range.py | 2 | 8730 | import logging
import numpy as np
import westpa
from westpa.tools.core import WESTToolComponent
from westpa.core import h5io
log = logging.getLogger(__name__)
class IterRangeSelection(WESTToolComponent):
    '''Select and record limits on iterations used in analysis and/or reporting.
    This class provides both the user-facing command-line options and parsing, and
    the application-side API for recording limits in HDF5.

    HDF5 datasets calculated based on a restricted set of iterations should be tagged
    with the following attributes:

    ``first_iter``
        The first iteration included in the calculation.

    ``last_iter``
        One past the last iteration included in the calculation.

    ``iter_step``
        Blocking or sampling period for iterations included in the calculation.
    '''

    def __init__(self, data_manager=None):
        super().__init__()

        self.data_manager = data_manager

        # First iteration on which to perform analysis/reporting
        self.iter_start = None

        # One past the last iteration on which to perform analysis/reporting
        self.iter_stop = None

        # Step
        self.iter_step = None

        # Number of iterations in the half-open range [iter_start, iter_stop)
        self.iter_count = None

        self.include_args.update({'iter_start': True, 'iter_stop': True, 'iter_step': False})

    def add_args(self, parser):
        """Add the --first-iter/--last-iter/--step-iter options enabled in ``include_args``."""
        group = parser.add_argument_group('iteration range')

        if self.include_args['iter_start']:
            group.add_argument(
                '--first-iter',
                dest='first_iter',
                type=int,
                metavar='N_ITER',
                default=1,
                help='''Begin analysis at iteration N_ITER (default: %(default)d).''',
            )
        if self.include_args['iter_stop']:
            group.add_argument(
                '--last-iter',
                dest='last_iter',
                type=int,
                metavar='N_ITER',
                help='''Conclude analysis with N_ITER, inclusive (default: last completed iteration).''',
            )
        if self.include_args['iter_step']:
            group.add_argument(
                '--step-iter', dest='step_iter', type=int, metavar='STEP', help='''Analyze/report in blocks of STEP iterations.'''
            )

    def process_args(self, args, override_iter_start=None, override_iter_stop=None, default_iter_step=1):
        """Digest parsed arguments, allowing programmatic overrides of the range."""
        if override_iter_start is not None:
            self.iter_start = override_iter_start
        elif args.first_iter is not None:
            self.iter_start = args.first_iter
        else:
            self.iter_start = 1

        if override_iter_stop is not None:
            self.iter_stop = override_iter_stop
        elif args.last_iter is not None:
            # --last-iter is inclusive; iter_stop is exclusive.
            self.iter_stop = args.last_iter + 1
        else:
            self.iter_stop = (self.data_manager or westpa.rc.get_data_manager()).current_iteration

        if self.include_args['iter_step']:
            self.iter_step = args.step_iter or default_iter_step

        try:
            self.iter_count = self.iter_stop - self.iter_start
        except TypeError:
            # one or both are None
            pass

    def iter_block_iter(self):
        '''Return an iterable of (block_start,block_end) over the blocks of iterations
        selected by --first-iter/--last-iter/--step-iter.'''

        for blkfirst in range(self.iter_start, self.iter_stop, self.iter_step):
            yield (blkfirst, min(self.iter_stop, blkfirst + self.iter_step))

    def n_iter_blocks(self):
        '''Return the number of blocks of iterations (as returned by ``iter_block_iter``)
        selected by --first-iter/--last-iter/--step-iter.'''
        npoints = self.iter_stop - self.iter_start
        # Round up: a final partial block still counts.
        if npoints % self.iter_step == 0:
            return npoints // self.iter_step
        else:
            return npoints // self.iter_step + 1

    def record_data_iter_range(self, h5object, iter_start=None, iter_stop=None):
        '''Store attributes ``iter_start`` and ``iter_stop`` on the given HDF5 object (group/dataset)'''
        iter_start = self.iter_start if iter_start is None else iter_start
        iter_stop = self.iter_stop if iter_stop is None else iter_stop
        h5object.attrs['iter_start'] = iter_start
        h5object.attrs['iter_stop'] = iter_stop

    def record_data_iter_step(self, h5object, iter_step=None):
        '''Store attribute ``iter_step`` on the given HDF5 object (group/dataset).'''
        iter_step = self.iter_step if iter_step is None else iter_step
        h5object.attrs['iter_step'] = iter_step

    def check_data_iter_range_least(self, h5object, iter_start=None, iter_stop=None):
        '''Check that the given HDF5 object contains (as denoted by its ``iter_start``/``iter_stop`` attributes)
        data at least for the iteration range specified.'''
        iter_start = self.iter_start if iter_start is None else iter_start
        iter_stop = self.iter_stop if iter_stop is None else iter_stop
        return h5io.check_iter_range_least(h5object, iter_start, iter_stop)

    def check_data_iter_range_equal(self, h5object, iter_start=None, iter_stop=None):
        '''Check that the given HDF5 object contains (as denoted by its ``iter_start``/``iter_stop`` attributes)
        data exactly for the iteration range specified.'''
        iter_start = self.iter_start if iter_start is None else iter_start
        iter_stop = self.iter_stop if iter_stop is None else iter_stop
        return h5io.check_iter_range_equal(h5object, iter_start, iter_stop)

    def check_data_iter_step_conformant(self, h5object, iter_step=None):
        '''Check that the given HDF5 object contains per-iteration data at an iteration stride suitable for extracting data
        with the given stride (in other words, the given ``iter_step`` is a multiple of the stride with
        which data was recorded).'''
        iter_step = iter_step or self.iter_step
        obj_iter_step = h5object.attrs.get('iter_step')
        # NOTE(review): this tests the stored stride modulo the requested one,
        # while the docstring describes the opposite relation
        # (iter_step % obj_iter_step == 0) -- confirm which is intended.
        return obj_iter_step % iter_step == 0

    def check_data_iter_step_equal(self, h5object, iter_step=None):
        '''Check that the given HDF5 object contains per-iteration data at an iteration stride the same as
        that specified.'''
        iter_step = iter_step or self.iter_step
        obj_iter_step = h5object.attrs.get('iter_step')
        return obj_iter_step == iter_step

    def slice_per_iter_data(self, dataset, iter_start=None, iter_stop=None, iter_step=None, axis=0):
        '''Return the subset of the given dataset corresponding to the given iteration range and stride. Unless
        otherwise specified, the first dimension of the dataset is the one sliced.'''

        iter_start = self.iter_start if iter_start is None else iter_start
        iter_stop = self.iter_stop if iter_stop is None else iter_stop
        iter_step = self.iter_step if iter_step is None else iter_step

        ds_iter_start = dataset.attrs['iter_start']
        ds_iter_stop = dataset.attrs['iter_stop']
        ds_iter_step = dataset.attrs.get('iter_step', 1)

        # NOTE(review): the stride test looks inverted -- extracting stride
        # iter_step from data stored with stride ds_iter_step would require
        # iter_step % ds_iter_step == 0; confirm against callers.
        if iter_start < ds_iter_start or iter_stop > ds_iter_stop or ds_iter_step % iter_step > 0:
            raise IndexError(
                'Cannot slice requested iterations [{:d},{:d}) (stride={:d}) from dataset {!r} with range [{:d},{:d}) (stride={:d}).'.format(
                    iter_start, iter_stop, iter_step, dataset, ds_iter_start, ds_iter_stop, ds_iter_step
                )
            )

        dimslices = []
        for idim in range(len(dataset.shape)):
            if idim == axis:
                # NOTE(review): the stop offset is computed relative to
                # ds_iter_stop; an offset relative to ds_iter_start
                # (iter_stop - ds_iter_start) would be expected here -- verify.
                dimslices.append(slice(iter_start - ds_iter_start, iter_stop - ds_iter_stop + iter_step, iter_step))
            else:
                dimslices.append(slice(None, None, None))

        dimslices = tuple(dimslices)
        log.debug('slicing {!r} with {!r}'.format(dataset, dimslices))
        data = dataset[dimslices]
        log.debug('resulting data is of shape {!r}'.format(data.shape))
        return data

    def iter_range(self, iter_start=None, iter_stop=None, iter_step=None, dtype=None):
        '''Return a sequence for the given iteration numbers and stride, filling
        in missing values from those stored on ``self``. The smallest data type capable of
        holding ``iter_stop`` is returned unless otherwise specified using the ``dtype``
        argument.'''
        iter_start = self.iter_start if iter_start is None else iter_start
        iter_stop = self.iter_stop if iter_stop is None else iter_stop
        iter_step = self.iter_step if iter_step is None else iter_step
        return np.arange(iter_start, iter_stop, iter_step, dtype=(dtype or np.min_scalar_type(iter_stop)))
| mit | 83959c9fde5e42d7bf9c041d377341e1 | 43.090909 | 141 | 0.627606 | 3.888641 | false | false | false | false |
jendrikseipp/vulture | vulture/noqa.py | 1 | 1351 | from collections import defaultdict
import re
NOQA_REGEXP = re.compile(
    # Use the same regex as flake8 does.
    # https://github.com/pycqa/flake8/blob/main/src/flake8/defaults.py
    # We're looking for items that look like this:
    # `# noqa`
    # `# noqa: E123`
    # `# noqa: E123,W451,F921`
    # `# NoQA: E123,W451,F921`
    r"# noqa(?::[\s]?(?P<codes>([A-Z]+[0-9]+(?:[,\s]+)?)+))?",
    re.IGNORECASE,
)

# Translation table from flake8 error codes to their vulture equivalents.
NOQA_CODE_MAP = {
    # flake8 F401: module imported but unused.
    "F401": "V104",
    # flake8 F841: local variable is assigned to but never used.
    "F841": "V107",
}


def _parse_error_codes(match):
    """Return the error codes of a matched noqa comment ("all" when bare)."""
    codes = match.groupdict()["codes"] or "all"
    return [code.strip() for code in codes.split(",")]


def parse_noqa(code):
    """Map each error code to the set of line numbers that silence it."""
    noqa_lines = defaultdict(set)
    for lineno, line in enumerate(code, start=1):
        found = NOQA_REGEXP.search(line)
        if found is None:
            continue
        for raw_code in _parse_error_codes(found):
            # Store under the vulture code when a flake8 alias is used.
            noqa_lines[NOQA_CODE_MAP.get(raw_code, raw_code)].add(lineno)
    return noqa_lines
def ignore_line(noqa_lines, lineno, error_code):
    """Check if the reported line is annotated with "# noqa"."""
    # Either flagged for this specific error code, or via a blanket
    # "# noqa" (recorded under the "all" key).
    if lineno in noqa_lines[error_code]:
        return True
    return lineno in noqa_lines["all"]
| mit | 4518cd43fb9352e5cd2207028b3f4344 | 29.704545 | 75 | 0.612139 | 3.149184 | false | false | false | false |
geopython/pywps | pywps/processing/scheduler.py | 1 | 3108 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import os
import pywps.configuration as config
from pywps.processing.basic import Processing
from pywps.exceptions import SchedulerNotAvailable
from pywps.response.status import WPS_STATUS
import logging
LOGGER = logging.getLogger("PYWPS")
class Scheduler(Processing):
    """
    :class:`Scheduler` is processing implementation to run jobs on schedulers
    like slurm, grid-engine and torque. It uses the drmaa python library
    as client to launch jobs on a scheduler system.

    See: http://drmaa-python.readthedocs.io/en/latest/index.html
    """

    def start(self):
        # Report acceptance, submit via the scheduler, then report the
        # scheduler-assigned job ID back to the client.
        self.job.wps_response._update_status(WPS_STATUS.ACCEPTED, 'Submitting job ...', 0)
        # run remote pywps process
        jobid = self.run_job()
        self.job.wps_response._update_status(WPS_STATUS.ACCEPTED,
                                             'Your job has been submitted with ID {}'.format(jobid), 0)

    def run_job(self):
        """Submit the job through DRMAA and return the scheduler job ID.

        :raises SchedulerNotAvailable: if drmaa is unavailable or submission fails.
        """
        LOGGER.info("Submitting job ...")
        try:
            # Imported lazily so pywps works without drmaa installed.
            import drmaa
            with drmaa.Session() as session:
                # dump job to file
                dump_filename = self.job.dump()
                if not dump_filename:
                    raise Exception("Could not dump job status.")
                # prepare remote command
                jt = session.createJobTemplate()
                jt.remoteCommand = os.path.join(
                    config.get_config_value('processing', 'path'),
                    'joblauncher')
                # Ship the active PyWPS configuration alongside the job when
                # one is set in the environment.
                if os.getenv("PYWPS_CFG"):
                    import shutil
                    cfg_file = os.path.join(self.job.workdir, "pywps.cfg")
                    shutil.copy2(os.getenv('PYWPS_CFG'), cfg_file)
                    LOGGER.debug("Copied pywps config: {}".format(cfg_file))
                    jt.args = ['-c', cfg_file, dump_filename]
                else:
                    jt.args = [dump_filename]
                drmaa_native_specification = config.get_config_value('processing', 'drmaa_native_specification')
                if drmaa_native_specification:
                    jt.nativeSpecification = drmaa_native_specification
                # Keep stdout/stderr separate; the leading ':' is DRMAA path syntax.
                jt.joinFiles = False
                jt.errorPath = ":{}".format(os.path.join(self.job.workdir, "job-error.txt"))
                jt.outputPath = ":{}".format(os.path.join(self.job.workdir, "job-output.txt"))
                # run job
                jobid = session.runJob(jt)
                LOGGER.info('Your job has been submitted with ID {}'.format(jobid))
                # show status
                LOGGER.info('Job status: {}'.format(session.jobStatus(jobid)))
                # Cleaning up
                session.deleteJobTemplate(jt)
        except Exception as e:
            raise SchedulerNotAvailable("Could not submit job: {}".format(str(e)))
        return jobid
| mit | c1cec57fc824d8a7981c5d917cbb752a | 44.043478 | 112 | 0.553411 | 4.334728 | false | true | false | false |
geopython/pywps | pywps/tests.py | 1 | 7593 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import json
import tempfile
from pathlib import Path
import lxml
from pywps import xml_util as etree
import requests
from werkzeug.test import Client
from werkzeug.wrappers import Response
from pywps import __version__
from pywps import Process
from pywps.inout import LiteralInput, LiteralOutput, ComplexInput, ComplexOutput, BoundingBoxInput, BoundingBoxOutput
from pywps.inout import Format
from pywps.app.Common import Metadata, MetadataUrl
import re
import logging
logging.disable(logging.CRITICAL)
def service_ok(url, timeout=5):
    """Return True when *url* answers with a non-HTML, non-error response."""
    try:
        resp = requests.get(url, timeout=timeout)
        # An HTML body usually means an error page or captive portal rather
        # than the service itself.
        if 'html' in resp.headers['content-type']:
            return False
        return resp.ok
    except Exception:
        # Timeouts, connection errors, missing headers, ... -> not OK.
        return False
class DocExampleProcess(Process):
    """This first line is going to be skipped by the :skiplines:1 option.

    Notes
    -----
    This is additional documentation that can be added following the Numpy docstring convention.
    """

    def __init__(self):
        # One input of each supported kind (literal, date, complex, bounding
        # box) to exercise the documentation renderer.
        inputs = [
            LiteralInput(
                'literal_input', "Literal input title", 'integer', abstract="Literal input value abstract.",
                min_occurs=0, max_occurs=1, uoms=['meters', 'feet'], default=1
            ),
            LiteralInput('date_input', 'The title is shown when no abstract is provided.', 'date',
                         allowed_values=['2000-01-01', '2018-01-01']),
            ComplexInput('complex_input', 'Complex input title',
                         [Format('application/json'), Format('application/x-netcdf')],
                         abstract="Complex input abstract.", ),
            BoundingBoxInput('bb_input', 'BoundingBox input title', ['EPSG:4326', ],
                             metadata=[Metadata('EPSG.io', 'http://epsg.io/'), ]),
        ]
        outputs = [
            LiteralOutput(
                'literal_output', 'Literal output title', 'boolean', abstract='Boolean output abstract.'
            ),
            ComplexOutput('complex_output', 'Complex output', [Format('text/plain'), ], ),
            BoundingBoxOutput('bb_output', 'BoundingBox output title', ['EPSG:4326', ])
        ]
        super(DocExampleProcess, self).__init__(
            self._handler,
            identifier='doc_example_process_identifier',
            title="Process title",
            abstract="Multiline process abstract.",
            version="4.0",
            metadata=[Metadata('PyWPS docs', 'https://pywps.org'),
                      Metadata('NumPy docstring conventions',
                               'https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt'),
                      MetadataUrl('Duplicate label', 'http://one.example.com', anonymous=True),
                      MetadataUrl('Duplicate label', 'http://two.example.com', anonymous=True),
                      ],
            inputs=inputs,
            outputs=outputs,
        )

    def _handler(self, request, response):
        # Documentation-only example; never meant to be executed.
        pass
class WpsClient(Client):
    """Werkzeug test client with helpers for POSTing WPS request documents."""

    def post_xml(self, *args, **kwargs):
        # Serialize the lxml document passed as ``doc`` and POST it as the body.
        doc = kwargs.pop('doc')
        data = etree.tostring(doc, pretty_print=True)
        kwargs['data'] = data
        return self.post(*args, **kwargs)

    def post_json(self, *args, **kwargs):
        # POST ``doc`` as a JSON body and request a JSON answer via Accept.
        doc = kwargs.pop('doc')
        # data = json.dumps(doc, indent=2)
        # kwargs['data'] = data
        kwargs['json'] = doc
        # kwargs['content_type'] = 'application/json'  # input is json, redundant as it's deducted from the json kwarg
        # kwargs['mimetype'] = 'application/json'  # output is json
        kwargs['environ_base'] = {'HTTP_ACCEPT': 'application/json'}  # output is json
        return self.post(*args, **kwargs)
class WpsTestResponse(Response):
    """Test response that parses XML bodies and offers namespace-aware XPath."""

    def __init__(self, *args):
        super(WpsTestResponse, self).__init__(*args)
        # Only XML responses get a parsed tree; other bodies leave
        # ``self.xml`` unset.
        if re.match(r'text/xml(;\s*charset=.*)?', self.headers.get('Content-Type')):
            self.xml = etree.fromstring(self.get_data())

    def xpath(self, path):
        # Pick the namespace map matching the WPS version of the document.
        version = self.xml.attrib["version"]
        if version == "2.0.0":
            from pywps import namespaces200
            namespaces = namespaces200
        else:
            from pywps import namespaces100
            namespaces = namespaces100
        return self.xml.xpath(path, namespaces=namespaces)

    def xpath_text(self, path):
        # Concatenate the text content of all matched elements.
        return ' '.join(e.text for e in self.xpath(path))
def client_for(service):
    """Wrap *service* in a test client that yields WpsTestResponse objects."""
    return WpsClient(service, WpsTestResponse)
def assert_response_accepted(resp):
    """Assert *resp* is a valid XML WPS ProcessAccepted response."""
    assert resp.status_code == 200
    content_type = resp.headers['Content-Type']
    assert re.match(r'text/xml(;\s*charset=.*)?', content_type)
    accepted = resp.xpath_text('/wps:ExecuteResponse'
                               '/wps:Status'
                               '/wps:ProcessAccepted')
    assert accepted is not None
    # TODO: assert status URL is present
def assert_process_started(resp):
    """Assert *resp* reports a started (running) WPS process."""
    assert resp.status_code == 200
    assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
    success = resp.xpath_text('/wps:ExecuteResponse'
                              '/wps:Status'
                              'ProcessStarted')
    # BUG FIX: ``success.split`` is a bound method and must be called;
    # the original ``success.split[0]`` raised TypeError on every invocation.
    # Is it still like this in PyWPS-4 ?
    assert success.split()[0] == "processstarted"
def assert_response_success_json(resp, expected_data):
    """Assert *resp* is a successful JSON WPS response carrying *expected_data* outputs."""
    assert resp.status_code == 200
    assert re.match(r'application/json(;\s*charset=.*)?', resp.headers['Content-Type'])
    payload = json.loads(resp.data)
    assert payload['status']['status'] == 'succeeded'
    # Only verify outputs when the caller supplied an expectation.
    if expected_data:
        assert payload['outputs'] == expected_data
def assert_response_success(resp):
    """Assert *resp* is a successful XML WPS Execute response."""
    assert resp.status_code == 200
    assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
    # Exactly one ProcessSucceeded element must be present.
    succeeded = resp.xpath('/wps:ExecuteResponse/wps:Status/wps:ProcessSucceeded')
    assert len(succeeded) == 1
def assert_process_exception(resp, code=None):
    """Assert *resp* is a WPS exception report carrying the given exception *code*."""
    assert resp.status_code == 400
    assert re.match(r'text/xml(;\s*charset=.*)?', resp.headers['Content-Type'])
    exceptions = resp.xpath('/ows:ExceptionReport'
                            '/ows:Exception')
    assert exceptions[0].attrib['exceptionCode'] == code
def assert_pywps_version(resp):
    """Assert the '<!-- PyWPS x.y.z -->' comment precedes the root element."""
    # The version comment is the root element's preceding sibling.
    comment = resp.xpath('/*')[0].getprevious()
    assert isinstance(comment, lxml.etree._Comment)
    tokens = comment.text.split()
    assert len(tokens) == 2
    assert tokens[0] == 'PyWPS'
    assert tokens[1] == __version__
def assert_wps_version(response, version="1.0.0"):
    """Assert the capabilities document advertises the given WPS *version*."""
    elements = response.xpath('/wps:Capabilities'
                              '/ows:ServiceIdentification'
                              '/ows:ServiceTypeVersion')
    assert version == elements[0].text
    # Dump the raw response into a fresh temp dir (debugging aid retained
    # from the original implementation).
    workdir = Path(tempfile.mkdtemp())
    with open(workdir / "out.xml", "wb") as out:
        out.writelines(response.response)
geopython/pywps | pywps/response/basic.py | 1 | 2117 | from abc import abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pywps import WPSRequest
from pywps.dblog import store_status
from . import RelEnvironment
from .status import WPS_STATUS
from pywps.translations import get_translation
from jinja2 import Environment, PackageLoader
import os
class WPSResponse(object):
    """Base class for WPS responses: tracks process status and renders the response document."""

    def __init__(self, wps_request: 'WPSRequest', uuid=None, version="1.0.0"):
        self.wps_request = wps_request
        self.uuid = uuid  # request UUID used to key persisted status
        self.message = ''
        self.status = WPS_STATUS.ACCEPTED
        self.status_percentage = 0
        self.doc = None  # rendered response document (built lazily)
        self.content_type = None
        self.version = version
        # Jinja environment for rendering the response templates.
        self.template_env = RelEnvironment(
            loader=PackageLoader('pywps', 'templates'),
            trim_blocks=True, lstrip_blocks=True,
            autoescape=True,
        )
        self.template_env.globals.update(get_translation=get_translation)

    def _update_status(self, status, message, status_percentage):
        """
        Update status report of currently running process instance

        :param str message: Message you need to share with the client
        :param int status_percentage: Percent done (number betwen <0-100>)
        :param pywps.response.status.WPS_STATUS status: process status - user should usually
            ommit this parameter
        """
        self.message = message
        self.status = status
        self.status_percentage = status_percentage
        # Persist so status can be polled from another process/request.
        store_status(self.uuid, self.status, self.message, self.status_percentage)

    @abstractmethod
    def _construct_doc(self):
        # Subclasses build and return (doc, content_type) here.
        ...

    def get_response_doc(self):
        """Build the response document, recording success/failure in the status store."""
        try:
            self.doc, self.content_type = self._construct_doc()
        except Exception as e:
            # Prefer the HTTP-style description when the exception carries one.
            if hasattr(e, "description"):
                msg = e.description
            else:
                msg = e
            self._update_status(WPS_STATUS.FAILED, msg, 100)
            raise e
        else:
            self._update_status(WPS_STATUS.SUCCEEDED, "Response generated", 100)
        return self.doc, self.content_type
| mit | f22c600ad2b7ad409bd422c0926bafe5 | 31.569231 | 92 | 0.633444 | 4.118677 | false | false | false | false |
geopython/pywps | pywps/response/execute.py | 1 | 11499 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import json
import logging
import time
from werkzeug.wrappers import Request
from pywps import get_ElementMakerForVersion
from pywps.app.basic import get_response_type, get_json_indent, get_default_response_mimetype
from pywps.exceptions import NoApplicableCode
import pywps.configuration as config
from werkzeug.wrappers import Response
from pywps.inout.array_encode import ArrayEncoder
from pywps.response.status import WPS_STATUS
from .basic import WPSResponse
from pywps.inout.formats import FORMATS
from pywps.inout.outputs import ComplexOutput
import urllib.parse as urlparse
from urllib.parse import urlencode
LOGGER = logging.getLogger("PYWPS")
WPS, OWS = get_ElementMakerForVersion("1.0.0")
class ExecuteResponse(WPSResponse):
    def __init__(self, wps_request, uuid, **kwargs):
        """constructor

        :param pywps.app.WPSRequest.WPSRequest wps_request:
        :param pywps.app.Process.Process process:
        :param uuid: string this request uuid
        """
        super(ExecuteResponse, self).__init__(wps_request, uuid)

        # Process being executed, passed via the ``process`` keyword argument.
        self.process = kwargs["process"]
        # Map of output identifier -> output object, seeded from the process.
        self.outputs = {o.identifier: o for o in self.process.outputs}
        # Whether the execute-response XML should also be written to a status file.
        self.store_status_file = False
# override WPSResponse._update_status
def _update_status(self, status, message, status_percentage, clean=True):
"""
Updates status report of currently running process instance:
* Updates the status document.
* Updates the status file (if requested).
* Cleans the working directory when process has finished.
This method is *only* called by pywps internally.
"""
super(ExecuteResponse, self)._update_status(status, message, status_percentage)
LOGGER.debug("_update_status: status={}, clean={}".format(status, clean))
self._update_status_doc()
if self.store_status_file:
self._update_status_file()
if clean:
if self.status == WPS_STATUS.SUCCEEDED or self.status == WPS_STATUS.FAILED:
LOGGER.debug("clean workdir: status={}".format(status))
self.process.clean()
def update_status(self, message, status_percentage=None):
"""
Update status report of currently running process instance.
This method is *only* called by the user provided process.
The status is handled internally in pywps.
:param str message: Message you need to share with the client
:param int status_percentage: Percent done (number betwen <0-100>)
"""
if status_percentage is None:
status_percentage = self.status_percentage
self._update_status(self.status, message, status_percentage, False)
def _update_status_doc(self):
try:
# rebuild the doc
self.doc, self.content_type = self._construct_doc()
except Exception as e:
raise NoApplicableCode('Building Response Document failed with : {}'.format(e))
    def _update_status_file(self):
        """Write the current response document to the process status file."""
        # TODO: check if file/directory is still present, maybe deleted in mean time
        try:
            # update the status xml file
            self.process.status_store.write(
                self.doc,
                self.process.status_filename,
                data_format=FORMATS.XML)
        except Exception as e:
            raise NoApplicableCode('Writing Response Document failed with : {}'.format(e))
def _process_accepted(self):
percent = int(self.status_percentage)
if percent > 99:
percent = 99
return {
"status": "accepted",
"time": time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime()),
"percent_done": str(percent),
"message": self.message
}
def _process_started(self):
data = self._process_accepted()
data.update({
"status": "started",
})
return data
def _process_paused(self):
data = self._process_accepted()
data.update({
"status": "paused",
})
return data
def _process_succeeded(self):
data = self._process_accepted()
data.update({
"status": "succeeded",
"percent_done": "100"
})
return data
def _process_failed(self):
data = self._process_accepted()
data.update({
"status": "failed",
"code": "NoApplicableCode",
"locator": "None",
})
return data
def _get_serviceinstance(self):
url = config.get_config_value("server", "url")
params = {'request': 'GetCapabilities', 'service': 'WPS'}
url_parts = list(urlparse.urlparse(url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update(params)
url_parts[4] = urlencode(query)
return urlparse.urlunparse(url_parts).replace("&", "&")
    @property
    def json(self):
        """Full status document of this execution as a JSON-able dict.

        Contains service/process metadata, a ``status`` section matching the
        current WPS_STATUS, and -- once the process has succeeded -- the
        outputs; lineage definitions are appended on request.
        """
        data = {}
        data["language"] = self.wps_request.language
        data["service_instance"] = self._get_serviceinstance()
        data["process"] = self.process.json
        if self.store_status_file:
            if self.process.status_location:
                data["status_location"] = self.process.status_url
        if self.status == WPS_STATUS.ACCEPTED:
            self.message = 'PyWPS Process {} accepted'.format(self.process.identifier)
            data["status"] = self._process_accepted()
        elif self.status == WPS_STATUS.STARTED:
            data["status"] = self._process_started()
        elif self.status == WPS_STATUS.FAILED:
            # check if process failed and display fail message
            data["status"] = self._process_failed()
        elif self.status == WPS_STATUS.PAUSED:
            # TODO: handle paused status
            data["status"] = self._process_paused()
        elif self.status == WPS_STATUS.SUCCEEDED:
            data["status"] = self._process_succeeded()
            # Process outputs XML
            data["outputs"] = [self.outputs[o].json for o in self.outputs]
        # lineage: add optional lineage when process has finished
        if self.status in [WPS_STATUS.SUCCEEDED, WPS_STATUS.FAILED]:
            # DataInputs and DataOutputs definition XML if lineage=true
            if self.wps_request.lineage == 'true':
                data["lineage"] = True
                try:
                    # TODO: stored process has ``pywps.inout.basic.LiteralInput``
                    # instead of a ``pywps.inout.inputs.LiteralInput``.
                    data["input_definitions"] = [self.wps_request.inputs[i][0].json for i in self.wps_request.inputs]
                except Exception as e:
                    LOGGER.error("Failed to update lineage for input parameter. {}".format(e))

                data["output_definitions"] = [self.outputs[o].json for o in self.outputs]
        return data
@staticmethod
def _render_json_response(jdoc):
response = dict()
response['status'] = jdoc['status']
out = jdoc['process']['outputs']
d = {}
for val in out:
id = val.get('identifier')
if id is None:
continue
type = val.get('type')
key = 'bbox' if type == 'bbox' else 'data'
if key in val:
d[id] = val[key]
response['outputs'] = d
return response
    def _construct_doc(self):
        """Render the response either as JSON or as the execute XML template.

        Returns a ``(document, mimetype)`` tuple; the representation is
        chosen from the request's Accept header (falling back to the
        configured default mimetype).
        """
        # Give the (optional) request hook a chance to post-process outputs
        # before they are serialized.
        if self.status == WPS_STATUS.SUCCEEDED and \
                hasattr(self.wps_request, 'preprocess_response') and \
                self.wps_request.preprocess_response:
            self.outputs = self.wps_request.preprocess_response(
                self.outputs,
                request=self.wps_request,
                http_request=self.wps_request.http_request)
        doc = self.json
        try:
            json_response, mimetype = get_response_type(
                self.wps_request.http_request.accept_mimetypes, self.wps_request.default_mimetype)
        except Exception:
            mimetype = get_default_response_mimetype()
            json_response = 'json' in mimetype
        if json_response:
            doc = json.dumps(self._render_json_response(doc), cls=ArrayEncoder, indent=get_json_indent())
        else:
            template = self.template_env.get_template(self.version + '/execute/main.xml')
            doc = template.render(**doc)
        return doc, mimetype
    @Request.application
    def __call__(self, request):
        """WSGI entry point: serve the execute response.

        For raw requests the first requested output is returned directly
        (JSON-encoding non-string data when appropriate); otherwise the
        pre-built response document is served.
        """
        accept_json_response, accepted_mimetype = get_response_type(
            self.wps_request.http_request.accept_mimetypes, self.wps_request.default_mimetype)
        if self.wps_request.raw:
            if self.status == WPS_STATUS.FAILED:
                return NoApplicableCode(self.message)
            else:
                wps_output_identifier = next(iter(self.wps_request.outputs))  # get the first key only
                wps_output_value = self.outputs[wps_output_identifier]
                response = wps_output_value.data
                if response is None:
                    return NoApplicableCode("Expected output was not generated")
                suffix = ''
                # if isinstance(wps_output_value, ComplexOutput):
                data_format = None
                if hasattr(wps_output_value, 'output_format'):
                    # this is set in the response, thus should be more precise
                    data_format = wps_output_value.output_format
                elif hasattr(wps_output_value, 'data_format'):
                    # this is set in the process' response _handler function, thus could have a few supported formats
                    data_format = wps_output_value.data_format
                if data_format is not None:
                    mimetype = data_format.mime_type
                    if data_format.extension is not None:
                        suffix = data_format.extension
                else:
                    # like LitearlOutput
                    mimetype = self.wps_request.outputs[wps_output_identifier].get('mimetype', None)
                if not isinstance(response, (str, bytes, bytearray)):
                    # Non-textual data must be serialized before sending.
                    if not mimetype:
                        mimetype = accepted_mimetype
                    json_response = mimetype and 'json' in mimetype
                    if json_response:
                        mimetype = 'application/json'
                        suffix = '.json'
                        response = json.dumps(response, cls=ArrayEncoder, indent=get_json_indent())
                    else:
                        response = str(response)
                if not mimetype:
                    mimetype = None
                return Response(response, mimetype=mimetype,
                                headers={'Content-Disposition': 'attachment; filename="{}"'
                                         .format(wps_output_identifier + suffix)})
        else:
            if not self.doc:
                return NoApplicableCode("Output was not generated")
            return Response(self.doc, mimetype=accepted_mimetype)
| mit | 4ac70119a1bfea4231443860772e88f3 | 40.215054 | 117 | 0.574311 | 4.390607 | false | false | false | false |
geopython/pywps | pywps/app/WPSRequest.py | 1 | 31469 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import logging
import lxml
from pywps import xml_util as etree
from werkzeug.exceptions import MethodNotAllowed
from pywps import get_ElementMakerForVersion
import base64
import datetime
from pywps.app.basic import get_xpath_ns, parse_http_url
from pywps.inout.inputs import input_from_json
from pywps.exceptions import NoApplicableCode, OperationNotSupported, MissingParameterValue, VersionNegotiationFailed, \
InvalidParameterValue, FileSizeExceeded
from pywps import configuration
from pywps.configuration import wps_strict
from pywps import get_version_from_ns
import json
from urllib.parse import unquote
# Module-wide logger shared by all PyWPS components.
LOGGER = logging.getLogger("PYWPS")

# WPS version assumed when a request does not specify one.
default_version = '1.0.0'
class WPSRequest(object):
    def __init__(self, http_request=None, preprocessors=None):
        """Initialize the request state and, when given, parse *http_request*.

        :param http_request: werkzeug request to parse (GET or POST)
        :param preprocessors: optional mapping of process identifier ->
            (new identifier, request hook, response hook) tuples
        """
        self.http_request = http_request

        # Parsed WPS parameters; filled in by the request parsers below.
        self.operation = None
        self.version = None
        self.api = None
        self.default_mimetype = None
        self.language = None
        self.identifier = None
        self.identifiers = None
        self.store_execute = None
        self.status = None
        self.lineage = None
        self.inputs = {}
        self.output_ids = None
        self.outputs = {}
        self.raw = None
        # Version-specific XML helpers, set by set_version().
        self.WPS = None
        self.OWS = None
        self.xpath_ns = None
        self.preprocessors = preprocessors or dict()
        self.preprocess_request = None
        self.preprocess_response = None

        if http_request:
            # Extract operation/identifier hints from the URL path first,
            # then dispatch to the GET or POST parser.
            d = parse_http_url(http_request)
            self.operation = d.get('operation')
            self.identifier = d.get('identifier')
            self.output_ids = d.get('output_ids')
            self.api = d.get('api')
            self.default_mimetype = d.get('default_mimetype')
            request_parser = self._get_request_parser_method(http_request.method)
            request_parser()
def _get_request_parser_method(self, method):
if method == 'GET':
return self._get_request
elif method == 'POST':
return self._post_request
else:
raise MethodNotAllowed()
def _get_request(self):
"""HTTP GET request parser
"""
# service shall be WPS
service = _get_get_param(self.http_request, 'service', None if wps_strict else 'wps')
if service:
if str(service).lower() != 'wps':
raise InvalidParameterValue(
'parameter SERVICE [{}] not supported'.format(service), 'service')
else:
raise MissingParameterValue('service', 'service')
self.operation = _get_get_param(self.http_request, 'request', self.operation)
language = _get_get_param(self.http_request, 'language')
self.check_and_set_language(language)
request_parser = self._get_request_parser(self.operation)
request_parser(self.http_request)
    def _post_request(self):
        """HTTP POST request parser.

        Dispatches to the XML or JSON parsing path depending on the request
        content type, after enforcing the configured maximum request size.
        """
        # check if input file size was not exceeded
        maxsize = configuration.get_config_value('server', 'maxrequestsize')
        maxsize = configuration.get_size_mb(maxsize) * 1024 * 1024
        if self.http_request.content_length > maxsize:
            raise FileSizeExceeded('File size for input exceeded.'
                                   ' Maximum request size allowed: {} megabytes'.format(maxsize / 1024 / 1024))

        content_type = self.http_request.content_type or []  # or self.http_request.mimetype
        json_input = 'json' in content_type
        if not json_input:
            # XML request body.
            try:
                doc = etree.fromstring(self.http_request.get_data())
            except Exception as e:
                raise NoApplicableCode(str(e))
            operation = doc.tag
            version = get_version_from_ns(doc.nsmap[doc.prefix])
            self.set_version(version)
            language = doc.attrib.get('language')
            self.check_and_set_language(language)
            request_parser = self._post_request_parser(operation)
            request_parser(doc)
        else:
            # JSON request body.
            try:
                jdoc = json.loads(self.http_request.get_data())
            except Exception as e:
                raise NoApplicableCode(str(e))
            if self.identifier is not None:
                # Identifier already known from the URL: body is just inputs.
                jdoc = {'inputs': jdoc}
            else:
                self.identifier = jdoc.get('identifier', None)
                self.operation = jdoc.get('operation', self.operation)
            # Apply any registered per-process preprocessor hooks.
            preprocessor_tuple = self.preprocessors.get(self.identifier, None)
            if preprocessor_tuple:
                self.identifier = preprocessor_tuple[0]
                self.preprocess_request = preprocessor_tuple[1]
                self.preprocess_response = preprocessor_tuple[2]
            jdoc['operation'] = self.operation
            jdoc['identifier'] = self.identifier
            jdoc['api'] = self.api
            jdoc['default_mimetype'] = self.default_mimetype
            if self.preprocess_request is not None:
                jdoc = self.preprocess_request(jdoc, http_request=self.http_request)
            self.json = jdoc
            version = jdoc.get('version')
            self.set_version(version)
            language = jdoc.get('language')
            self.check_and_set_language(language)
            request_parser = self._post_json_request_parser()
            request_parser(jdoc)
    def _get_request_parser(self, operation):
        """Factory function returning the proper GET parsing function.

        Also normalizes ``self.operation`` to lowercase (defaulting to
        ``execute`` in non-strict mode when the request parameter is absent).
        """
        wpsrequest = self

        def parse_get_getcapabilities(http_request):
            """Parse GET GetCapabilities request
            """
            acceptedversions = _get_get_param(http_request, 'acceptversions')
            wpsrequest.check_accepted_versions(acceptedversions)
            wpsrequest.default_mimetype = _get_get_param(http_request, 'f', wpsrequest.default_mimetype)

        def parse_get_describeprocess(http_request):
            """Parse GET DescribeProcess request
            """
            version = _get_get_param(http_request, 'version')
            wpsrequest.check_and_set_version(version)
            wpsrequest.identifiers = _get_get_param(
                http_request, 'identifier', wpsrequest.identifiers, aslist=True)
            # Fall back to the identifier taken from the URL path, if any.
            if wpsrequest.identifiers is None and self.identifier is not None:
                wpsrequest.identifiers = [wpsrequest.identifier]
            wpsrequest.default_mimetype = _get_get_param(http_request, 'f', wpsrequest.default_mimetype)

        def parse_get_execute(http_request):
            """Parse GET Execute request
            """
            version = _get_get_param(http_request, 'version')
            wpsrequest.check_and_set_version(version)
            wpsrequest.identifier = _get_get_param(http_request, 'identifier', wpsrequest.identifier)
            wpsrequest.store_execute = _get_get_param(
                http_request, 'storeExecuteResponse', 'false')
            wpsrequest.status = _get_get_param(http_request, 'status', 'false')
            wpsrequest.lineage = _get_get_param(
                http_request, 'lineage', 'false')
            wpsrequest.inputs = get_data_from_kvp(
                _get_get_param(http_request, 'DataInputs'), 'DataInputs')
            if self.inputs is None:
                self.inputs = {}

            # take responseDocument preferably
            raw, output_ids = False, _get_get_param(http_request, 'ResponseDocument')
            if output_ids is None:
                raw, output_ids = True, _get_get_param(http_request, 'RawDataOutput')
            if output_ids is not None:
                wpsrequest.raw, wpsrequest.output_ids = raw, output_ids
            elif wpsrequest.raw is None:
                wpsrequest.raw = wpsrequest.output_ids is not None
            wpsrequest.default_mimetype = _get_get_param(http_request, 'f', wpsrequest.default_mimetype)
            wpsrequest.outputs = get_data_from_kvp(wpsrequest.output_ids) or {}

            if wpsrequest.raw:
                # executeResponse XML will not be stored and no updating of
                # status
                wpsrequest.store_execute = 'false'
                wpsrequest.status = 'false'

        if operation:
            self.operation = operation.lower()
        else:
            if wps_strict:
                raise MissingParameterValue('Missing request value', 'request')
            self.operation = 'execute'

        if self.operation == 'getcapabilities':
            return parse_get_getcapabilities
        elif self.operation == 'describeprocess':
            return parse_get_describeprocess
        elif self.operation == 'execute':
            return parse_get_execute
        else:
            raise OperationNotSupported(
                'Unknown request {}'.format(self.operation), operation)
    def _post_request_parser(self, tagname):
        """Factory function returning a proper parsing function
        according to tagname and sets self.operation to the correct operation
        """
        wpsrequest = self

        def parse_post_getcapabilities(doc):
            """Parse POST GetCapabilities request
            """
            acceptedversions = self.xpath_ns(
                doc, '/wps:GetCapabilities/ows:AcceptVersions/ows:Version')
            acceptedversions = ','.join(
                [v.text for v in acceptedversions])
            wpsrequest.check_accepted_versions(acceptedversions)

        def parse_post_describeprocess(doc):
            """Parse POST DescribeProcess request
            """
            version = doc.attrib.get('version')
            wpsrequest.check_and_set_version(version)
            wpsrequest.operation = 'describeprocess'
            wpsrequest.identifiers = [identifier_el.text for identifier_el in
                                      self.xpath_ns(doc, './ows:Identifier')]

        def parse_post_execute(doc):
            """Parse POST Execute request
            """
            version = doc.attrib.get('version')
            wpsrequest.check_and_set_version(version)
            wpsrequest.operation = 'execute'

            identifier = self.xpath_ns(doc, './ows:Identifier')
            if not identifier:
                raise MissingParameterValue(
                    'Process identifier not set', 'Identifier')
            wpsrequest.identifier = identifier[0].text
            # Defaults; possibly overridden by the ResponseDocument below.
            wpsrequest.lineage = 'false'
            wpsrequest.store_execute = 'false'
            wpsrequest.status = 'false'
            wpsrequest.inputs = get_inputs_from_xml(doc)
            wpsrequest.outputs = get_output_from_xml(doc)
            wpsrequest.raw = False
            if self.xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:RawDataOutput'):
                wpsrequest.raw = True
                # executeResponse XML will not be stored
                wpsrequest.store_execute = 'false'

            # check if response document tag has been set then retrieve
            response_document = self.xpath_ns(
                doc, './wps:ResponseForm/wps:ResponseDocument')
            if len(response_document) > 0:
                wpsrequest.lineage = response_document[
                    0].attrib.get('lineage', 'false')
                wpsrequest.store_execute = response_document[
                    0].attrib.get('storeExecuteResponse', 'false')
                wpsrequest.status = response_document[
                    0].attrib.get('status', 'false')

        if tagname == self.WPS.GetCapabilities().tag:
            self.operation = 'getcapabilities'
            return parse_post_getcapabilities
        elif tagname == self.WPS.DescribeProcess().tag:
            self.operation = 'describeprocess'
            return parse_post_describeprocess
        elif tagname == self.WPS.Execute().tag:
            self.operation = 'execute'
            return parse_post_execute
        else:
            raise InvalidParameterValue(
                'Unknown request {}'.format(tagname), 'request')
    def _post_json_request_parser(self):
        """
        Factory function returning a proper parsing function
        according to self.operation.
        self.operation is modified to be lowercase
        or the default 'execute' operation if self.operation is None
        """
        wpsrequest = self

        def parse_json_post_getcapabilities(jdoc):
            """Parse POST GetCapabilities request
            """
            acceptedversions = jdoc.get('acceptedversions')
            wpsrequest.check_accepted_versions(acceptedversions)

        def parse_json_post_describeprocess(jdoc):
            """Parse POST DescribeProcess request
            """
            version = jdoc.get('version')
            wpsrequest.check_and_set_version(version)
            # NOTE(review): running xpath_ns over a JSON dict looks wrong --
            # identifiers should presumably be read directly from jdoc;
            # confirm against the JSON request schema before changing.
            wpsrequest.identifiers = [identifier_el.text for identifier_el in
                                      self.xpath_ns(jdoc, './ows:Identifier')]

        def parse_json_post_execute(jdoc):
            """Parse POST Execute request
            """
            version = jdoc.get('version')
            wpsrequest.check_and_set_version(version)
            wpsrequest.identifier = jdoc.get('identifier')
            if wpsrequest.identifier is None:
                raise MissingParameterValue(
                    'Process identifier not set', 'Identifier')
            wpsrequest.lineage = 'false'
            wpsrequest.store_execute = 'false'
            wpsrequest.status = 'false'
            wpsrequest.inputs = get_inputs_from_json(jdoc)
            if wpsrequest.output_ids is None:
                wpsrequest.output_ids = jdoc.get('outputs', {})
            wpsrequest.raw = jdoc.get('raw', False)
            wpsrequest.raw, wpsrequest.outputs = get_output_from_dict(wpsrequest.output_ids, wpsrequest.raw)
            if wpsrequest.raw:
                # executeResponse XML will not be stored
                wpsrequest.store_execute = 'false'
            # todo: parse response_document like in the xml version?

        self.operation = 'execute' if self.operation is None else self.operation.lower()
        if self.operation == 'getcapabilities':
            return parse_json_post_getcapabilities
        elif self.operation == 'describeprocess':
            return parse_json_post_describeprocess
        elif self.operation == 'execute':
            return parse_json_post_execute
        else:
            raise InvalidParameterValue(
                'Unknown request {}'.format(self.operation), 'request')
def set_version(self, version):
self.version = version
self.xpath_ns = get_xpath_ns(version)
self.WPS, self.OWS = get_ElementMakerForVersion(self.version)
def check_accepted_versions(self, acceptedversions):
"""
:param acceptedversions: string
"""
version = None
if acceptedversions:
acceptedversions_array = acceptedversions.split(',')
for aversion in acceptedversions_array:
if _check_version(aversion):
version = aversion
else:
version = '1.0.0'
if version:
self.check_and_set_version(version)
else:
raise VersionNegotiationFailed(
'The requested version "{}" is not supported by this server'.format(acceptedversions), 'version')
def check_and_set_version(self, version, allow_default=True):
"""set this.version
"""
if not version:
if allow_default:
version = default_version
else:
raise MissingParameterValue('Missing version', 'version')
if not _check_version(version):
raise VersionNegotiationFailed(
'The requested version "{}" is not supported by this server'.format(version), 'version')
else:
self.set_version(version)
def check_and_set_language(self, language):
"""set this.language
"""
supported_languages = configuration.get_config_value('server', 'language').split(',')
supported_languages = [lang.strip() for lang in supported_languages]
if not language:
# default to the first supported language
language = supported_languages[0]
if language not in supported_languages:
raise InvalidParameterValue(
'The requested language "{}" is not supported by this server'.format(language),
'language',
)
self.language = language
    @property
    def json(self):
        """Return JSON encoded representation of the request
        """

        class ExtendedJSONEncoder(json.JSONEncoder):
            # Serialize date/time objects as ISO-8601 strings; anything else
            # falls through to the default encoder (which raises TypeError).
            def default(self, obj):
                if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):
                    encoded_object = obj.isoformat()
                else:
                    encoded_object = json.JSONEncoder.default(self, obj)
                return encoded_object

        obj = {
            'operation': self.operation,
            'version': self.version,
            'api': self.api,
            'default_mimetype': self.default_mimetype,
            'language': self.language,
            'identifier': self.identifier,
            'identifiers': self.identifiers,
            'store_execute': self.store_execute,
            'status': self.status,
            'lineage': self.lineage,
            'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),
            'outputs': self.outputs,
            'raw': self.raw
        }

        return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)
    @json.setter
    def json(self, value):
        """init this request from json back again

        :param value: the json (not string) representation
        """
        self.operation = value.get('operation')
        self.version = value.get('version')
        self.api = value.get('api')
        self.default_mimetype = value.get('default_mimetype')
        self.language = value.get('language')
        self.identifier = value.get('identifier')
        self.identifiers = value.get('identifiers')
        self.store_execute = value.get('store_execute')
        self.status = value.get('status', False)
        self.lineage = value.get('lineage', False)
        self.outputs = value.get('outputs')
        self.raw = value.get('raw', False)

        self.inputs = {}
        for identifier in value.get('inputs', []):
            inpt_defs = value['inputs'][identifier]
            # Normalize single definitions to a one-element list.
            if not isinstance(inpt_defs, (list, tuple)):
                inpt_defs = [inpt_defs]
            self.inputs[identifier] = []
            for inpt_def in inpt_defs:
                # Bare values are treated as literal data.
                if not isinstance(inpt_def, dict):
                    inpt_def = {"data": inpt_def}
                if 'identifier' not in inpt_def:
                    inpt_def['identifier'] = identifier
                try:
                    inpt = input_from_json(inpt_def)
                    self.inputs[identifier].append(inpt)
                except Exception as e:
                    # Unparseable inputs are skipped (logged) rather than fatal.
                    LOGGER.warning(e)
                    LOGGER.warning(f'skipping input: {identifier}')
                    pass
def get_inputs_from_xml(doc):
    """Parse execute inputs from an Execute XML document.

    Returns a mapping of input identifier -> list of input dicts; each input
    may be literal data, complex data, a reference, or a bounding box.
    """
    the_inputs = {}
    version = get_version_from_ns(doc.nsmap[doc.prefix])
    xpath_ns = get_xpath_ns(version)
    for input_el in xpath_ns(doc, '/wps:Execute/wps:DataInputs/wps:Input'):
        [identifier_el] = xpath_ns(input_el, './ows:Identifier')
        identifier = identifier_el.text

        if identifier not in the_inputs:
            the_inputs[identifier] = []

        # Literal data input.
        literal_data = xpath_ns(input_el, './wps:Data/wps:LiteralData')
        if literal_data:
            value_el = literal_data[0]
            inpt = {}
            inpt['identifier'] = identifier_el.text
            inpt['data'] = str(value_el.text)
            inpt['uom'] = value_el.attrib.get('uom', '')
            inpt['datatype'] = value_el.attrib.get('datatype', '')
            the_inputs[identifier].append(inpt)
            continue

        # Inline complex data input (embedded XML child or CDATA text).
        complex_data = xpath_ns(input_el, './wps:Data/wps:ComplexData')
        if complex_data:
            complex_data_el = complex_data[0]
            inpt = {}
            inpt['identifier'] = identifier_el.text
            inpt['mimeType'] = complex_data_el.attrib.get('mimeType', None)
            inpt['encoding'] = complex_data_el.attrib.get('encoding', '').lower()
            inpt['schema'] = complex_data_el.attrib.get('schema', '')
            inpt['method'] = complex_data_el.attrib.get('method', 'GET')
            if len(complex_data_el.getchildren()) > 0:
                value_el = complex_data_el[0]
                inpt['data'] = _get_dataelement_value(value_el)
            else:
                inpt['data'] = _get_rawvalue_value(
                    complex_data_el.text, inpt['encoding'])
            the_inputs[identifier].append(inpt)
            continue

        # Referenced input (fetched from an xlink:href URL).
        reference_data = xpath_ns(input_el, './wps:Reference')
        if reference_data:
            reference_data_el = reference_data[0]
            inpt = {}
            inpt['identifier'] = identifier_el.text
            inpt[identifier_el.text] = reference_data_el.text
            inpt['href'] = reference_data_el.attrib.get(
                '{http://www.w3.org/1999/xlink}href', '')
            inpt['mimeType'] = reference_data_el.attrib.get('mimeType', None)
            inpt['method'] = reference_data_el.attrib.get('method', 'GET')
            header_element = xpath_ns(reference_data_el, './wps:Header')
            if header_element:
                inpt['header'] = _get_reference_header(header_element)
            body_element = xpath_ns(reference_data_el, './wps:Body')
            if body_element:
                inpt['body'] = _get_reference_body(body_element[0])
            bodyreference_element = xpath_ns(reference_data_el,
                                             './wps:BodyReference')
            if bodyreference_element:
                inpt['bodyreference'] = _get_reference_bodyreference(
                    bodyreference_element[0])
            the_inputs[identifier].append(inpt)
            continue

        # Bounding-box input.
        # Using OWSlib BoundingBox
        from owslib.ows import BoundingBox
        bbox_datas = xpath_ns(input_el, './wps:Data/wps:BoundingBoxData')
        if bbox_datas:
            for bbox_data in bbox_datas:
                bbox = BoundingBox(bbox_data)
                LOGGER.debug("parse bbox: minx={}, miny={}, maxx={},maxy={}".format(
                    bbox.minx, bbox.miny, bbox.maxx, bbox.maxy))
                inpt = {}
                inpt['identifier'] = identifier_el.text
                inpt['data'] = [bbox.minx, bbox.miny, bbox.maxx, bbox.maxy]
                inpt['crs'] = bbox.crs.getcodeurn() if bbox.crs else None
                inpt['dimensions'] = bbox.dimensions
                the_inputs[identifier].append(inpt)
    return the_inputs
def get_output_from_xml(doc):
    """Parse requested outputs from an Execute XML document.

    Reads either ResponseDocument/Output elements (which carry an
    ``asReference`` attribute) or RawDataOutput elements and returns a
    mapping of output identifier -> attribute dict.
    """
    version = get_version_from_ns(doc.nsmap[doc.prefix])
    xpath_ns = get_xpath_ns(version)

    def _parse_output(output_el, with_reference):
        # Common attribute extraction shared by both output forms.
        [identifier_el] = xpath_ns(output_el, './ows:Identifier')
        outpt = {identifier_el.text: ''}
        outpt['mimetype'] = output_el.attrib.get('mimeType', None)
        outpt['encoding'] = output_el.attrib.get('encoding', '')
        outpt['schema'] = output_el.attrib.get('schema', '')
        outpt['uom'] = output_el.attrib.get('uom', '')
        if with_reference:
            outpt['asReference'] = output_el.attrib.get('asReference', 'false')
        return identifier_el.text, outpt

    the_output = {}
    if xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:ResponseDocument'):
        for output_el in xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:ResponseDocument/wps:Output'):
            ident, outpt = _parse_output(output_el, with_reference=True)
            the_output[ident] = outpt
    elif xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:RawDataOutput'):
        for output_el in xpath_ns(doc, '/wps:Execute/wps:ResponseForm/wps:RawDataOutput'):
            ident, outpt = _parse_output(output_el, with_reference=False)
            the_output[ident] = outpt
    return the_output
def get_inputs_from_json(jdoc):
    """Parse execute inputs from a JSON request document.

    Returns a mapping of input identifier -> list of input dicts; each dict
    carries the attributes relevant to its type (``literal`` by default,
    ``complex``, ``reference`` or ``bbox``).  Unknown types are dropped.
    """
    the_inputs = {}
    for identifier, definitions in jdoc.get('inputs', {}).items():
        if not isinstance(definitions, (list, tuple)):
            definitions = [definitions]
        parsed = []
        for definition in definitions:
            # A bare value is shorthand for literal data.
            if not isinstance(definition, dict):
                definition = {"data": definition}
            kind = definition.get('type', 'literal')
            inpt = {'identifier': identifier}
            if kind == 'literal':
                inpt['data'] = definition.get('data')
                inpt['uom'] = definition.get('uom', '')
                inpt['datatype'] = definition.get('datatype', '')
                parsed.append(inpt)
            elif kind == 'complex':
                inpt['mimeType'] = definition.get('mimeType', None)
                inpt['encoding'] = definition.get('encoding', '').lower()
                inpt['schema'] = definition.get('schema', '')
                inpt['method'] = definition.get('method', 'GET')
                inpt['data'] = _get_rawvalue_value(definition.get('data', ''), inpt['encoding'])
                parsed.append(inpt)
            elif kind == 'reference':
                inpt[identifier] = definition
                inpt['href'] = definition.get('href', '')
                inpt['mimeType'] = definition.get('mimeType', None)
                inpt['method'] = definition.get('method', 'GET')
                inpt['header'] = definition.get('header', '')
                inpt['body'] = definition.get('body', '')
                inpt['bodyreference'] = definition.get('bodyreference', '')
                parsed.append(inpt)
            elif kind == 'bbox':
                inpt['data'] = definition['bbox']
                inpt['crs'] = definition.get('crs', 'urn:ogc:def:crs:EPSG::4326')
                inpt['dimensions'] = definition.get('dimensions', 2)
                parsed.append(inpt)
        the_inputs[identifier] = parsed
    return the_inputs
def get_output_from_dict(output_ids, raw):
    """Normalize requested outputs from a JSON request.

    ``output_ids`` may be a dict of identifier -> attributes, a list/tuple
    of identifiers, or a single identifier (the latter implies raw output).
    Returns a ``(raw, outputs)`` tuple.
    """
    if isinstance(output_ids, dict):
        normalized = output_ids
    elif isinstance(output_ids, (tuple, list)):
        normalized = {ident: {} for ident in output_ids}
    else:
        normalized = {output_ids: {}}
        raw = True  # single non-dict output means raw output

    the_output = {}
    for identifier, output_el in normalized.items():
        if isinstance(output_el, list):
            output_el = output_el[0]
        outpt = {identifier: ''}
        outpt['mimetype'] = output_el.get('mimeType', None)
        outpt['encoding'] = output_el.get('encoding', '')
        outpt['schema'] = output_el.get('schema', '')
        outpt['uom'] = output_el.get('uom', '')
        if not raw:
            outpt['asReference'] = output_el.get('asReference', 'false')
        the_output[identifier] = outpt
    return raw, the_output
def get_data_from_kvp(data, part=None):
    """Get execute DataInputs and ResponseDocument from URL (key-value-pairs) encoding

    :param data: key:value pair list of the datainputs and responseDocument parameter
    :param part: DataInputs or similar part of input url
    """
    if data is None:
        return None

    the_data = {}
    for chunk in data.split(";"):
        try:
            fields = chunk.split('@')
            # First field is identifier and its value.
            identifier, value = fields[0].split("=")
            io = {'identifier': identifier, 'data': unquote(value)}

            # Remaining fields are attributes of this input/output.
            for attr in fields[1:]:
                attribute, attr_val = attr.split('=', 1)
                key = 'href' if attribute == 'xlink:href' else attribute
                io[key] = unquote(attr_val)

            # DataInputs may repeat an identifier, so they collect in lists.
            if part == 'DataInputs':
                the_data.setdefault(identifier, []).append(io)
            else:
                the_data[identifier] = io
        except Exception as e:
            LOGGER.warning(e)
            the_data[chunk] = {'identifier': chunk, 'data': ''}
    return the_data
def _check_version(version):
""" check given version
"""
if version not in ['1.0.0', '2.0.0']:
return False
else:
return True
def _get_get_param(http_request, key, default=None, aslist=False):
"""Returns value from the key:value pair, of the HTTP GET request, for
example 'service' or 'request'
:param http_request: http_request object
:param key: key value you need to dig out of the HTTP GET request
"""
key = key.lower()
value = default
# http_request.args.keys will make + sign disappear in GET url if not
# urlencoded
for k in http_request.args.keys():
if k.lower() == key:
value = http_request.args.get(k)
if aslist:
value = value.split(",")
return value
def _get_dataelement_value(value_el):
    """Return real value of XML Element (e.g. convert Element.FeatureCollection
    to String
    """
    # lxml elements are serialized back to unicode markup; anything else
    # (already-extracted text) is passed through untouched.
    if isinstance(value_el, lxml.etree._Element):
        return etree.tostring(value_el, encoding=str)
    else:
        return value_el
def _get_rawvalue_value(data, encoding=None):
    """Return real value of CDATA section"""
    try:
        LOGGER.debug("encoding={}".format(encoding))
        # No/plain-text encoding: data is already usable as-is.
        if encoding in (None, "", "utf-8"):
            return data
        # base64 (or any other declared encoding) is decoded as base64.
        return base64.b64decode(data)
    except Exception:
        LOGGER.warning("failed to decode base64")
        return data
def _get_reference_header(header_element):
"""Parses ReferenceInput Header element
"""
header = {}
header['key'] = header_element.attrib('key')
header['value'] = header_element.attrib('value')
return header
def _get_reference_body(body_element):
    """Parses ReferenceInput Body element

    Returns the serialized first child element when one is present,
    otherwise the decoded text content of the element.
    """
    # ``len(element)`` counts child elements and works with both lxml and
    # xml.etree; Element.getchildren() was deprecated and removed from
    # the stdlib ElementTree in Python 3.9.
    if len(body_element) > 0:
        return _get_dataelement_value(body_element[0])
    return _get_rawvalue_value(body_element.text)
def _get_reference_bodyreference(referencebody_element):
"""Parse ReferenceInput BodyReference element
"""
return referencebody_element.attrib.get(
'{http://www.w3.org/1999/xlink}href', '')
| mit | 04670d006985e75a1751153550ea737a | 37.802713 | 120 | 0.572246 | 4.134673 | false | false | false | false |
keon/algorithms | algorithms/queues/priority_queue.py | 1 | 1790 | """
Implementation of priority queue using linear array.
Insertion - O(n)
Extract min/max Node - O(1)
"""
import itertools
class PriorityQueueNode:
    """A single queue entry pairing a payload with its priority."""

    def __init__(self, data, priority):
        self.data = data
        self.priority = priority

    def __repr__(self):
        return f"{self.data}: {self.priority}"


class PriorityQueue:
    """Priority queue backed by a list kept in descending priority order.

    Insertion is O(n); extracting the minimum-priority item is O(1)
    because it always sits at the end of the list.
    """

    def __init__(self, items=None, priorities=None):
        """Create a priority queue, optionally pre-filled from `items`.

        When `priorities` is omitted, each item serves as its own priority.
        """
        self.priority_queue_list = []
        if items is None:
            return
        if priorities is None:
            priorities = itertools.repeat(None)
        for item, priority in zip(items, priorities):
            self.push(item, priority=priority)

    def __repr__(self):
        return f"PriorityQueue({self.priority_queue_list!r})"

    def size(self):
        """Return the number of queued items."""
        return len(self.priority_queue_list)

    def push(self, item, priority=None):
        """Insert `item`, keeping the list sorted by descending priority.

        When `priority` is omitted, the item itself is used as the priority.
        """
        if priority is None:
            priority = item
        entry = PriorityQueueNode(item, priority)
        # Insert in front of the first entry with a strictly lower priority.
        for position, existing in enumerate(self.priority_queue_list):
            if existing.priority < entry.priority:
                self.priority_queue_list.insert(position, entry)
                return
        # Lowest priority so far: it belongs at the very end.
        self.priority_queue_list.append(entry)

    def pop(self):
        """Remove and return the item with the lowest priority (list tail)."""
        return self.priority_queue_list.pop().data
| mit | 4940ff75f42a6fd68f25c71fcbb5ea31 | 31.545455 | 71 | 0.616201 | 4.272076 | false | false | false | false |
keon/algorithms | algorithms/strings/strip_url_params.py | 2 | 3680 | """
Write a function that does the following:
Removes any duplicate query string parameters from the url
Removes any query string parameters specified within the 2nd argument (optional array)
An example:
www.saadbenn.com?a=1&b=2&a=2') // returns 'www.saadbenn.com?a=1&b=2'
"""
from collections import defaultdict
import urllib
import urllib.parse
# Here is a very non-pythonic grotesque solution
def strip_url_params1(url, params_to_strip=None):
    """Remove duplicate query parameters (and those in `params_to_strip`)
    from `url`, keeping the first occurrence of each key.

    NOTE(review): this hand-rolled tokenizer splits the query string on
    digits, so it only handles single-digit values and keys of one or two
    characters; it also only honours the first entry of `params_to_strip`.
    Prefer strip_url_params3 for real use.
    """
    if not params_to_strip:
        params_to_strip = []
    if url:
        result = '' # final result to be returned
        tokens = url.split('?')
        domain = tokens[0]
        query_string = tokens[-1]
        result += domain
        # add the '?' to our result if it is in the url
        if len(tokens) > 1:
            result += '?'
        if not query_string:
            return url
        else:
            # logic for removing duplicate query strings
            # build up the list by splitting the query_string using digits
            key_value_string = []
            string = ''
            for char in query_string:
                if char.isdigit():
                    # a digit ends the current key=value token
                    key_value_string.append(string + char)
                    string = ''
                else:
                    string += char
            # NOTE(review): `dict` shadows the builtin of the same name
            dict = defaultdict(int)
            # logic for checking whether we should add the string to our result
            for i in key_value_string:
                _token = i.split('=')
                if _token[0]:
                    length = len(_token[0])
                    if length == 1:
                        # single-character key: use it directly for dedup
                        if _token and (not(_token[0] in dict)):
                            if params_to_strip:
                                if _token[0] != params_to_strip[0]:
                                    dict[_token[0]] = _token[1]
                                    result = result + _token[0] + '=' + _token[1]
                            else:
                                if not _token[0] in dict:
                                    dict[_token[0]] = _token[1]
                                    result = result + _token[0] + '=' + _token[1]
                    else:
                        # multi-character token (e.g. leading '&'): dedup on
                        # the second character, assumed to be the key letter
                        check = _token[0]
                        letter = check[1]
                        if _token and (not(letter in dict)):
                            if params_to_strip:
                                if letter != params_to_strip[0]:
                                    dict[letter] = _token[1]
                                    result = result + _token[0] + '=' + _token[1]
                        else:
                            if not letter in dict:
                                dict[letter] = _token[1]
                                result = result + _token[0] + '=' + _token[1]
    return result
# A very friendly pythonic solution (easy to follow)
def strip_url_params2(url, param_to_strip=None):
    """Remove query parameters that are duplicated or listed in
    `param_to_strip`, keeping the first occurrence of every other key.

    >>> strip_url_params2('www.saadbenn.com?a=1&b=2&a=2')
    'www.saadbenn.com?a=1&b=2'

    Fixes three defects of the original: keys were compared by their first
    character only (so 'ab' and 'ac' collided), the first parameter could
    never be stripped (the scan stopped at index 1), and the default
    argument was a shared mutable list.
    """
    if param_to_strip is None:
        param_to_strip = []
    if '?' not in url:
        return url
    base, _, query_string = url.partition('?')
    kept, seen = [], set()
    for query in query_string.split('&'):
        # Compare on the full key, not just its first character.
        key = query.split('=', 1)[0]
        if key in seen or key in param_to_strip:
            continue
        seen.add(key)
        kept.append(query)
    return base + '?' + '&'.join(kept)
# Here is my friend's solution using python's builtin libraries
def strip_url_params3(url, strip=None):
    """Variation built on urllib: parse the URL, drop stripped/duplicate
    keys (keeping each key's first value), and rebuild the query string."""
    strip = strip or []
    parsed = urllib.parse.urlparse(url)
    params = urllib.parse.parse_qs(parsed.query)
    kept = {name: values[0] for name, values in params.items() if name not in strip}
    return parsed._replace(query=urllib.parse.urlencode(kept)).geturl()
keon/algorithms | algorithms/matrix/count_paths.py | 2 | 1178 | #
# Count the number of unique paths from a[0][0] to a[m-1][n-1]
# We are allowed to move either right or down from a cell in the matrix.
# Approaches-
# (i) Recursion- Recurse starting from a[m-1][n-1], upwards and leftwards,
# add the path count of both recursions and return count.
# (ii) Dynamic Programming- Start from a[0][0].Store the count in a count
# matrix. Return count[m-1][n-1]
# T(n)- O(mn), S(n)- O(mn)
#
def count_paths(m, n):
    """Count unique right/down paths from a[0][0] to a[m-1][n-1] via DP.

    Returns the count (and still prints it, preserving the original CLI
    behaviour -- the original printed but returned None, making the
    function useless to callers).  Returns -1 for non-positive dimensions.
    T(n) = O(mn), S(n) = O(mn).
    """
    if m < 1 or n < 1:
        return -1
    # First row and first column each have exactly one path, so seed with 1s.
    count = [[1] * n for _ in range(m)]
    for i in range(1, m):
        for j in range(1, n):
            # Paths to a[i][j] = paths from above + paths from the left.
            count[i][j] = count[i - 1][j] + count[i][j - 1]
    print(count[m - 1][n - 1])
    return count[m - 1][n - 1]
def main():
    """Read grid dimensions from stdin and report the number of paths."""
    rows, cols = map(int, input('Enter two positive integers: ').split())
    count_paths(rows, cols)


if __name__ == '__main__':
    main()
| mit | c59a18b5ae29be8f4d67c01febc2e2f6 | 29.205128 | 74 | 0.528014 | 2.997455 | false | false | false | false |
keon/algorithms | algorithms/strings/breaking_bad.py | 2 | 3053 | """
Given an api which returns an array of words and an array of symbols, display
the word with their matched symbol surrounded by square brackets.
If the word string matches more than one symbol, then choose the one with
longest length. (ex. 'Microsoft' matches 'i' and 'cro'):
Example:
Words array: ['Amazon', 'Microsoft', 'Google']
Symbols: ['i', 'Am', 'cro', 'Na', 'le', 'abc']
Output:
[Am]azon, Mi[cro]soft, Goog[le]
My solution(Wrong):
(I sorted the symbols array in descending order of length and ran loop over
words array to find a symbol match(using indexOf in javascript) which
worked. But I didn't make it through the interview, I am guessing my solution
was O(n^2) and they expected an efficient algorithm.
output:
['[Am]azon', 'Mi[cro]soft', 'Goog[le]', 'Amaz[o]n', 'Micr[o]s[o]ft', 'G[o][o]gle']
"""
from functools import reduce
def match_symbol(words, symbols):
    """For every symbol, append each word that matches it (as a regex) with
    every occurrence of the symbol wrapped in square brackets.

    Note: a word can appear once per matching symbol, so the output may be
    longer than `words`.
    """
    import re
    annotated = []
    for symbol in symbols:
        for word in words:
            if re.search(symbol, word):
                annotated.append(re.sub(symbol, "[{}]".format(symbol), word))
    return annotated
def match_symbol_1(words, symbols):
    """Return each word with its longest matching symbol bracketed.

    Symbols are tried longest-first, so e.g. 'Microsoft' matched against
    both 'i' and 'cro' yields 'Mi[cro]soft'.  Words with no match are
    appended unchanged.
    """
    out = []
    # Longest symbols first so the longest match wins (sort is stable,
    # preserving the original order among equal lengths).
    ordered = sorted(symbols, key=len, reverse=True)
    for word in words:
        marked = ''
        for symbol in ordered:
            if symbol in word:
                marked = word.replace(symbol, '[' + symbol + ']')
                out.append(marked)
                break
        if marked == '':
            out.append(word)
    return out
"""
Another approach is to use a Tree for the dictionary (the symbols), and then
match brute force. The complexity will depend on the dictionary;
if all are suffixes of the other, it will be n*m
(where m is the size of the dictionary). For example, in Python:
"""
class TreeNode:
    """Trie node: `c` maps a character to its child node; `sym` stores the
    complete symbol ending at this node (None for intermediate nodes)."""
    def __init__(self):
        self.c = dict()
        self.sym = None
def bracket(words, symbols):
    """Return each word with its longest matching symbol wrapped in brackets.

    Builds a trie of the symbols, then for every word records every
    (start, end, symbol) occurrence and keeps the longest one.  Complexity
    depends on the dictionary; if all symbols are suffixes of each other it
    degrades to n*m (m = dictionary size).
    """
    # Build the symbol trie; node.sym marks the end of a complete symbol.
    root = TreeNode()
    for s in symbols:
        t = root
        for char in s:
            if char not in t.c:
                t.c[char] = TreeNode()
            t = t.c[char]
        t.sym = s
    result = dict()
    for word in words:
        i = 0
        symlist = list()
        while i < len(word):
            j, t = i, root
            # Walk the trie from position i, collecting every symbol match.
            while j < len(word) and word[j] in t.c:
                t = t.c[word[j]]
                if t.sym is not None:
                    symlist.append((j + 1 - len(t.sym), j + 1, t.sym))
                j += 1
            i += 1
        if len(symlist) > 0:
            # Keep the longest occurrence; ties go to the earliest one found.
            sym = reduce(lambda x, y: x if x[1] - x[0] >= y[1] - y[0] else y,
                         symlist)
            result[word] = "{}[{}]{}".format(word[:sym[0]], sym[2],
                                            word[sym[1]:])
    return tuple(word if word not in result else result[word] for word in words)
| mit | 1142d605319ab2a0b4808dcfce97b848 | 31.136842 | 82 | 0.562398 | 3.558275 | false | false | false | false |
keon/algorithms | algorithms/bfs/maze_search.py | 1 | 1487 | from collections import deque
'''
BFS time complexity : O(|E| + |V|)
BFS space complexity : O(|E| + |V|)
do BFS from (0,0) of the grid and get the minimum number of steps needed to get to the lower right column
only step on the columns whose value is 1
if there is no path, it returns -1
Ex 1)
If grid is
[[1,0,1,1,1,1],
[1,0,1,0,1,0],
[1,0,1,0,1,1],
[1,1,1,0,1,1]],
the answer is: 14
Ex 2)
If grid is
[[1,0,0],
[0,1,1],
[0,1,1]],
the answer is: -1
'''
def maze_search(maze):
    """BFS from (0, 0) over cells whose value is 1; return the minimum number
    of steps to reach the bottom-right cell, or -1 when unreachable.

    Time / space complexity: O(|V| + |E|).
    """
    if maze[0][0] == 0:  # starting cell is blocked
        return -1
    rows, cols = len(maze), len(maze[0])
    goal = (rows - 1, cols - 1)
    frontier = deque([(0, 0, 0)])
    seen = {(0, 0)}
    while frontier:
        row, col, dist = frontier.popleft()
        if (row, col) == goal:
            return dist
        for d_row, d_col in ((0, -1), (0, 1), (-1, 0), (1, 0)):
            nxt_row, nxt_col = row + d_row, col + d_col
            if not (0 <= nxt_row < rows and 0 <= nxt_col < cols):
                continue
            if maze[nxt_row][nxt_col] == 1 and (nxt_row, nxt_col) not in seen:
                seen.add((nxt_row, nxt_col))
                frontier.append((nxt_row, nxt_col, dist + 1))
    return -1
| mit | 7b1c564096eb3e661e6ef5cc20c31100 | 21.19403 | 105 | 0.544048 | 2.733456 | false | false | false | false |
keon/algorithms | tests/test_sort.py | 1 | 4137 | from algorithms.sort import (
bitonic_sort,
bogo_sort,
bubble_sort,
comb_sort,
counting_sort,
cycle_sort,
exchange_sort,
max_heap_sort, min_heap_sort,
merge_sort,
pancake_sort,
pigeonhole_sort,
quick_sort,
selection_sort,
bucket_sort,
shell_sort,
radix_sort,
gnome_sort,
cocktail_shaker_sort,
top_sort, top_sort_recursive
)
import unittest
def is_sorted(array):
    """
    Helper function to check if the given array is sorted.
    :param array: Array to check if sorted
    :return: True if sorted in ascending order, else False
    """
    return all(a <= b for a, b in zip(array, array[1:]))
class TestSuite(unittest.TestCase):
    """Each test sorts a small fixture list with one algorithm from
    algorithms.sort and verifies the result via `is_sorted`."""

    def test_bogo_sort(self):
        self.assertTrue(is_sorted(bogo_sort([1, 23, 5])))

    def test_bitonic_sort(self):
        self.assertTrue(is_sorted(bitonic_sort([1, 3, 2, 5, 65,
                                                23, 57, 1232])))

    def test_bubble_sort(self):
        self.assertTrue(is_sorted(bubble_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_comb_sort(self):
        self.assertTrue(is_sorted(comb_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_counting_sort(self):
        self.assertTrue(is_sorted(counting_sort([1, 3, 2, 5, 65,
                                                 23, 57, 1232])))

    def test_cycle_sort(self):
        self.assertTrue(is_sorted(cycle_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_exchange_sort(self):
        self.assertTrue(is_sorted(exchange_sort([1, 3, 2, 5, 65,
                                                 23, 57, 1232])))

    def test_heap_sort(self):
        # covers both the max-heap and min-heap variants
        self.assertTrue(is_sorted(max_heap_sort([1, 3, 2, 5, 65,
                                                 23, 57, 1232])))
        self.assertTrue(is_sorted(min_heap_sort([1, 3, 2, 5, 65,
                                                 23, 57, 1232])))

    def test_insertion_sort(self):
        # NOTE(review): calls bitonic_sort, not insertion_sort -- likely a
        # copy-paste slip in the original suite
        self.assertTrue(is_sorted(bitonic_sort([1, 3, 2, 5, 65,
                                                23, 57, 1232])))

    def test_merge_sort(self):
        self.assertTrue(is_sorted(merge_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_pancake_sort(self):
        self.assertTrue(is_sorted(pancake_sort([1, 3, 2, 5, 65,
                                                23, 57, 1232])))

    def test_pigeonhole_sort(self):
        self.assertTrue(is_sorted(pigeonhole_sort([1, 5, 65, 23, 57, 1232])))

    def test_quick_sort(self):
        self.assertTrue(is_sorted(quick_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_selection_sort(self):
        self.assertTrue(is_sorted(selection_sort([1, 3, 2, 5, 65,
                                                  23, 57, 1232])))

    def test_bucket_sort(self):
        self.assertTrue(is_sorted(bucket_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_shell_sort(self):
        self.assertTrue(is_sorted(shell_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_radix_sort(self):
        self.assertTrue(is_sorted(radix_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_gnome_sort(self):
        self.assertTrue(is_sorted(gnome_sort([1, 3, 2, 5, 65, 23, 57, 1232])))

    def test_cocktail_shaker_sort(self):
        self.assertTrue(is_sorted(cocktail_shaker_sort([1, 3, 2, 5, 65,
                                                        23, 57, 1232])))
class TestTopSort(unittest.TestCase):
    """Exercises both topological-sort variants on a small dependency graph."""

    def setUp(self):
        # adjacency list: key -> list of nodes it points to
        self.depGraph = {
            "a": ["b"],
            "b": ["c"],
            "c": ['e'],
            'e': ['g'],
            "d": [],
            "f": ["e", "d"],
            "g": []
        }

    def test_topsort(self):
        res = top_sort_recursive(self.depGraph)
        # print(res)
        # 'g' must precede 'e' in the returned ordering
        self.assertTrue(res.index('g') < res.index('e'))
        res = top_sort(self.depGraph)
        self.assertTrue(res.index('g') < res.index('e'))


if __name__ == "__main__":
    unittest.main()
| mit | 3d6c12d0587c4f9b2029bde7f6fa23ab | 30.823077 | 79 | 0.495528 | 3.416185 | false | true | false | false |
keon/algorithms | algorithms/strings/knuth_morris_pratt.py | 1 | 1270 | from typing import Sequence, List
def knuth_morris_pratt(text: Sequence, pattern: Sequence) -> List[int]:
    """
    Given two strings text and pattern, return the list of start indexes in
    text that match the pattern using the Knuth-Morris-Pratt algorithm.

    Args:
        text: Text to search
        pattern: Pattern to search in the text
    Returns:
        List of indices of patterns found

    Example:
        >>> knuth_morris_pratt('hello there hero!', 'he')
        [0, 7, 12]

    If idx is in the list, text[idx : idx + M] matches with pattern.
    Time complexity of the algorithm is O(N+M), with N and M the length of
    text and pattern, respectively.
    """
    n = len(text)
    m = len(pattern)
    # An empty pattern matches at every position.  (The original indexed
    # pattern[0] in the scan below and raised IndexError for this case.)
    if m == 0:
        return list(range(n + 1))
    # Build the prefix-function (failure) table pi: pi[i] is the length of
    # the longest proper prefix of pattern[:i+1] that is also a suffix.
    pi = [0] * m
    j = 0
    for i in range(1, m):
        while j and pattern[i] != pattern[j]:
            j = pi[j - 1]
        if pattern[i] == pattern[j]:
            j += 1
        pi[i] = j
    # Scan the text, falling back through pi on mismatches.
    matches = []
    j = 0
    for i in range(n):
        while j and text[i] != pattern[j]:
            j = pi[j - 1]
        if text[i] == pattern[j]:
            j += 1
        if j == m:
            matches.append(i - m + 1)
            j = pi[j - 1]
    return matches
| mit | 829a4bc8b454f3405c86183911f20e32 | 27.863636 | 110 | 0.530709 | 3.567416 | false | false | false | false |
keon/algorithms | algorithms/backtrack/pattern_match.py | 3 | 1316 | """
Given a pattern and a string str,
find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between
a letter in pattern and a non-empty substring in str.
Examples:
pattern = "abab", str = "redblueredblue" should return true.
pattern = "aaaa", str = "asdasdasdasd" should return true.
pattern = "aabb", str = "xyzabcxzyabc" should return false.
Notes:
You may assume both pattern and str contains only lowercase letters.
"""
def pattern_match(pattern, string):
    """
    :type pattern: str
    :type string: str
    :rtype: bool

    Backtracking search for a bijection between pattern letters and
    non-empty substrings of `string`.
    """
    def dfs(pat, rest, mapping):
        # Pattern exhausted: success only if the string is exhausted too.
        if not pat:
            return not rest
        head = pat[0]
        # Leave at least one character for each remaining pattern letter.
        for cut in range(1, len(rest) - len(pat) + 2):
            piece = rest[:cut]
            if head in mapping:
                # Letter already bound: the prefix must match its binding.
                if mapping[head] == piece and dfs(pat[1:], rest[cut:], mapping):
                    return True
            elif piece not in mapping.values():
                # Bind, recurse, and undo the binding on failure (bijection:
                # a substring may serve only one letter).
                mapping[head] = piece
                if dfs(pat[1:], rest[cut:], mapping):
                    return True
                del mapping[head]
        return False

    return dfs(pattern, string, {})
| mit | c8eb385f5e6a25a593de01fd13a17ec7 | 30.333333 | 74 | 0.598784 | 3.905045 | false | false | false | false |
keon/algorithms | algorithms/bfs/shortest_distance_from_all_buildings.py | 2 | 1349 | import collections
"""
do BFS from each building, and decrement all empty place for every building visit
when grid[i][j] == -b_nums, it means that grid[i][j] are already visited from all b_nums
and use dist to record distances from b_nums
"""
def shortest_distance(grid):
    """Return the minimum total travel distance from an empty cell (0) to all
    buildings (1), or -1 when no empty cell can reach every building.

    matrix[i][j] holds [total_distance, buildings_reached] for cell (i, j);
    each building's BFS adds its distances into the same matrix.
    """
    if not grid or not grid[0]:
        return -1
    matrix = [[[0,0] for i in range(len(grid[0]))] for j in range(len(grid))]
    count = 0 # count how many building we have visited
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if grid[i][j] == 1:
                bfs(grid, matrix, i, j, count)
                count += 1
    res = float('inf')
    for i in range(len(matrix)):
        for j in range(len(matrix[0])):
            # only cells reached from every building are candidates
            if matrix[i][j][1]==count:
                res = min(res, matrix[i][j][0])
    return res if res!=float('inf') else -1
def bfs(grid, matrix, i, j, count):
    """BFS from building (i, j), accumulating distances into `matrix`.

    Only empty cells (grid value 0) already reached by the previous `count`
    buildings (matrix[..][1] == count) are visited; each visit adds the step
    distance to matrix[..][0] and bumps the visit counter to count + 1.
    """
    queue = collections.deque([(i, j, 0)])
    while queue:
        row, col, step = queue.popleft()
        for nxt_row, nxt_col in ((row - 1, col), (row + 1, col),
                                 (row, col - 1), (row, col + 1)):
            if (0 <= nxt_row < len(grid) and 0 <= nxt_col < len(grid[0])
                    and matrix[nxt_row][nxt_col][1] == count
                    and grid[nxt_row][nxt_col] == 0):
                matrix[nxt_row][nxt_col][0] += step + 1
                matrix[nxt_row][nxt_col][1] = count + 1
                queue.append((nxt_row, nxt_col, step + 1))
keon/algorithms | algorithms/tree/max_height.py | 1 | 1211 | """
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the
longest path from the root node down to the farthest leaf node.
"""
# def max_height(root):
# if not root:
# return 0
# return max(maxDepth(root.left), maxDepth(root.right)) + 1
# iterative
from tree import TreeNode
def max_height(root):
    """Return the number of levels in the tree (0 for an empty tree), using
    an iterative level-order traversal."""
    if root is None:
        return 0
    depth = 0
    current_level = [root]
    while current_level:
        depth += 1
        next_level = []
        for node in current_level:
            if node.left is not None:
                next_level.append(node.left)
            if node.right is not None:
                next_level.append(node.right)
        current_level = next_level
    return depth
def print_tree(root):
    """Print the tree's values in pre-order (node, left, right)."""
    if root is None:
        return
    print(root.val)
    print_tree(root.left)
    print_tree(root.right)
if __name__ == '__main__':
    # Build the sample tree, compute its height, and dump it in pre-order.
    tree = TreeNode(10)
    tree.left = TreeNode(12)
    tree.right = TreeNode(15)
    tree.left.left = TreeNode(25)
    tree.left.left.right = TreeNode(100)
    tree.left.right = TreeNode(30)
    tree.right.left = TreeNode(36)
    height = max_height(tree)
    print_tree(tree)
    print("height:", height)
| mit | 5ffe177bfbf6ef028bc55add5ba6fd26 | 21.425926 | 63 | 0.58877 | 3.489914 | false | false | false | false |
keon/algorithms | algorithms/map/separate_chaining_hashtable.py | 2 | 2380 | import unittest
class Node(object):
    """Singly-linked chain entry holding one key/value pair."""

    def __init__(self, key=None, value=None, next=None):
        self.key = key
        self.value = value
        self.next = next


class SeparateChainingHashTable(object):
    """
    HashTable Data Type:
    By having each bucket contain a linked list of elements that are hashed to that bucket.

    Usage:
    >>> table = SeparateChainingHashTable() # Create a new, empty map.
    >>> table.put('hello', 'world') # Add a new key-value pair.
    >>> len(table) # Return the number of key-value pairs stored in the map.
    1
    >>> table.get('hello') # Get value by key.
    'world'
    >>> del table['hello'] # Equivalent to `table.del_('hello')`, deleting key-value pair.
    >>> table.get('hello') is None # Return `None` if a key doesn't exist.
    True
    """
    _empty = None

    def __init__(self, size=11):
        self.size = size                       # number of buckets
        self._len = 0                          # number of stored pairs
        self._table = [self._empty] * size

    def put(self, key, value):
        """Insert `key`/`value`, overwriting the value when the key exists.

        Bug fix: the original chain walk (`while node_.next is not None`)
        never examined the tail node, so updating a key stored at the end
        of its chain appended a duplicate node and inflated `_len`.
        """
        hash_ = self.hash(key)
        node_ = self._table[hash_]
        if node_ is self._empty:
            self._table[hash_] = Node(key, value)
        else:
            # Walk every node -- including the tail -- updating in place.
            while True:
                if node_.key == key:
                    node_.value = value
                    return
                if node_.next is None:
                    break
                node_ = node_.next
            node_.next = Node(key, value)
        self._len += 1

    def get(self, key):
        """Return the value stored under `key`, or None when absent."""
        hash_ = self.hash(key)
        node_ = self._table[hash_]
        while node_ is not self._empty:
            if node_.key == key:
                return node_.value
            node_ = node_.next
        return None

    def del_(self, key):
        """Remove `key` from the table; silently does nothing when absent."""
        hash_ = self.hash(key)
        node_ = self._table[hash_]
        pre_node = None
        while node_ is not None:
            if node_.key == key:
                if pre_node is None:
                    self._table[hash_] = node_.next
                else:
                    pre_node.next = node_.next
                self._len -= 1
                return  # keys are unique, so stop after the first match
            pre_node = node_
            node_ = node_.next

    def hash(self, key):
        """Map `key` to a bucket index."""
        return hash(key) % self.size

    def __len__(self):
        return self._len

    def __getitem__(self, key):
        return self.get(key)

    def __delitem__(self, key):
        return self.del_(key)

    def __setitem__(self, key, value):
        self.put(key, value)
| mit | 00acb1fca670d3a8b80204b31dcb8ff8 | 27.333333 | 91 | 0.511765 | 3.908046 | false | false | false | false |
keon/algorithms | algorithms/tree/bst/height.py | 2 | 1441 | """
Write a function height returns the height of a tree. The height is defined to
be the number of levels. The empty tree has height 0, a tree of one node has
height 1, a root node with one or two leaves as children has height 2, and so on
For example: height of tree is 4
9
/ \
6 12
/ \ / \
3 8 10 15
/ \
7 18
height = 4
"""
import unittest
from bst import Node
from bst import bst
def height(root):
    """Return the number of levels in the tree rooted at `root` (0 when empty)."""
    if root is None:
        return 0
    return max(height(root.left), height(root.right)) + 1
"""
The tree is created for testing:
9
/ \
6 12
/ \ / \
3 8 10 15
/ \
7 18
count_left_node = 4
"""
class TestSuite(unittest.TestCase):
    """Builds the tree pictured in the module docstring and checks height()."""

    def setUp(self):
        self.tree = bst()
        self.tree.insert(9)
        self.tree.insert(6)
        self.tree.insert(12)
        self.tree.insert(3)
        self.tree.insert(8)
        self.tree.insert(10)
        self.tree.insert(15)
        self.tree.insert(7)
        self.tree.insert(18)

    def test_height(self):
        # deepest path is 9 -> 6 -> 8 -> 7, i.e. 4 levels
        self.assertEqual(4, height(self.tree.root))


if __name__ == '__main__':
    unittest.main()
| mit | 18f3426298c52daf237d263762e7f0c1 | 23.016667 | 80 | 0.462873 | 4.013928 | false | true | false | false |
keon/algorithms | algorithms/search/jump_search.py | 1 | 1064 | """
Jump Search
Find an element in a sorted array.
"""
import math
def jump_search(arr, target):
    """
    Worst-case Complexity: O(√n) (root(n))
    All items in list must be sorted, like binary search.

    Find the block that may contain the target value, then search that block
    linearly.  Returns the index of the first occurrence of `target`, or -1
    when the array is empty or does not contain it.
    reference: https://en.wikipedia.org/wiki/Jump_search
    """
    length = len(arr)
    # Bug fix: the original indexed arr[length - 1] unconditionally, which
    # raised IndexError on an empty array.
    if length == 0:
        return -1
    block_size = int(math.sqrt(length))
    block_prev = 0
    block = block_size
    # A target larger than the last (largest) element cannot be present.
    if arr[length - 1] < target:
        return -1
    # Jump block by block until we pass a block whose last element >= target.
    while block <= length and arr[block - 1] < target:
        block_prev = block
        block += block_size
    # Linear scan inside the candidate block.
    while arr[block_prev] < target:
        block_prev += 1
        if block_prev == min(block, length):
            return -1
    if arr[block_prev] == target:
        return block_prev
    return -1
| mit | 18b024553c76df29325699f490037417 | 22.6 | 78 | 0.625235 | 3.833935 | false | false | false | false |
keon/algorithms | algorithms/matrix/bomb_enemy.py | 1 | 2560 | """
Given a 2D grid, each cell is either a wall 'W',
an enemy 'E' or empty '0' (the number zero),
return the maximum enemies you can kill using one bomb.
The bomb kills all the enemies in the same row and column from
the planted point until it hits the wall since the wall is too strong
to be destroyed.
Note that you can only put the bomb at an empty cell.
Example:
For the given grid
0 E 0 0
E 0 W E
0 E 0 0
return 3. (Placing a bomb at (1,1) kills 3 enemies)
"""
def max_killed_enemies(grid):
    """Return the most enemies a single bomb can kill when placed on an
    empty cell ('0'); the blast travels along the row and column until a
    wall ('W').

    Row/column kill counts are recomputed only at a row start / column start
    or just after a wall, and reused for every cell in that segment.
    """
    if not grid:
        return 0
    rows, cols = len(grid), len(grid[0])
    best = 0
    row_hits, col_hits = 0, [0] * cols
    for r in range(rows):
        for c in range(cols):
            # At a wall boundary (or edge), recompute the row segment count.
            if c == 0 or grid[r][c - 1] == 'W':
                row_hits = row_kills(grid, r, c)
            # Same for the column segment.
            if r == 0 or grid[r - 1][c] == 'W':
                col_hits[c] = col_kills(grid, r, c)
            # Only empty cells can host the bomb.
            if grid[r][c] == '0':
                best = max(best, row_hits + col_hits[c])
    return best
def row_kills(grid, i, j):
    """Count enemies ('E') in row `i` from column `j` rightward, stopping at
    the first wall ('W') or the row's end."""
    killed = 0
    width = len(grid[0])
    while j < width and grid[i][j] != 'W':
        if grid[i][j] == 'E':
            killed += 1
        j += 1
    return killed
def col_kills(grid, i, j):
    """Count enemies ('E') in column `j` from row `i` downward, stopping at
    the first wall ('W') or the column's end."""
    killed = 0
    height = len(grid)
    while i < height and grid[i][j] != 'W':
        if grid[i][j] == 'E':
            killed += 1
        i += 1
    return killed
# ----------------- TESTS -------------------------
"""
Testsuite for the project
"""
import unittest
class TestBombEnemy(unittest.TestCase):
    """Checks max_killed_enemies against the example grids."""

    def test_3x4(self):
        # best placement is (1, 1): kills 3 enemies
        grid1 = [["0", "E", "0", "0"],
                 ["E", "0", "W", "E"],
                 ["0", "E", "0", "0"]]
        self.assertEqual(3, max_killed_enemies(grid1))

    def test_4x4(self):
        grid1 = [
            ["0", "E", "0", "E"],
            ["E", "E", "E", "0"],
            ["E", "0", "W", "E"],
            ["0", "E", "0", "0"]]
        grid2 = [
            ["0", "0", "0", "E"],
            ["E", "0", "0", "0"],
            ["E", "0", "W", "E"],
            ["0", "E", "0", "0"]]
        self.assertEqual(5, max_killed_enemies(grid1))
        self.assertEqual(3, max_killed_enemies(grid2))


if __name__ == "__main__":
    unittest.main()
| mit | 78ae40e23c6331653c32765c1d5ed0bc | 25.122449 | 69 | 0.475391 | 3.043995 | false | true | false | false |
keon/algorithms | algorithms/strings/is_palindrome.py | 1 | 2480 | """
Given a string, determine if it is a palindrome,
considering only alphanumeric characters and ignoring cases.
For example,
"A man, a plan, a canal: Panama" is a palindrome.
"race a car" is not a palindrome.
Note:
Have you consider that the string might be empty?
This is a good question to ask during an interview.
For the purpose of this problem,
we define empty string as valid palindrome.
"""
from string import ascii_letters
from collections import deque
def is_palindrome(s):
    """
    :type s: str
    :rtype: bool

    Two-pointer scan comparing alphanumeric characters case-insensitively.
    The empty string counts as a valid palindrome.

    Bug fix: the skip loops are bounded by ``i < j`` -- the original could
    run a pointer off the end of the string (IndexError) for inputs made
    entirely of non-alphanumeric characters, e.g. "!!".
    """
    i = 0
    j = len(s) - 1
    while i < j:
        # Skip non-alphanumeric characters without leaving the bounds.
        while i < j and not s[i].isalnum():
            i += 1
        while i < j and not s[j].isalnum():
            j -= 1
        if s[i].lower() != s[j].lower():
            return False
        i, j = i + 1, j - 1
    return True
"""
Here is a bunch of other variations of is_palindrome function.
Variation 1:
Find the reverse of the string and compare it with the original string
Variation 2:
Loop from the start to length/2 and check the first character and last character
and so on... for instance s[0] compared with s[n-1], s[1] == s[n-2]...
Variation 3:
Using stack idea.
Note: We are assuming that we are just checking a one word string. To check if a complete sentence
"""
def remove_punctuation(s):
    """
    Remove punctuation, case sensitivity and spaces.
    Keeps only ASCII letters, lower-cased (note: digits are dropped too).
    """
    kept = [ch.lower() for ch in s if ch in ascii_letters]
    return "".join(kept)
# Variation 1
def string_reverse(s):
    """Return the string `s` reversed."""
    return "".join(reversed(s))
def is_palindrome_reverse(s):
    """Variation 1: normalize the string and compare it with its reverse."""
    cleaned = remove_punctuation(s)
    # could be done in one line: cleaned == cleaned[::-1]
    return cleaned == string_reverse(cleaned)
# Variation 2
def is_palindrome_two_pointer(s):
    """Variation 2: compare mirrored characters from both ends inward."""
    cleaned = remove_punctuation(s)
    n = len(cleaned)
    for i in range(n // 2):
        if cleaned[i] != cleaned[n - i - 1]:
            return False
    return True
# Variation 3
def is_palindrome_stack(s):
    """Variation 3: push the second half onto a stack, then pop while
    scanning the first half -- matching characters mean a palindrome."""
    cleaned = remove_punctuation(s)
    half = len(cleaned) // 2
    stack = list(cleaned[half:])
    for ch in cleaned[:half]:
        if ch != stack.pop():
            return False
    return True
# Variation 4 (using deque)
def is_palindrome_deque(s):
    """Variation 4: load a deque and compare both ends until at most one
    character remains."""
    cleaned = remove_punctuation(s)
    dq = deque()
    for ch in cleaned:
        dq.appendleft(ch)
    while len(dq) > 1:
        if dq.pop() != dq.popleft():
            return False
    return True
| mit | ae88b5c17e529b0eccc05396d90661d4 | 22.619048 | 100 | 0.619355 | 3.27609 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.