repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mozilla/pontoon | pontoon/sync/tests/formats/test_compare_locales.py | 2 | 9468 | import os
import shutil
import tempfile
from textwrap import dedent
import pytest
from pontoon.base.tests import (
create_named_tempfile,
LocaleFactory,
TestCase,
)
from pontoon.sync.exceptions import ParseError
from pontoon.sync.formats import compare_locales
from pontoon.sync.tests.formats import FormatTestsMixin
class CompareLocalesResourceTests(TestCase):
    """Tests for constructing and saving compare_locales resources."""

    def setUp(self):
        super().setUp()
        # Isolated scratch directory for resource files; removed in tearDown.
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tempdir)

    def get_invalid_file_path(self):
        # A filename for which compare-locales has no parser
        # (Android XML files are expected to contain the word "strings").
        return os.path.join(self.tempdir, "invalid.xml")

    def get_nonexistant_file_path(self):
        # A parseable-looking path that is never actually created on disk.
        return os.path.join(self.tempdir, "strings.xml")

    def get_nonexistant_file_resource(self, path):
        """Return a CompareLocalesResource for the missing translated file at
        ``path``, backed by a freshly written source resource containing a
        single string.
        """
        contents = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="source-string">Source String</string>
            </resources>
        """
        )

        source_path = create_named_tempfile(
            contents,
            prefix="strings",
            suffix=".xml",
            directory=self.tempdir,
        )
        source_resource = compare_locales.CompareLocalesResource(source_path)
        return compare_locales.CompareLocalesResource(
            path,
            source_resource=source_resource,
        )

    def test_init_invalid_resource(self):
        """
        If no parser can be found for the translated resource,
        raise a ParseError.
        """
        path = self.get_invalid_file_path()
        with pytest.raises(ParseError):
            compare_locales.CompareLocalesResource(path, source_resource=None)

    def test_init_missing_resource(self):
        """
        If the translated resource doesn't exist and no source resource is
        given, raise a ParseError.
        """
        path = self.get_nonexistant_file_path()
        with pytest.raises(ParseError):
            compare_locales.CompareLocalesResource(path, source_resource=None)

    def test_init_missing_resource_with_source(self):
        """
        If the translated resource doesn't exist but a source resource is
        given, return a resource with empty translations.
        """
        path = self.get_nonexistant_file_path()
        translated_resource = self.get_nonexistant_file_resource(path)

        # One entity inherited from the source, with no translated strings yet.
        assert len(translated_resource.translations) == 1
        assert translated_resource.translations[0].strings == {}

    def test_save_create_dirs(self):
        """
        If the directories in a resource's path don't exist, create them on
        save.
        """
        path = self.get_nonexistant_file_path()
        translated_resource = self.get_nonexistant_file_resource(path)
        translated_resource.translations[0].strings = {None: "New Translated String"}

        assert not os.path.exists(path)
        translated_resource.save(LocaleFactory.create())
        assert os.path.exists(path)
BASE_ANDROID_XML_FILE = """<?xml version="1.0" encoding="utf-8"?>
<resources>
<!-- Sample comment -->
<string name="Source String">Translated String</string>
<!-- First comment -->
<!-- Second comment -->
<string name="Multiple Comments">Translated Multiple Comments</string>
<string name="No Comments or Sources">Translated No Comments or Sources</string>
<string name="Empty Translation"></string>
</resources>
"""
class AndroidXMLTests(FormatTestsMixin, TestCase):
    """Android strings.xml format tests.

    The generic scenarios live in FormatTestsMixin and are parameterized here
    with Android XML fixtures.
    """

    parse = staticmethod(compare_locales.parse)
    # Android XML entities have no separate keys, source attributes, or source
    # strings, so the mixin skips those checks.
    supports_keys = False
    supports_source = False
    supports_source_string = False

    def setUp(self):
        super().setUp()
        # Scratch directory for generated strings.xml files; removed in tearDown.
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tempdir)

    def parse_string(
        self,
        string,
        source_string=None,
        locale=None,
        path=None,
        source_path=None,
    ):
        """Android XML files must contain the word 'strings'."""
        # NOTE(review): the incoming `path` (and `source_path`, when
        # `source_string` is given) arguments are unconditionally replaced with
        # freshly created temp files -- callers cannot supply their own paths.
        path = create_named_tempfile(
            string,
            prefix="strings",
            suffix=".xml",
            directory=self.tempdir,
        )
        if source_string is not None:
            source_path = create_named_tempfile(
                source_string,
                prefix="strings",
                suffix=".xml",
                directory=self.tempdir,
            )
        return super().parse_string(
            string,
            source_string=source_string,
            locale=locale,
            path=path,
            source_path=source_path,
        )

    # The integer arguments below index entities within BASE_ANDROID_XML_FILE.
    def test_parse_basic(self):
        self.run_parse_basic(BASE_ANDROID_XML_FILE, 0)

    def test_parse_multiple_comments(self):
        self.run_parse_multiple_comments(BASE_ANDROID_XML_FILE, 1)

    def test_parse_no_comments_no_sources(self):
        self.run_parse_no_comments_no_sources(BASE_ANDROID_XML_FILE, 2)

    def test_parse_empty_translation(self):
        self.run_parse_empty_translation(BASE_ANDROID_XML_FILE, 3)

    def test_save_basic(self):
        input_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <!-- Comment -->
                <string name="Source String">Source String</string>
            </resources>
        """
        )
        expected_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <!-- Comment -->
                <string name="Source String">New Translated String</string>
            </resources>
        """
        )
        self.run_save_basic(input_string, expected_string, source_string=input_string)

    def test_save_remove(self):
        """Deleting strings removes them completely from the XML file."""
        input_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <!-- Comment -->
                <string name="Source String">Source String</string>
            </resources>
        """
        )
        expected_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
            </resources>
        """
        )
        self.run_save_remove(input_string, expected_string, source_string=input_string)

    def test_save_source_removed(self):
        """
        If an entity is missing from the source resource, remove it from
        the translated resource.
        """
        source_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="Source String">Source String</string>
            </resources>
        """
        )
        input_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="Missing Source String">Translated Missing String</string>
                <string name="Source String">Translated String</string>
            </resources>
        """
        )
        expected_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="Source String">Translated String</string>
            </resources>
        """
        )
        self.run_save_no_changes(
            input_string, expected_string, source_string=source_string
        )

    def test_save_source_no_translation(self):
        """
        If an entity is missing from the translated resource and has no
        translation, do not add it back in.
        """
        source_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="Source String">Source String</string>
                <string name="Other Source String">Other String</string>
            </resources>
        """
        )
        input_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="Other Source String">Translated Other String</string>
            </resources>
        """
        )
        # Output is expected to be identical to the input.
        self.run_save_no_changes(
            input_string, input_string, source_string=source_string
        )

    def test_save_translation_missing(self):
        source_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="String">Source String</string>
                <!-- Missing String Comment -->
                <string name="Missing String">Missing Source String</string>
            </resources>
        """
        )
        input_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="String">Translated String</string>
            </resources>
        """
        )
        expected_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="String">Translated String</string>
                <!-- Missing String Comment -->
                <string name="Missing String">Translated Missing String</string>
            </resources>
        """
        )
        self.run_save_translation_missing(source_string, input_string, expected_string)

    def test_save_translation_identical(self):
        source_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="String">Source String</string>
            </resources>
        """
        )
        input_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="String">Translated String</string>
            </resources>
        """
        )
        expected_string = dedent(
            """<?xml version="1.0" encoding="utf-8"?>
            <resources>
                <string name="String">Source String</string>
            </resources>
        """
        )
        self.run_save_translation_identical(
            source_string, input_string, expected_string
        )
| bsd-3-clause | 9df047c488cfe756c1a1abe31fae9743 | 28.495327 | 87 | 0.59548 | 4.150811 | false | true | false | false |
mozilla/pontoon | pontoon/base/migrations/0029_external_accounts.py | 2 | 1039 | # Generated by Django 3.2.13 on 2022-08-03 19:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add external-account fields (Bugzilla, GitHub, Matrix) to UserProfile."""

    dependencies = [
        ("base", "0028_userprofile_new_contributor_notifications"),
    ]

    operations = [
        migrations.AddField(
            model_name="userprofile",
            name="bugzilla",
            field=models.EmailField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Bugzilla email address",
            ),
        ),
        migrations.AddField(
            model_name="userprofile",
            name="github",
            # Fixed: the github/matrix verbose_name labels were swapped
            # (github was labelled "Matrix username" and vice versa).
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="GitHub username"
            ),
        ),
        migrations.AddField(
            model_name="userprofile",
            name="matrix",
            field=models.CharField(
                blank=True, max_length=255, null=True, verbose_name="Matrix username"
            ),
        ),
    ]
| bsd-3-clause | bbc2cdf98eb535d2288fb62c27ffb4c2 | 27.081081 | 85 | 0.524543 | 4.68018 | false | false | false | false |
pkkid/python-plexapi | plexapi/sync.py | 1 | 13729 | # -*- coding: utf-8 -*-
"""
You can work with Mobile Sync on other devices straight away, but if you'd like to use your app as a `sync-target` (when
you can set items to be synced to your app) you need to init some variables.
.. code-block:: python
def init_sync():
import plexapi
plexapi.X_PLEX_PROVIDES = 'sync-target'
plexapi.BASE_HEADERS['X-Plex-Sync-Version'] = '2'
plexapi.BASE_HEADERS['X-Plex-Provides'] = plexapi.X_PLEX_PROVIDES
# mimic iPhone SE
plexapi.X_PLEX_PLATFORM = 'iOS'
plexapi.X_PLEX_PLATFORM_VERSION = '11.4.1'
plexapi.X_PLEX_DEVICE = 'iPhone'
plexapi.BASE_HEADERS['X-Plex-Platform'] = plexapi.X_PLEX_PLATFORM
plexapi.BASE_HEADERS['X-Plex-Platform-Version'] = plexapi.X_PLEX_PLATFORM_VERSION
plexapi.BASE_HEADERS['X-Plex-Device'] = plexapi.X_PLEX_DEVICE
You have to fake platform/device/model because transcoding profiles are hardcoded in Plex, and you obviously have
to explicitly specify that your app supports `sync-target`.
"""
import requests
import plexapi
from plexapi.base import PlexObject
from plexapi.exceptions import NotFound, BadRequest
class SyncItem(PlexObject):
    """
    Represents single sync item, for specified server and client. When you say in the UI to sync "this" to "that"
    you're basically creating a sync item.

    Attributes:
        id (int): unique id of the item.
        clientIdentifier (str): an identifier of Plex Client device, to which the item is belongs.
        machineIdentifier (str): the id of server which holds all this content.
        version (int): current version of the item. Each time you modify the item (e.g. by changing amount if media to
            sync) the new version is created.
        rootTitle (str): the title of library/media from which the sync item was created. E.g.:
            * when you create an item for an episode 3 of season 3 of show Example, the value would be `Title of
              Episode 3`
            * when you create an item for a season 3 of show Example, the value would be `Season 3`
            * when you set to sync all your movies in library named "My Movies" to value would be `My Movies`.
        title (str): the title which you've set when created the sync item.
        metadataType (str): the type of media which hides inside, can be `episode`, `movie`, etc.
        contentType (str): basic type of the content: `video` or `audio`.
        status (:class:`~plexapi.sync.Status`): current status of the sync.
        mediaSettings (:class:`~plexapi.sync.MediaSettings`): media transcoding settings used for the item.
        policy (:class:`~plexapi.sync.Policy`): the policy of which media to sync.
        location (str): plex-style library url with all required filters / sorting.
    """
    TAG = 'SyncItem'

    def __init__(self, server, data, initpath=None, clientIdentifier=None):
        super(SyncItem, self).__init__(server, data, initpath)
        # Client device this item belongs to; not part of the XML payload,
        # so it must be supplied by the caller (see SyncList._loadData).
        self.clientIdentifier = clientIdentifier

    def _loadData(self, data):
        """ Load attribute values from the Plex XML response. """
        self._data = data
        self.id = plexapi.utils.cast(int, data.attrib.get('id'))
        self.version = plexapi.utils.cast(int, data.attrib.get('version'))
        self.rootTitle = data.attrib.get('rootTitle')
        self.title = data.attrib.get('title')
        self.metadataType = data.attrib.get('metadataType')
        self.contentType = data.attrib.get('contentType')
        # Nested child elements carry the owning server and the sync config.
        self.machineIdentifier = data.find('Server').get('machineIdentifier')
        self.status = Status(**data.find('Status').attrib)
        self.mediaSettings = MediaSettings(**data.find('MediaSettings').attrib)
        self.policy = Policy(**data.find('Policy').attrib)
        self.location = data.find('Location').attrib.get('uri', '')

    def server(self):
        """ Returns :class:`~plexapi.myplex.MyPlexResource` with server of current item. """
        # Match the owning server among the account's resources by its uuid.
        server = [s for s in self._server.resources() if s.clientIdentifier == self.machineIdentifier]
        if len(server) == 0:
            raise NotFound(f'Unable to find server with uuid {self.machineIdentifier}')
        return server[0]

    def getMedia(self):
        """ Returns list of :class:`~plexapi.base.Playable` which belong to this sync item. """
        server = self.server().connect()
        key = f'/sync/items/{self.id}'
        return server.fetchItems(key)

    def markDownloaded(self, media):
        """ Mark the file as downloaded (by the nature of Plex it will be marked as downloaded within
            any SyncItem where it presented).

            Parameters:
                media (base.Playable): the media to be marked as downloaded.
        """
        url = f'/sync/{self.clientIdentifier}/item/{media.ratingKey}/downloaded'
        # PUT against the media's own server, not the server this object came from.
        media._server.query(url, method=requests.put)

    def delete(self):
        """ Removes current SyncItem """
        url = SyncList.key.format(clientId=self.clientIdentifier)
        url += '/' + str(self.id)
        self._server.query(url, self._server._session.delete)
class SyncList(PlexObject):
    """ Represents a Mobile Sync state, specific for single client, within one SyncList may be presented
        items from different servers.

        Attributes:
            clientId (str): an identifier of the client.
            items (List<:class:`~plexapi.sync.SyncItem`>): list of registered items to sync.
    """
    key = 'https://plex.tv/devices/{clientId}/sync_items'
    TAG = 'SyncList'

    def _loadData(self, data):
        """ Load attribute values from the Plex XML response. """
        self._data = data
        self.clientId = data.attrib.get('clientIdentifier')
        self.items = []

        syncItems = data.find('SyncItems')
        # Fixed: `find` returns None when the tag is absent; comparing against
        # None explicitly (instead of truth-testing the Element) also avoids
        # ElementTree's deprecated behavior where a childless element is falsy.
        if syncItems is not None:
            for sync_item in syncItems.iter('SyncItem'):
                item = SyncItem(self._server, sync_item, clientIdentifier=self.clientId)
                self.items.append(item)
class Status:
    """ Represents a current status of specific :class:`~plexapi.sync.SyncItem`.

        Attributes:
            failureCode: unknown, never got one yet.
            failure: unknown.
            state (str): server-side status of the item, can be `completed`, `pending`, empty, and probably something
                else.
            itemsCount (int): total items count.
            itemsCompleteCount (int): count of transcoded and/or downloaded items.
            itemsDownloadedCount (int): count of downloaded items.
            itemsReadyCount (int): count of transcoded items, which can be downloaded.
            totalSize (int): total size in bytes of complete items.
            itemsSuccessfulCount (int): unknown, in my experience it always was equal to `itemsCompleteCount`.
    """

    def __init__(self, itemsCount, itemsCompleteCount, state, totalSize, itemsDownloadedCount, itemsReadyCount,
                 itemsSuccessfulCount, failureCode, failure):
        # All counters arrive as XML attribute strings; coerce them to ints.
        cast = plexapi.utils.cast
        self.itemsCount = cast(int, itemsCount)
        self.itemsCompleteCount = cast(int, itemsCompleteCount)
        self.itemsDownloadedCount = cast(int, itemsDownloadedCount)
        self.itemsReadyCount = cast(int, itemsReadyCount)
        self.itemsSuccessfulCount = cast(int, itemsSuccessfulCount)
        self.totalSize = cast(int, totalSize)
        # Non-numeric fields are stored verbatim.
        self.state = state
        self.failureCode = failureCode
        self.failure = failure

    def __repr__(self):
        counters = dict(
            itemsCount=self.itemsCount,
            itemsCompleteCount=self.itemsCompleteCount,
            itemsDownloadedCount=self.itemsDownloadedCount,
            itemsReadyCount=self.itemsReadyCount,
            itemsSuccessfulCount=self.itemsSuccessfulCount
        )
        return f'<{self.__class__.__name__}>:{counters}'
class MediaSettings:
    """ Transcoding settings used for all media within :class:`~plexapi.sync.SyncItem`.

        Attributes:
            audioBoost (int): unknown.
            maxVideoBitrate (int|str): maximum bitrate for video, may be empty string.
            musicBitrate (int|str): maximum bitrate for music, may be an empty string.
            photoQuality (int): photo quality on scale 0 to 100.
            photoResolution (str): maximum photo resolution, formatted as WxH (e.g. `1920x1080`).
            videoResolution (str): maximum video resolution, formatted as WxH (e.g. `1280x720`, may be empty).
            subtitleSize (int): subtitle size on scale 0 to 100.
            videoQuality (int): video quality on scale 0 to 100.
    """

    def __init__(self, maxVideoBitrate=4000, videoQuality=100, videoResolution='1280x720', audioBoost=100,
                 musicBitrate=192, photoQuality=74, photoResolution='1920x1080', subtitleSize=100):
        # An empty string is a deliberate sentinel meaning "no limit / keep
        # original"; it is preserved as-is, everything else is cast to int.
        self.audioBoost = plexapi.utils.cast(int, audioBoost)
        self.maxVideoBitrate = plexapi.utils.cast(int, maxVideoBitrate) if maxVideoBitrate != '' else ''
        self.musicBitrate = plexapi.utils.cast(int, musicBitrate) if musicBitrate != '' else ''
        self.photoQuality = plexapi.utils.cast(int, photoQuality) if photoQuality != '' else ''
        self.photoResolution = photoResolution
        self.videoResolution = videoResolution
        self.subtitleSize = plexapi.utils.cast(int, subtitleSize) if subtitleSize != '' else ''
        self.videoQuality = plexapi.utils.cast(int, videoQuality) if videoQuality != '' else ''

    @staticmethod
    def createVideo(videoQuality):
        """ Returns a :class:`~plexapi.sync.MediaSettings` object, based on provided video quality value.

            Parameters:
                videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in this module.

            Raises:
                :exc:`~plexapi.exceptions.BadRequest`: When provided unknown video quality.
        """
        if videoQuality == VIDEO_QUALITY_ORIGINAL:
            # Empty strings request untranscoded ("original") quality.
            return MediaSettings('', '', '')
        elif videoQuality < len(VIDEO_QUALITIES['bitrate']):
            # videoQuality is an index into the parallel VIDEO_QUALITIES lists.
            return MediaSettings(VIDEO_QUALITIES['bitrate'][videoQuality],
                                 VIDEO_QUALITIES['videoQuality'][videoQuality],
                                 VIDEO_QUALITIES['videoResolution'][videoQuality])
        else:
            raise BadRequest('Unexpected video quality')

    @staticmethod
    def createMusic(bitrate):
        """ Returns a :class:`~plexapi.sync.MediaSettings` object, based on provided music quality value

            Parameters:
                bitrate (int): maximum bitrate for synchronized music, better use one of MUSIC_BITRATE_* values from the
                               module
        """
        return MediaSettings(musicBitrate=bitrate)

    @staticmethod
    def createPhoto(resolution):
        """ Returns a :class:`~plexapi.sync.MediaSettings` object, based on provided photo quality value.

            Parameters:
                resolution (str): maximum allowed resolution for synchronized photos, see PHOTO_QUALITY_* values in the
                                  module.

            Raises:
                :exc:`~plexapi.exceptions.BadRequest`: When provided unknown photo quality.
        """
        if resolution in PHOTO_QUALITIES:
            return MediaSettings(photoQuality=PHOTO_QUALITIES[resolution], photoResolution=resolution)
        else:
            raise BadRequest('Unexpected photo quality')
class Policy:
    """ Policy of syncing the media (how many items to sync and process watched media or not).

        Attributes:
            scope (str): type of limitation policy, can be `count` or `all`.
            value (int): amount of media to sync, valid only when `scope=count`.
            unwatched (bool): True means disallow to sync watched media.
    """

    def __init__(self, scope, unwatched, value=0):
        self.scope = scope
        self.unwatched = plexapi.utils.cast(bool, unwatched)
        self.value = plexapi.utils.cast(int, value)

    @staticmethod
    def create(limit=None, unwatched=False):
        """ Build a :class:`~plexapi.sync.Policy` for the provided options,
            choosing the proper `scope` automatically.

            Parameters:
                limit (int): limit items by count.
                unwatched (bool): if True then watched items wouldn't be synced.

            Returns:
                :class:`~plexapi.sync.Policy`.
        """
        # No limit means "sync everything"; otherwise limit by count.
        if limit is None:
            return Policy('all', unwatched, 0)
        return Policy('count', unwatched, limit)
# Transcoding presets addressed by the VIDEO_QUALITY_* index constants below;
# the three lists are parallel: bitrate (kbps), maximum resolution (WxH) and
# quality percentage. NOTE(review): some bitrate entries are floats (2e3 etc.),
# presumably interchangeable with ints downstream -- confirm before "fixing".
VIDEO_QUALITIES = {
    'bitrate': [64, 96, 208, 320, 720, 1500, 2e3, 3e3, 4e3, 8e3, 1e4, 12e3, 2e4],
    'videoResolution': ['220x128', '220x128', '284x160', '420x240', '576x320', '720x480', '1280x720', '1280x720',
                        '1280x720', '1920x1080', '1920x1080', '1920x1080', '1920x1080'],
    'videoQuality': [10, 20, 30, 30, 40, 60, 60, 75, 100, 60, 75, 90, 100],
}

# Indexes into the VIDEO_QUALITIES lists above (see MediaSettings.createVideo).
VIDEO_QUALITY_0_2_MBPS = 2
VIDEO_QUALITY_0_3_MBPS = 3
VIDEO_QUALITY_0_7_MBPS = 4
VIDEO_QUALITY_1_5_MBPS_480p = 5
VIDEO_QUALITY_2_MBPS_720p = 6
VIDEO_QUALITY_3_MBPS_720p = 7
VIDEO_QUALITY_4_MBPS_720p = 8
VIDEO_QUALITY_8_MBPS_1080p = 9
VIDEO_QUALITY_10_MBPS_1080p = 10
VIDEO_QUALITY_12_MBPS_1080p = 11
VIDEO_QUALITY_20_MBPS_1080p = 12
VIDEO_QUALITY_ORIGINAL = -1  # sentinel: sync without transcoding

# Suggested music bitrates (kbps) for MediaSettings.createMusic().
AUDIO_BITRATE_96_KBPS = 96
AUDIO_BITRATE_128_KBPS = 128
AUDIO_BITRATE_192_KBPS = 192
AUDIO_BITRATE_320_KBPS = 320

# Maps a photo resolution to the photoQuality percentage used when syncing
# (see MediaSettings.createPhoto).
PHOTO_QUALITIES = {
    '720x480': 24,
    '1280x720': 49,
    '1920x1080': 74,
    '3840x2160': 99,
}

# Convenience aliases for the PHOTO_QUALITIES keys.
PHOTO_QUALITY_HIGHEST = PHOTO_QUALITY_2160p = '3840x2160'
PHOTO_QUALITY_HIGH = PHOTO_QUALITY_1080p = '1920x1080'
PHOTO_QUALITY_MEDIUM = PHOTO_QUALITY_720p = '1280x720'
PHOTO_QUALITY_LOW = PHOTO_QUALITY_480p = '720x480'
| bsd-3-clause | 0ea226968927c3d72c94e246657fcae3 | 42.86262 | 120 | 0.644402 | 3.846736 | false | false | false | false |
pkkid/python-plexapi | tools/version_bump.py | 2 | 3173 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper script to bump the current version."""
import argparse
import re
import subprocess
from packaging.version import Version
from plexapi import const
SUPPORTED_BUMP_TYPES = ["patch", "minor", "major"]
def _bump_release(release, bump_type):
"""Return a bumped release tuple consisting of 3 numbers."""
major, minor, patch = release
if bump_type == "patch":
patch += 1
elif bump_type == "minor":
minor += 1
patch = 0
elif bump_type == "major":
major += 1
minor = 0
patch = 0
return major, minor, patch
def bump_version(version, bump_type):
    """Return a new version given a current version and action."""
    new_release = _bump_release(version.release, bump_type)
    # NOTE(review): this rewrites the private `_version` namedtuple of a
    # placeholder Version, replacing only the release numbers while keeping
    # any epoch/pre/post/dev parts of the original -- it relies on
    # `packaging` internals, so verify on packaging upgrades.
    temp = Version("0")
    temp._version = version._version._replace(release=new_release)
    return Version(str(temp))
def _substitute_versions(content, version):
    """Return *content* with the MAJOR/MINOR/PATCH_VERSION constants rewritten
    to the dot-separated components of *version*."""
    version_names = ["MAJOR", "MINOR", "PATCH"]
    version_values = str(version).split(".", 2)
    for name, value in zip(version_names, version_values):
        version_line = f"{name}_VERSION = "
        # The prefix contains no regex metacharacters, so it is safe verbatim.
        content = re.sub(f"{version_line}.*\n", f"{version_line}{value}\n", content)
    return content


def write_version(version):
    """Update plexapi constant file with new version.

    Fixes: the original rebound `content = f.write(content)` (the write
    return value) for no reason, and opened the file without an explicit
    encoding; both are corrected here.
    """
    with open("plexapi/const.py", encoding="utf-8") as f:
        content = f.read()

    with open("plexapi/const.py", "wt", encoding="utf-8") as f:
        f.write(_substitute_versions(content, version))
def main():
    """Execute script: parse CLI options, bump the version, optionally commit/tag."""
    parser = argparse.ArgumentParser(description="Bump version of plexapi")
    parser.add_argument(
        "bump_type",
        help="The type of version bump to perform",
        choices=SUPPORTED_BUMP_TYPES,
    )
    parser.add_argument(
        "--commit", action="store_true", help="Create a version bump commit"
    )
    parser.add_argument(
        "--tag", action="store_true", help="Tag the commit with the release version"
    )
    arguments = parser.parse_args()

    # A tag without a commit makes no sense; reject the combination up front.
    if arguments.tag and not arguments.commit:
        parser.error("--tag requires use of --commit")

    # Refuse to commit over uncommitted changes; `git diff --quiet` exits 1
    # when the working tree is dirty.
    if arguments.commit and subprocess.run(["git", "diff", "--quiet"]).returncode == 1:
        print("Cannot use --commit because git is dirty")
        return

    current = Version(const.__version__)
    bumped = bump_version(current, arguments.bump_type)
    assert bumped > current, "Bumped version is not newer than old version"
    write_version(bumped)

    if not arguments.commit:
        return

    # `-n` skips pre-commit hooks, `-a` stages the modified const.py.
    subprocess.run(["git", "commit", "-nam", f"Release {bumped}"])
    if arguments.tag:
        subprocess.run(["git", "tag", str(bumped), "-m", f"Release {bumped}"])
def test_bump_version():
    """Make sure it all works."""
    # Fixed: removed an unused `import pytest` -- nothing here uses pytest APIs.
    assert bump_version(Version("4.7.0"), "patch") == Version("4.7.1")
    assert bump_version(Version("4.7.0"), "minor") == Version("4.8.0")
    assert bump_version(Version("4.7.3"), "minor") == Version("4.8.0")
    assert bump_version(Version("4.7.0"), "major") == Version("5.0.0")
    assert bump_version(Version("4.7.3"), "major") == Version("5.0.0")
    assert bump_version(Version("5.0.0"), "major") == Version("6.0.0")
assert bump_version(Version("5.0.0"), "major") == Version("6.0.0")
# Allow running directly: `python tools/version_bump.py <bump_type> [--commit] [--tag]`.
if __name__ == "__main__":
    main()
| bsd-3-clause | 56cd5accb262b0506aeb42cec29f63ec | 28.933962 | 87 | 0.617082 | 3.597506 | false | false | false | false |
pkkid/python-plexapi | tools/plex-listdocattrs.py | 1 | 1356 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plex-ListDocAttrs is used during development of PlexAPI.
Example usage: AttDS(dict or object).write()
"""
import re
from collections import OrderedDict
def type_finder(s):
    """Return the dotted type name of *s* (e.g. 'int', 'collections.OrderedDict'),
    or an empty string if it cannot be extracted."""
    # repr of a type looks like "<class 'int'>"; grab what's between the quotes.
    match = re.search("'(.+)'", str(type(s)))
    return match.group(1) if match else ''
class AttDS:
    """ Helper that prints docstring attrs.

    Builds Google-style ``name (type): details`` lines for every attribute of
    an object (or every key of a dict), sorted by name.

    Parameters:
        o (object|dict): object (introspected via ``__dict__``) or plain dict.
        keys (iterable, optional): attribute names whose values are appended as
            ``key=value`` hints on every line; defaults to all keys of *o*.
        style (str): docstring style; only 'google' is supported.

    Raises:
        ValueError: if an unsupported *style* is requested. (Previously an
            unknown style left ``self.template`` undefined and crashed later
            in ``parse()`` with an AttributeError.)
    """

    def __init__(self, o, keys=None, style='google'):
        self.__o = o
        if isinstance(o, dict):
            self._as_dict = o
        else:
            self._as_dict = o.__dict__
        self.o = self._as_dict.items()
        self.keys = self._as_dict.keys() if keys is None else keys
        if style != 'google':
            # Fail early with a clear error instead of an AttributeError later.
            raise ValueError(f'Unsupported docstring style: {style!r}')
        self.template = '%s (%s): %s'
        self.res_dict = OrderedDict()
        self.parse()

    def parse(self):
        """Populate ``res_dict`` with one formatted line per attribute."""
        for k, v in sorted(self.o, key=lambda kv: kv[0]):
            # Append `key=value` hints for each requested key (empty when none).
            ds = ''.join('%s=%s ' % (key, self._as_dict.get(key, '')) for key in self.keys)
            self.res_dict[k] = self.template % (k, type_finder(v), ds)

    def write(self):
        """Print the formatted attribute lines to stdout."""
        for line in self.res_dict.values():
            print(line)
| bsd-3-clause | c4bc05b15b7b5172381f7803dc0f9dbb | 25.588235 | 70 | 0.487463 | 3.503876 | false | false | false | false |
pkkid/python-plexapi | plexapi/server.py | 1 | 60071 | # -*- coding: utf-8 -*-
from urllib.parse import urlencode
from xml.etree import ElementTree
import requests
import os
from plexapi import (BASE_HEADERS, CONFIG, TIMEOUT, X_PLEX_CONTAINER_SIZE, log,
logfilter)
from plexapi import utils
from plexapi.alert import AlertListener
from plexapi.base import PlexObject
from plexapi.client import PlexClient
from plexapi.collection import Collection
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
from plexapi.library import Hub, Library, Path, File
from plexapi.media import Conversion, Optimized
from plexapi.playlist import Playlist
from plexapi.playqueue import PlayQueue
from plexapi.settings import Settings
from plexapi.utils import deprecated
from requests.status_codes import _codes as codes
# Need these imports to populate utils.PLEXOBJECTS
from plexapi import audio as _audio # noqa: F401
from plexapi import collection as _collection # noqa: F401
from plexapi import media as _media # noqa: F401
from plexapi import photo as _photo # noqa: F401
from plexapi import playlist as _playlist # noqa: F401
from plexapi import video as _video # noqa: F401
class PlexServer(PlexObject):
""" This is the main entry point to interacting with a Plex server. It allows you to
list connected clients, browse your library sections and perform actions such as
emptying trash. If you do not know the auth token required to access your Plex
server, or simply want to access your server with your username and password, you
can also create an PlexServer instance from :class:`~plexapi.myplex.MyPlexAccount`.
Parameters:
baseurl (str): Base url for to access the Plex Media Server (default: 'http://localhost:32400').
token (str): Required Plex authentication token to access the server.
session (requests.Session, optional): Use your own session object if you want to
cache the http responses from the server.
timeout (int, optional): Timeout in seconds on initial connection to the server
(default config.TIMEOUT).
Attributes:
allowCameraUpload (bool): True if server allows camera upload.
allowChannelAccess (bool): True if server allows channel access (iTunes?).
allowMediaDeletion (bool): True is server allows media to be deleted.
allowSharing (bool): True is server allows sharing.
allowSync (bool): True is server allows sync.
backgroundProcessing (bool): Unknown
certificate (bool): True if server has an HTTPS certificate.
companionProxy (bool): Unknown
diagnostics (bool): Unknown
eventStream (bool): Unknown
friendlyName (str): Human friendly name for this server.
hubSearch (bool): True if `Hub Search <https://www.plex.tv/blog
/seek-plex-shall-find-leveling-web-app/>`_ is enabled. I believe this
is enabled for everyone
machineIdentifier (str): Unique ID for this server (looks like an md5).
multiuser (bool): True if `multiusers <https://support.plex.tv/hc/en-us/articles
/200250367-Multi-User-Support>`_ are enabled.
myPlex (bool): Unknown (True if logged into myPlex?).
myPlexMappingState (str): Unknown (ex: mapped).
myPlexSigninState (str): Unknown (ex: ok).
myPlexSubscription (bool): True if you have a myPlex subscription.
myPlexUsername (str): Email address if signed into myPlex (user@example.com)
ownerFeatures (list): List of features allowed by the server owner. This may be based
on your PlexPass subscription. Features include: camera_upload, cloudsync,
content_filter, dvr, hardware_transcoding, home, lyrics, music_videos, pass,
photo_autotags, premium_music_metadata, session_bandwidth_restrictions, sync,
trailers, webhooks (and maybe more).
photoAutoTag (bool): True if photo `auto-tagging <https://support.plex.tv/hc/en-us
/articles/234976627-Auto-Tagging-of-Photos>`_ is enabled.
platform (str): Platform the server is hosted on (ex: Linux)
platformVersion (str): Platform version (ex: '6.1 (Build 7601)', '4.4.0-59-generic').
pluginHost (bool): Unknown
readOnlyLibraries (bool): Unknown
requestParametersInCookie (bool): Unknown
streamingBrainVersion (bool): Current `Streaming Brain <https://www.plex.tv/blog
/mcstreamy-brain-take-world-two-easy-steps/>`_ version.
sync (bool): True if `syncing to a device <https://support.plex.tv/hc/en-us/articles
/201053678-Sync-Media-to-a-Device>`_ is enabled.
transcoderActiveVideoSessions (int): Number of active video transcoding sessions.
transcoderAudio (bool): True if audio transcoding audio is available.
transcoderLyrics (bool): True if audio transcoding lyrics is available.
transcoderPhoto (bool): True if audio transcoding photos is available.
transcoderSubtitles (bool): True if audio transcoding subtitles is available.
transcoderVideo (bool): True if audio transcoding video is available.
transcoderVideoBitrates (bool): List of video bitrates.
transcoderVideoQualities (bool): List of video qualities.
transcoderVideoResolutions (bool): List of video resolutions.
updatedAt (int): Datetime the server was updated.
updater (bool): Unknown
version (str): Current Plex version (ex: 1.3.2.3112-1751929)
voiceSearch (bool): True if voice search is enabled. (is this Google Voice search?)
_baseurl (str): HTTP address of the client.
_token (str): Token used to access this client.
_session (obj): Requests session object used to access this client.
"""
key = '/'
    def __init__(self, baseurl=None, token=None, session=None, timeout=None):
        """Connect to the server at *baseurl* and load its root data.

        When arguments are omitted, values fall back to the plexapi config
        ('auth.server_baseurl' / 'auth.server_token').
        """
        self._baseurl = baseurl or CONFIG.get('auth.server_baseurl', 'http://localhost:32400')
        self._baseurl = self._baseurl.rstrip('/')
        # Register the token with the log filter so it gets redacted from logs.
        self._token = logfilter.add_secret(token or CONFIG.get('auth.server_token'))
        self._showSecrets = CONFIG.get('log.show_secrets', '').lower() == 'true'
        self._session = session or requests.Session()
        self._timeout = timeout
        self._library = None  # cached library
        self._settings = None  # cached settings
        self._myPlexAccount = None  # cached myPlexAccount
        self._systemAccounts = None  # cached list of SystemAccount
        self._systemDevices = None  # cached list of SystemDevice
        # Fetch the server root ('/') immediately; raises on connection or
        # authorization failure, so a constructed PlexServer is known-good.
        data = self.query(self.key, timeout=self._timeout)
        super(PlexServer, self).__init__(self, data, self.key)
    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        # Cast raw XML attribute strings to their natural Python types; missing
        # attributes fall through as None via attrib.get().
        self.allowCameraUpload = utils.cast(bool, data.attrib.get('allowCameraUpload'))
        self.allowChannelAccess = utils.cast(bool, data.attrib.get('allowChannelAccess'))
        self.allowMediaDeletion = utils.cast(bool, data.attrib.get('allowMediaDeletion'))
        self.allowSharing = utils.cast(bool, data.attrib.get('allowSharing'))
        self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))
        self.backgroundProcessing = utils.cast(bool, data.attrib.get('backgroundProcessing'))
        self.certificate = utils.cast(bool, data.attrib.get('certificate'))
        self.companionProxy = utils.cast(bool, data.attrib.get('companionProxy'))
        self.diagnostics = utils.toList(data.attrib.get('diagnostics'))
        self.eventStream = utils.cast(bool, data.attrib.get('eventStream'))
        self.friendlyName = data.attrib.get('friendlyName')
        self.hubSearch = utils.cast(bool, data.attrib.get('hubSearch'))
        self.machineIdentifier = data.attrib.get('machineIdentifier')
        self.multiuser = utils.cast(bool, data.attrib.get('multiuser'))
        self.myPlex = utils.cast(bool, data.attrib.get('myPlex'))
        self.myPlexMappingState = data.attrib.get('myPlexMappingState')
        self.myPlexSigninState = data.attrib.get('myPlexSigninState')
        self.myPlexSubscription = utils.cast(bool, data.attrib.get('myPlexSubscription'))
        self.myPlexUsername = data.attrib.get('myPlexUsername')
        self.ownerFeatures = utils.toList(data.attrib.get('ownerFeatures'))
        self.photoAutoTag = utils.cast(bool, data.attrib.get('photoAutoTag'))
        self.platform = data.attrib.get('platform')
        self.platformVersion = data.attrib.get('platformVersion')
        self.pluginHost = utils.cast(bool, data.attrib.get('pluginHost'))
        self.readOnlyLibraries = utils.cast(int, data.attrib.get('readOnlyLibraries'))
        self.requestParametersInCookie = utils.cast(bool, data.attrib.get('requestParametersInCookie'))
        self.streamingBrainVersion = data.attrib.get('streamingBrainVersion')
        self.sync = utils.cast(bool, data.attrib.get('sync'))
        # Defaults to 0 when the attribute is absent (unlike the casts above).
        self.transcoderActiveVideoSessions = int(data.attrib.get('transcoderActiveVideoSessions', 0))
        self.transcoderAudio = utils.cast(bool, data.attrib.get('transcoderAudio'))
        self.transcoderLyrics = utils.cast(bool, data.attrib.get('transcoderLyrics'))
        self.transcoderPhoto = utils.cast(bool, data.attrib.get('transcoderPhoto'))
        self.transcoderSubtitles = utils.cast(bool, data.attrib.get('transcoderSubtitles'))
        self.transcoderVideo = utils.cast(bool, data.attrib.get('transcoderVideo'))
        self.transcoderVideoBitrates = utils.toList(data.attrib.get('transcoderVideoBitrates'))
        self.transcoderVideoQualities = utils.toList(data.attrib.get('transcoderVideoQualities'))
        self.transcoderVideoResolutions = utils.toList(data.attrib.get('transcoderVideoResolutions'))
        self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt'))
        self.updater = utils.cast(bool, data.attrib.get('updater'))
        self.version = data.attrib.get('version')
        self.voiceSearch = utils.cast(bool, data.attrib.get('voiceSearch'))
def _headers(self, **kwargs):
""" Returns dict containing base headers for all requests to the server. """
headers = BASE_HEADERS.copy()
if self._token:
headers['X-Plex-Token'] = self._token
headers.update(kwargs)
return headers
def _uriRoot(self):
return f'server://{self.machineIdentifier}/com.plexapp.plugins.library'
    @property
    def library(self):
        """ Library to browse or search your media. Cached after the first
            successful fetch.
        """
        if not self._library:
            try:
                data = self.query(Library.key)
                self._library = Library(self, data)
            except BadRequest:
                data = self.query('/library/sections/')
                # Only the owner has access to /library
                # so just return the library without the data.
                # NOTE(review): this fallback result is intentionally not cached.
                return Library(self, data)
        return self._library
    @property
    def settings(self):
        """ Returns a list of all server settings. Cached after the first fetch. """
        if not self._settings:
            data = self.query(Settings.key)
            self._settings = Settings(self, data)
        return self._settings
    def account(self):
        """ Returns the :class:`~plexapi.server.Account` object this server belongs to. """
        data = self.query(Account.key)
        return Account(self, data)
    def claim(self, account):
        """ Claim the Plex server using a :class:`~plexapi.myplex.MyPlexAccount`.
            This will only work with an unclaimed server on localhost or the same subnet.
            Parameters:
                account (:class:`~plexapi.myplex.MyPlexAccount`): The account used to
                    claim the server.
        """
        key = '/myplex/claim'
        params = {'token': account.claimToken()}
        data = self.query(key, method=self._session.post, params=params)
        return Account(self, data)
    def unclaim(self):
        """ Unclaim the Plex server. This will remove the server from your
            :class:`~plexapi.myplex.MyPlexAccount`.
        """
        # DELETE on the account endpoint removes the server claim.
        data = self.query(Account.key, method=self._session.delete)
        return Account(self, data)
@property
def activities(self):
"""Returns all current PMS activities."""
activities = []
for elem in self.query(Activity.key):
activities.append(Activity(self, elem))
return activities
def agents(self, mediaType=None):
""" Returns a list of :class:`~plexapi.media.Agent` objects this server has available. """
key = '/system/agents'
if mediaType:
key += f'?mediaType={utils.searchType(mediaType)}'
return self.fetchItems(key)
    def createToken(self, type='delegation', scope='all'):
        """ Create a temp access token for the server.
            Parameters:
                type (str): Token type (default 'delegation').
                scope (str): Token scope (default 'all').
            Returns:
                str or None: The new token, or None for unclaimed servers.
        """
        if not self._token:
            # Handle unclaimed servers
            return None
        q = self.query(f'/security/token?type={type}&scope={scope}')
        return q.attrib.get('token')
    def switchUser(self, username, session=None, timeout=None):
        """ Returns a new :class:`~plexapi.server.PlexServer` object logged in as the given username.
            Note: Only the admin account can switch to other users.
            Parameters:
                username (str): Username, email or user id of the user to log in to the server.
                session (requests.Session, optional): Use your own session object if you want to
                    cache the http responses from the server. This will default to the same
                    session as the admin account if no new session is provided.
                timeout (int, optional): Timeout in seconds on initial connection to the server.
                    This will default to the same timeout as the admin account if no new timeout
                    is provided.
            Example:
                .. code-block:: python
                    from plexapi.server import PlexServer
                    # Login to the Plex server using the admin token
                    plex = PlexServer('http://plexserver:32400', token='2ffLuB84dqLswk9skLos')
                    # Login to the same Plex server using a different account
                    userPlex = plex.switchUser("Username")
        """
        # Resolve the user via plex.tv and fetch their server-specific token.
        user = self.myPlexAccount().user(username)
        userToken = user.get_token(self.machineIdentifier)
        if session is None:
            session = self._session
        if timeout is None:
            timeout = self._timeout
        return PlexServer(self._baseurl, token=userToken, session=session, timeout=timeout)
def systemAccounts(self):
""" Returns a list of :class:`~plexapi.server.SystemAccount` objects this server contains. """
if self._systemAccounts is None:
key = '/accounts'
self._systemAccounts = self.fetchItems(key, SystemAccount)
return self._systemAccounts
def systemAccount(self, accountID):
""" Returns the :class:`~plexapi.server.SystemAccount` object for the specified account ID.
Parameters:
accountID (int): The :class:`~plexapi.server.SystemAccount` ID.
"""
try:
return next(account for account in self.systemAccounts() if account.id == accountID)
except StopIteration:
raise NotFound(f'Unknown account with accountID={accountID}') from None
def systemDevices(self):
""" Returns a list of :class:`~plexapi.server.SystemDevice` objects this server contains. """
if self._systemDevices is None:
key = '/devices'
self._systemDevices = self.fetchItems(key, SystemDevice)
return self._systemDevices
def systemDevice(self, deviceID):
""" Returns the :class:`~plexapi.server.SystemDevice` object for the specified device ID.
Parameters:
deviceID (int): The :class:`~plexapi.server.SystemDevice` ID.
"""
try:
return next(device for device in self.systemDevices() if device.id == deviceID)
except StopIteration:
raise NotFound(f'Unknown device with deviceID={deviceID}') from None
    def myPlexAccount(self):
        """ Returns a :class:`~plexapi.myplex.MyPlexAccount` object using the same
            token to access this server. If you are not the owner of this PlexServer
            you're likely to receive an authentication error calling this.
        """
        if self._myPlexAccount is None:
            # Imported here (not at module level) to avoid a circular import.
            from plexapi.myplex import MyPlexAccount
            self._myPlexAccount = MyPlexAccount(token=self._token)
        return self._myPlexAccount
def _myPlexClientPorts(self):
""" Sometimes the PlexServer does not properly advertise port numbers required
to connect. This attempts to look up device port number from plex.tv.
See issue #126: Make PlexServer.clients() more user friendly.
https://github.com/pkkid/python-plexapi/issues/126
"""
try:
ports = {}
account = self.myPlexAccount()
for device in account.devices():
if device.connections and ':' in device.connections[0][6:]:
ports[device.clientIdentifier] = device.connections[0].split(':')[-1]
return ports
except Exception as err:
log.warning('Unable to fetch client ports from myPlex: %s', err)
return ports
    def browse(self, path=None, includeFiles=True):
        """ Browse the system file path using the Plex API.
            Returns list of :class:`~plexapi.library.Path` and :class:`~plexapi.library.File` objects.
            Parameters:
                path (:class:`~plexapi.library.Path` or str, optional): Full path to browse.
                includeFiles (bool): True to include files when browsing (Default).
                    False to only return folders.
        """
        if isinstance(path, Path):
            key = path.key
        elif path is not None:
            # Plain string paths are base64-encoded into the endpoint URL.
            base64path = utils.base64str(path)
            key = f'/services/browse/{base64path}'
        else:
            # No path given: browse from the server's root.
            key = '/services/browse'
        if includeFiles:
            key += '?includeFiles=1'
        return self.fetchItems(key)
    def walk(self, path=None):
        """ Walk the system file tree using the Plex API similar to `os.walk`.
            Yields a 3-tuple `(path, paths, files)` where
            `path` is a string of the directory path,
            `paths` is a list of :class:`~plexapi.library.Path` objects, and
            `files` is a list of :class:`~plexapi.library.File` objects.
            Parameters:
                path (:class:`~plexapi.library.Path` or str, optional): Full path to walk.
        """
        paths = []
        files = []
        # Partition the directory listing into subdirectories and files.
        for item in self.browse(path):
            if isinstance(item, Path):
                paths.append(item)
            elif isinstance(item, File):
                files.append(item)
        if isinstance(path, Path):
            path = path.path
        yield path or '', paths, files
        # Recurse depth-first into each subdirectory (mirrors os.walk top-down).
        for _path in paths:
            for path, paths, files in self.walk(_path):
                yield path, paths, files
def isBrowsable(self, path):
""" Returns True if the Plex server can browse the given path.
Parameters:
path (:class:`~plexapi.library.Path` or str): Full path to browse.
"""
if isinstance(path, Path):
path = path.path
paths = [p.path for p in self.browse(os.path.dirname(path), includeFiles=False)]
return path in paths
    def clients(self):
        """ Returns list of all :class:`~plexapi.client.PlexClient` objects connected to server. """
        items = []
        ports = None  # lazily-fetched plex.tv port map, shared across iterations
        for elem in self.query('/clients'):
            port = elem.attrib.get('port')
            if not port:
                # Fall back to plex.tv device info when no port is advertised.
                log.warning('%s did not advertise a port, checking plex.tv.', elem.attrib.get('name'))
                ports = self._myPlexClientPorts() if ports is None else ports
                port = ports.get(elem.attrib.get('machineIdentifier'))
            baseurl = f"http://{elem.attrib['host']}:{port}"
            # connect=False: build the client object without probing it.
            items.append(PlexClient(baseurl=baseurl, server=self,
                                    token=self._token, data=elem, connect=False))
        return items
def client(self, name):
""" Returns the :class:`~plexapi.client.PlexClient` that matches the specified name.
Parameters:
name (str): Name of the client to return.
Raises:
:exc:`~plexapi.exceptions.NotFound`: Unknown client name.
"""
for client in self.clients():
if client and client.title == name:
return client
raise NotFound(f'Unknown client name: {name}')
    def createCollection(self, title, section, items=None, smart=False, limit=None,
                         libtype=None, sort=None, filters=None, **kwargs):
        """ Creates and returns a new :class:`~plexapi.collection.Collection`.
            Parameters:
                title (str): Title of the collection.
                section (:class:`~plexapi.library.LibrarySection`, str): The library section to create the collection in.
                items (List): Regular collections only, list of :class:`~plexapi.audio.Audio`,
                    :class:`~plexapi.video.Video`, or :class:`~plexapi.photo.Photo` objects to be added to the collection.
                smart (bool): True to create a smart collection. Default False.
                limit (int): Smart collections only, limit the number of items in the collection.
                libtype (str): Smart collections only, the specific type of content to filter
                    (movie, show, season, episode, artist, album, track, photoalbum, photo).
                sort (str or list, optional): Smart collections only, a string of comma separated sort fields
                    or a list of sort fields in the format ``column:dir``.
                    See :func:`~plexapi.library.LibrarySection.search` for more info.
                filters (dict): Smart collections only, a dictionary of advanced filters.
                    See :func:`~plexapi.library.LibrarySection.search` for more info.
                **kwargs (dict): Smart collections only, additional custom filters to apply to the
                    search results. See :func:`~plexapi.library.LibrarySection.search` for more info.
            Raises:
                :class:`plexapi.exceptions.BadRequest`: When no items are included to create the collection.
                :class:`plexapi.exceptions.BadRequest`: When mixing media types in the collection.
            Returns:
                :class:`~plexapi.collection.Collection`: A new instance of the created Collection.
        """
        # Thin wrapper; all validation happens inside Collection.create.
        return Collection.create(
            self, title, section, items=items, smart=smart, limit=limit,
            libtype=libtype, sort=sort, filters=filters, **kwargs)
    def createPlaylist(self, title, section=None, items=None, smart=False, limit=None,
                       libtype=None, sort=None, filters=None, **kwargs):
        """ Creates and returns a new :class:`~plexapi.playlist.Playlist`.
            Parameters:
                title (str): Title of the playlist.
                section (:class:`~plexapi.library.LibrarySection`, str): Smart playlists only,
                    library section to create the playlist in.
                items (List): Regular playlists only, list of :class:`~plexapi.audio.Audio`,
                    :class:`~plexapi.video.Video`, or :class:`~plexapi.photo.Photo` objects to be added to the playlist.
                smart (bool): True to create a smart playlist. Default False.
                limit (int): Smart playlists only, limit the number of items in the playlist.
                libtype (str): Smart playlists only, the specific type of content to filter
                    (movie, show, season, episode, artist, album, track, photoalbum, photo).
                sort (str or list, optional): Smart playlists only, a string of comma separated sort fields
                    or a list of sort fields in the format ``column:dir``.
                    See :func:`~plexapi.library.LibrarySection.search` for more info.
                filters (dict): Smart playlists only, a dictionary of advanced filters.
                    See :func:`~plexapi.library.LibrarySection.search` for more info.
                **kwargs (dict): Smart playlists only, additional custom filters to apply to the
                    search results. See :func:`~plexapi.library.LibrarySection.search` for more info.
            Raises:
                :class:`plexapi.exceptions.BadRequest`: When no items are included to create the playlist.
                :class:`plexapi.exceptions.BadRequest`: When mixing media types in the playlist.
            Returns:
                :class:`~plexapi.playlist.Playlist`: A new instance of the created Playlist.
        """
        # Thin wrapper; all validation happens inside Playlist.create.
        return Playlist.create(
            self, title, section=section, items=items, smart=smart, limit=limit,
            libtype=libtype, sort=sort, filters=filters, **kwargs)
    def createPlayQueue(self, item, **kwargs):
        """ Creates and returns a new :class:`~plexapi.playqueue.PlayQueue`.
            Parameters:
                item (Media or Playlist): Media or playlist to add to PlayQueue.
                kwargs (dict): See `~plexapi.playqueue.PlayQueue.create`.
        """
        return PlayQueue.create(self, item, **kwargs)
    def downloadDatabases(self, savepath=None, unpack=False):
        """ Download databases.
            Parameters:
                savepath (str): Defaults to current working dir.
                unpack (bool): Unpack the zip file.
            Returns:
                str: Path to the downloaded (and optionally unpacked) file.
        """
        url = self.url('/diagnostics/databases')
        filepath = utils.download(url, self._token, None, savepath, self._session, unpack=unpack)
        return filepath
    def downloadLogs(self, savepath=None, unpack=False):
        """ Download server logs.
            Parameters:
                savepath (str): Defaults to current working dir.
                unpack (bool): Unpack the zip file.
            Returns:
                str: Path to the downloaded (and optionally unpacked) file.
        """
        url = self.url('/diagnostics/logs')
        filepath = utils.download(url, self._token, None, savepath, self._session, unpack=unpack)
        return filepath
    def butlerTasks(self):
        """ Return a list of :class:`~plexapi.base.ButlerTask` objects. """
        return self.fetchItems('/butler')
    def runButlerTask(self, task):
        """ Manually run a butler task immediately instead of waiting for the scheduled task to run.
            Note: The butler task is run asynchronously. Check Plex Web to monitor activity.
            Parameters:
                task (str): The name of the task to run. (e.g. 'BackupDatabase')
            Raises:
                :class:`plexapi.exceptions.BadRequest`: Unknown butler task name.
            Example:
                .. code-block:: python
                    availableTasks = [task.name for task in plex.butlerTasks()]
                    print("Available butler tasks:", availableTasks)
        """
        # Validate against the tasks the server actually advertises.
        validTasks = [task.name for task in self.butlerTasks()]
        if task not in validTasks:
            raise BadRequest(
                f'Invalid butler task: {task}. Available tasks are: {validTasks}'
            )
        self.query(f'/butler/{task}', method=self._session.post)
        return self
    @deprecated('use "checkForUpdate" instead')
    def check_for_update(self, force=True, download=False):
        # Deprecated snake_case alias kept for backwards compatibility.
        return self.checkForUpdate(force=force, download=download)
    def checkForUpdate(self, force=True, download=False):
        """ Returns a :class:`~plexapi.base.Release` object containing release info,
            or None (implicitly) when no release is available.
            Parameters:
                force (bool): Force server to check for new releases
                download (bool): Download if a update is available.
        """
        part = f'/updater/check?download={1 if download else 0}'
        if force:
            self.query(part, method=self._session.put)
        releases = self.fetchItems('/updater/status')
        if len(releases):
            return releases[0]
def isLatest(self):
""" Check if the installed version of PMS is the latest. """
release = self.checkForUpdate(force=True)
return release is None
    def installUpdate(self):
        """ Install the newest version of Plex Media Server.
            Returns None when the server is already on the checked release.
        """
        # We can add this but dunno how useful this is since it sometimes
        # requires user action using a gui.
        part = '/updater/apply'
        release = self.checkForUpdate(force=True, download=True)
        if release and release.version != self.version:
            # figure out what method this is..
            return self.query(part, method=self._session.put)
    def history(self, maxresults=9999999, mindate=None, ratingKey=None, accountID=None, librarySectionID=None):
        """ Returns a list of media items from watched history. If there are many results, they will
            be fetched from the server in batches of X_PLEX_CONTAINER_SIZE amounts. If you're only
            looking for the first <num> results, it would be wise to set the maxresults option to that
            amount so this functions doesn't iterate over all results on the server.
            Parameters:
                maxresults (int): Only return the specified number of results (optional).
                mindate (datetime): Min datetime to return results from. This really helps speed
                    up the result listing. For example: datetime.now() - timedelta(days=7)
                ratingKey (int/str) Request history for a specific ratingKey item.
                accountID (int/str) Request history for a specific account ID.
                librarySectionID (int/str) Request history for a specific library section ID.
        """
        # '_init' is a truthy sentinel so the pagination loop runs at least once.
        results, subresults = [], '_init'
        args = {'sort': 'viewedAt:desc'}
        if ratingKey:
            args['metadataItemID'] = ratingKey
        if accountID:
            args['accountID'] = accountID
        if librarySectionID:
            args['librarySectionID'] = librarySectionID
        if mindate:
            args['viewedAt>'] = int(mindate.timestamp())
        args['X-Plex-Container-Start'] = 0
        args['X-Plex-Container-Size'] = min(X_PLEX_CONTAINER_SIZE, maxresults)
        # Page through results until a page comes back empty or we have enough.
        while subresults and maxresults > len(results):
            key = f'/status/sessions/history/all{utils.joinArgs(args)}'
            subresults = self.fetchItems(key)
            results += subresults[:maxresults - len(results)]
            args['X-Plex-Container-Start'] += args['X-Plex-Container-Size']
        return results
def playlists(self, playlistType=None, sectionId=None, title=None, sort=None, **kwargs):
""" Returns a list of all :class:`~plexapi.playlist.Playlist` objects on the server.
Parameters:
playlistType (str, optional): The type of playlists to return (audio, video, photo).
Default returns all playlists.
sectionId (int, optional): The section ID (key) of the library to search within.
title (str, optional): General string query to search for. Partial string matches are allowed.
sort (str or list, optional): A string of comma separated sort fields in the format ``column:dir``.
"""
args = {}
if playlistType is not None:
args['playlistType'] = playlistType
if sectionId is not None:
args['sectionID'] = sectionId
if title is not None:
args['title'] = title
if sort is not None:
# TODO: Automatically retrieve and validate sort field similar to LibrarySection.search()
args['sort'] = sort
key = f'/playlists{utils.joinArgs(args)}'
return self.fetchItems(key, **kwargs)
def playlist(self, title):
""" Returns the :class:`~plexapi.client.Playlist` that matches the specified title.
Parameters:
title (str): Title of the playlist to return.
Raises:
:exc:`~plexapi.exceptions.NotFound`: Unable to find playlist.
"""
try:
return self.playlists(title=title, title__iexact=title)[0]
except IndexError:
raise NotFound(f'Unable to find playlist with title "{title}".') from None
def optimizedItems(self, removeAll=None):
""" Returns list of all :class:`~plexapi.media.Optimized` objects connected to server. """
if removeAll is True:
key = '/playlists/generators?type=42'
self.query(key, method=self._server._session.delete)
else:
backgroundProcessing = self.fetchItem('/playlists?type=42')
return self.fetchItems(f'{backgroundProcessing.key}/items', cls=Optimized)
    @deprecated('use "plexapi.media.Optimized.items()" instead')
    def optimizedItem(self, optimizedID):
        """ Returns single queued optimized item :class:`~plexapi.media.Video` object.
            Allows for using optimized item ID to connect back to source item.
            Parameters:
                optimizedID: ID of the queued optimized item to look up.
        """
        backgroundProcessing = self.fetchItem('/playlists?type=42')
        return self.fetchItem(f'{backgroundProcessing.key}/items/{optimizedID}/items')
def conversions(self, pause=None):
""" Returns list of all :class:`~plexapi.media.Conversion` objects connected to server. """
if pause is True:
self.query('/:/prefs?BackgroundQueueIdlePaused=1', method=self._server._session.put)
elif pause is False:
self.query('/:/prefs?BackgroundQueueIdlePaused=0', method=self._server._session.put)
else:
return self.fetchItems('/playQueues/1', cls=Conversion)
    def currentBackgroundProcess(self):
        """ Returns list of all :class:`~plexapi.media.TranscodeJob` objects running or paused on server. """
        return self.fetchItems('/status/sessions/background')
def query(self, key, method=None, headers=None, timeout=None, **kwargs):
""" Main method used to handle HTTPS requests to the Plex server. This method helps
by encoding the response to utf-8 and parsing the returned XML into and
ElementTree object. Returns None if no data exists in the response.
"""
url = self.url(key)
method = method or self._session.get
timeout = timeout or TIMEOUT
log.debug('%s %s', method.__name__.upper(), url)
headers = self._headers(**headers or {})
response = method(url, headers=headers, timeout=timeout, **kwargs)
if response.status_code not in (200, 201, 204):
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
message = f'({response.status_code}) {codename}; {response.url} {errtext}'
if response.status_code == 401:
raise Unauthorized(message)
elif response.status_code == 404:
raise NotFound(message)
else:
raise BadRequest(message)
data = response.text.encode('utf8')
return ElementTree.fromstring(data) if data.strip() else None
    def search(self, query, mediatype=None, limit=None, sectionId=None):
        """ Returns a list of media items or filter categories from the resulting
            `Hub Search <https://www.plex.tv/blog/seek-plex-shall-find-leveling-web-app/>`_
            against all items in your Plex library. This searches genres, actors, directors,
            playlists, as well as all the obvious media titles. It performs spell-checking
            against your search terms (because KUROSAWA is hard to spell). It also provides
            contextual search results. So for example, if you search for 'Pernice', it’ll
            return 'Pernice Brothers' as the artist result, but we’ll also go ahead and
            return your most-listened to albums and tracks from the artist. If you type
            'Arnold' you’ll get a result for the actor, but also the most recently added
            movies he’s in.
            Parameters:
                query (str): Query to use when searching your library.
                mediatype (str, optional): Limit your search to the specified media type.
                    actor, album, artist, autotag, collection, director, episode, game, genre,
                    movie, photo, photoalbum, place, playlist, shared, show, tag, track
                limit (int, optional): Limit to the specified number of results per Hub.
                sectionId (int, optional): The section ID (key) of the library to search within.
        """
        results = []
        params = {
            'query': query,
            'includeCollections': 1,
            'includeExternalMedia': 1}
        if limit:
            params['limit'] = limit
        if sectionId:
            params['sectionId'] = sectionId
        key = f'/hubs/search?{urlencode(params)}'
        for hub in self.fetchItems(key, Hub):
            if mediatype:
                # Return only the matching hub's items; [] if no hub matches.
                if hub.type == mediatype:
                    return hub.items
            else:
                # No mediatype filter: flatten all hubs into one list.
                results += hub.items
        return results
    def sessions(self):
        """ Returns a list of all active session (currently playing) media objects. """
        return self.fetchItems('/status/sessions')
    def transcodeSessions(self):
        """ Returns a list of all active :class:`~plexapi.media.TranscodeSession` objects. """
        return self.fetchItems('/transcode/sessions')
    def startAlertListener(self, callback=None, callbackError=None):
        """ Creates a websocket connection to the Plex Server to optionally receive
            notifications. These often include messages from Plex about media scans
            as well as updates to currently running Transcode Sessions.
            Returns a new :class:`~plexapi.alert.AlertListener` object.
            Note: ``websocket-client`` must be installed in order to use this feature.
            .. code-block:: python
                >> pip install websocket-client
            Parameters:
                callback (func): Callback function to call on received messages.
                callbackError (func): Callback function to call on errors.
            Raises:
                :exc:`~plexapi.exception.Unsupported`: Websocket-client not installed.
        """
        # The listener runs in its own thread; caller keeps the handle to stop it.
        notifier = AlertListener(self, callback, callbackError)
        notifier.start()
        return notifier
def transcodeImage(self, imageUrl, height, width,
opacity=None, saturation=None, blur=None, background=None,
minSize=True, upscale=True, imageFormat=None):
""" Returns the URL for a transcoded image.
Parameters:
imageUrl (str): The URL to the image
(eg. returned by :func:`~plexapi.mixins.PosterUrlMixin.thumbUrl`
or :func:`~plexapi.mixins.ArtUrlMixin.artUrl`).
The URL can be an online image.
height (int): Height to transcode the image to.
width (int): Width to transcode the image to.
opacity (int, optional): Change the opacity of the image (0 to 100)
saturation (int, optional): Change the saturation of the image (0 to 100).
blur (int, optional): The blur to apply to the image in pixels (e.g. 3).
background (str, optional): The background hex colour to apply behind the opacity (e.g. '000000').
minSize (bool, optional): Maintain smallest dimension. Default True.
upscale (bool, optional): Upscale the image if required. Default True.
imageFormat (str, optional): 'jpeg' (default) or 'png'.
"""
params = {
'url': imageUrl,
'height': height,
'width': width,
'minSize': int(bool(minSize)),
'upscale': int(bool(upscale))
}
if opacity is not None:
params['opacity'] = opacity
if saturation is not None:
params['saturation'] = saturation
if blur is not None:
params['blur'] = blur
if background is not None:
params['background'] = str(background).strip('#')
if imageFormat is not None:
params['format'] = imageFormat.lower()
key = f'/photo/:/transcode{utils.joinArgs(params)}'
return self.url(key, includeToken=True)
def url(self, key, includeToken=None):
""" Build a URL string with proper token argument. Token will be appended to the URL
if either includeToken is True or CONFIG.log.show_secrets is 'true'.
"""
if self._token and (includeToken or self._showSecrets):
delim = '&' if '?' in key else '?'
return f'{self._baseurl}{key}{delim}X-Plex-Token={self._token}'
return f'{self._baseurl}{key}'
    def refreshSynclist(self):
        """ Force PMS to download new SyncList from Plex.tv. """
        return self.query('/sync/refreshSynclists', self._session.put)
    def refreshContent(self):
        """ Force PMS to refresh content for known SyncLists. """
        return self.query('/sync/refreshContent', self._session.put)
    def refreshSync(self):
        """ Calls :func:`~plexapi.server.PlexServer.refreshSynclist` and
            :func:`~plexapi.server.PlexServer.refreshContent`, just like the Plex Web UI does when you click 'refresh'.
        """
        self.refreshSynclist()
        self.refreshContent()
    def _allowMediaDeletion(self, toggle=False):
        """ Toggle allowMediaDeletion.
            Parameters:
                toggle (bool): True enables Media Deletion
                    False or None disable Media Deletion (Default)
            Raises:
                :exc:`~plexapi.exceptions.BadRequest`: When the requested state
                    matches the current state (no-op toggles are rejected).
        """
        # Only the first and third branches fall through to the actual PUT below;
        # the other two raise because the setting is already in the requested state.
        if self.allowMediaDeletion and toggle is False:
            log.debug('Plex is currently allowed to delete media. Toggling off.')
        elif self.allowMediaDeletion and toggle is True:
            log.debug('Plex is currently allowed to delete media. Toggle set to allow, exiting.')
            raise BadRequest('Plex is currently allowed to delete media. Toggle set to allow, exiting.')
        elif self.allowMediaDeletion is None and toggle is True:
            log.debug('Plex is currently not allowed to delete media. Toggle set to allow.')
        else:
            log.debug('Plex is currently not allowed to delete media. Toggle set to not allow, exiting.')
            raise BadRequest('Plex is currently not allowed to delete media. Toggle set to not allow, exiting.')
        value = 1 if toggle is True else 0
        return self.query(f'/:/prefs?allowMediaDeletion={value}', self._session.put)
    def bandwidth(self, timespan=None, **kwargs):
        """ Returns a list of :class:`~plexapi.server.StatisticsBandwidth` objects
            with the Plex server dashboard bandwidth data.
            Parameters:
                timespan (str, optional): The timespan to bin the bandwidth data. Default is seconds.
                    Available timespans: seconds, hours, days, weeks, months.
                **kwargs (dict, optional): Any of the available filters that can be applied to the bandwidth data.
                    The time frame (at) and bytes can also be filtered using less than or greater than (see examples below).
                    * accountID (int): The :class:`~plexapi.server.SystemAccount` ID to filter.
                    * at (datetime): The time frame to filter (inclusive). The time frame can be either:
                        1. An exact time frame (e.g. Only December 1st 2020 `at=datetime(2020, 12, 1)`).
                        2. Before a specific time (e.g. Before and including December 2020 `at<=datetime(2020, 12, 1)`).
                        3. After a specific time (e.g. After and including January 2021 `at>=datetime(2021, 1, 1)`).
                    * bytes (int): The amount of bytes to filter (inclusive). The bytes can be either:
                        1. An exact number of bytes (not very useful) (e.g. `bytes=1024**3`).
                        2. Less than or equal number of bytes (e.g. `bytes<=1024**3`).
                        3. Greater than or equal number of bytes (e.g. `bytes>=1024**3`).
                    * deviceID (int): The :class:`~plexapi.server.SystemDevice` ID to filter.
                    * lan (bool): True to only retrieve local bandwidth, False to only retrieve remote bandwidth.
                        Default returns all local and remote bandwidth.
            Raises:
                :exc:`~plexapi.exceptions.BadRequest`: When applying an invalid timespan or unknown filter.
            Example:
                .. code-block:: python
                    from plexapi.server import PlexServer
                    plex = PlexServer('http://localhost:32400', token='xxxxxxxxxxxxxxxxxxxx')
                    # Filter bandwidth data for December 2020 and later, and more than 1 GB used.
                    filters = {
                        'at>': datetime(2020, 12, 1),
                        'bytes>': 1024**3
                    }
                    # Retrieve bandwidth data in one day timespans.
                    bandwidthData = plex.bandwidth(timespan='days', **filters)
                    # Print out bandwidth usage for each account and device combination.
                    for bandwidth in sorted(bandwidthData, key=lambda x: x.at):
                        account = bandwidth.account()
                        device = bandwidth.device()
                        gigabytes = round(bandwidth.bytes / 1024**3, 3)
                        local = 'local' if bandwidth.lan else 'remote'
                        date = bandwidth.at.strftime('%Y-%m-%d')
                        print(f'{account.name} used {gigabytes} GB of {local} bandwidth on {date} from {device.name}')
        """
        params = {}
        if timespan is None:
            params['timespan'] = 6  # Default to seconds
        else:
            # API expects a numeric bin code, not the human-readable name.
            timespans = {
                'seconds': 6,
                'hours': 4,
                'days': 3,
                'weeks': 2,
                'months': 1
            }
            try:
                params['timespan'] = timespans[timespan]
            except KeyError:
                raise BadRequest(f"Invalid timespan specified: {timespan}. "
                                 f"Available timespans: {', '.join(timespans.keys())}")
        filters = {'accountID', 'at', 'at<', 'at>', 'bytes', 'bytes<', 'bytes>', 'deviceID', 'lan'}
        for key, value in kwargs.items():
            if key not in filters:
                raise BadRequest(f'Unknown filter: {key}={value}')
            if key.startswith('at'):
                # Datetime filters are sent as epoch timestamps.
                try:
                    value = utils.cast(int, value.timestamp())
                except AttributeError:
                    raise BadRequest(f'Time frame filter must be a datetime object: {key}={value}')
            elif key.startswith('bytes') or key == 'lan':
                value = utils.cast(int, value)
            elif key == 'accountID':
                if value == self.myPlexAccount().id:
                    value = 1  # The admin account is accountID=1
            params[key] = value
        key = f'/statistics/bandwidth?{urlencode(params)}'
        return self.fetchItems(key, StatisticsBandwidth)
def resources(self):
    """ Returns a list of :class:`~plexapi.server.StatisticsResources` objects
        with the Plex server dashboard resources data.
    """
    return self.fetchItems('/statistics/resources?timespan=6', StatisticsResources)
def _buildWebURL(self, base=None, endpoint=None, **kwargs):
    """ Build the Plex Web URL for the object.

        Parameters:
            base (str): The base URL before the fragment (``#!``).
                Default is https://app.plex.tv/desktop.
            endpoint (str): The Plex Web URL endpoint.
                None for server, 'playlist' for playlists, 'details' for all other media types.
            **kwargs (dict): Dictionary of URL parameters.
    """
    prefix = 'https://app.plex.tv/desktop/' if base is None else base
    params = utils.joinArgs(kwargs)
    if not endpoint:
        return f'{prefix}#!/media/{self.machineIdentifier}/com.plexapp.plugins.library{params}'
    return f'{prefix}#!/server/{self.machineIdentifier}/{endpoint}{params}'
def getWebURL(self, base=None, playlistTab=None):
    """ Returns the Plex Web URL for the server.

        Parameters:
            base (str): The base URL before the fragment (``#!``).
                Default is https://app.plex.tv/desktop.
            playlistTab (str): The playlist tab (audio, video, photo). Only used for the playlist URL.
    """
    if playlistTab is None:
        kwargs = {'key': '/hubs', 'pageType': 'hub'}
    else:
        kwargs = {'source': 'playlists', 'pivot': f'playlists.{playlistTab}'}
    return self._buildWebURL(base=base, **kwargs)
class Account(PlexObject):
    """ Locally cached MyPlex account information as reported by the server's
        /myplex/account endpoint. The fields only loosely mirror
        :class:`~plexapi.myplex.MyPlexAccount`; this object exists so that basic
        account details are available even without access to myplex itself.

        Parameters:
            server (:class:`~plexapi.server.PlexServer`): PlexServer this account is connected to (optional)
            data (ElementTree): Response from PlexServer used to build this object (optional).

        Attributes:
            authToken (str): Plex authentication token to access the server.
            mappingError (str): Unknown
            mappingErrorMessage (str): Unknown
            mappingState (str): Unknown
            privateAddress (str): Local IP address of the Plex server.
            privatePort (str): Local port of the Plex server.
            publicAddress (str): Public IP address of the Plex server.
            publicPort (str): Public port of the Plex server.
            signInState (str): Signin state for this account (ex: ok).
            subscriptionActive (str): True if the account subscription is active.
            subscriptionFeatures (str): List of features allowed by the server for this account.
                This may be based on your PlexPass subscription. Features include: camera_upload,
                cloudsync, content_filter, dvr, hardware_transcoding, home, lyrics, music_videos,
                pass, photo_autotags, premium_music_metadata, session_bandwidth_restrictions,
                sync, trailers, webhooks' (and maybe more).
            subscriptionState (str): 'Active' if this subscription is active.
            username (str): Plex account username (user@example.com).
    """
    key = '/myplex/account'

    def _loadData(self, data):
        self._data = data
        # Attributes copied verbatim from the XML response.
        plain_attrs = (
            'authToken', 'username', 'mappingState', 'mappingError',
            'mappingErrorMessage', 'signInState', 'publicAddress',
            'publicPort', 'privateAddress', 'privatePort', 'subscriptionState',
        )
        for attr in plain_attrs:
            setattr(self, attr, data.attrib.get(attr))
        # Attributes that need conversion from their raw string form.
        self.subscriptionFeatures = utils.toList(data.attrib.get('subscriptionFeatures'))
        self.subscriptionActive = utils.cast(bool, data.attrib.get('subscriptionActive'))
class Activity(PlexObject):
    """ A currently running activity on the PlexServer. """
    key = '/activities'

    def _loadData(self, data):
        self._data = data
        self.uuid = data.attrib.get('uuid')
        self.type = data.attrib.get('type')
        self.title = data.attrib.get('title')
        self.subtitle = data.attrib.get('subtitle')
        self.progress = utils.cast(int, data.attrib.get('progress'))
        self.cancellable = utils.cast(bool, data.attrib.get('cancellable'))
@utils.registerPlexObject
class Release(PlexObject):
    """ Represents a Plex server update release as reported by /updater/status.

        Attributes:
            TAG (str): 'Release'
            download_key (str): Raw 'key' attribute from the XML element.
            version (str): Version string of the release.
            added (str): Raw 'added' attribute (release notes: additions).
            fixed (str): Raw 'fixed' attribute (release notes: fixes).
            downloadURL (str): URL to download the release.
            state (str): Raw 'state' attribute of the update.
    """
    TAG = 'Release'
    key = '/updater/status'

    def _loadData(self, data):
        # Store the raw element, consistent with the other PlexObject
        # subclasses in this module (previously omitted here).
        self._data = data
        self.download_key = data.attrib.get('key')
        self.version = data.attrib.get('version')
        self.added = data.attrib.get('added')
        self.fixed = data.attrib.get('fixed')
        self.downloadURL = data.attrib.get('downloadURL')
        self.state = data.attrib.get('state')
class SystemAccount(PlexObject):
    """ Represents a single system account.

        Attributes:
            TAG (str): 'Account'
            autoSelectAudio (bool): True or False if the account has automatic audio language enabled.
            defaultAudioLanguage (str): The default audio language code for the account.
            defaultSubtitleLanguage (str): The default subtitle language code for the account.
            id (int): The Plex account ID.
            key (str): API URL (/accounts/<id>)
            name (str): The username of the account.
            subtitleMode (int): The subtitle mode for the account (raw value is cast
                to int, not bool, matching the code below).
            thumb (str): URL for the account thumbnail.
    """
    TAG = 'Account'

    def _loadData(self, data):
        self._data = data
        self.autoSelectAudio = utils.cast(bool, data.attrib.get('autoSelectAudio'))
        self.defaultAudioLanguage = data.attrib.get('defaultAudioLanguage')
        self.defaultSubtitleLanguage = data.attrib.get('defaultSubtitleLanguage')
        self.id = utils.cast(int, data.attrib.get('id'))
        self.key = data.attrib.get('key')
        self.name = data.attrib.get('name')
        self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode'))
        self.thumb = data.attrib.get('thumb')
        # For backwards compatibility
        self.accountID = self.id
        self.accountKey = self.key
class SystemDevice(PlexObject):
    """ A single device known to the Plex server.

        Attributes:
            TAG (str): 'Device'
            clientIdentifier (str): The unique identifier for the device.
            createdAt (datetime): Datetime the device was created.
            id (int): The ID of the device (not the same as :class:`~plexapi.myplex.MyPlexDevice` ID).
            key (str): API URL (/devices/<id>)
            name (str): The name of the device.
            platform (str): OS the device is running (Linux, Windows, Chrome, etc.)
    """
    TAG = 'Device'

    def _loadData(self, data):
        self._data = data
        # id must be parsed first; key embeds it.
        self.id = utils.cast(int, data.attrib.get('id'))
        self.key = f'/devices/{self.id}'
        self.clientIdentifier = data.attrib.get('clientIdentifier')
        self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
        self.name = data.attrib.get('name')
        self.platform = data.attrib.get('platform')
class StatisticsBandwidth(PlexObject):
    """ A single bandwidth statistics sample.

        Attributes:
            TAG (str): 'StatisticsBandwidth'
            accountID (int): The associated :class:`~plexapi.server.SystemAccount` ID.
            at (datetime): Datetime of the bandwidth data.
            bytes (int): The total number of bytes for the specified time span.
            deviceID (int): The associated :class:`~plexapi.server.SystemDevice` ID.
            lan (bool): True or False whether the bandwidth is local or remote.
            timespan (int): The time span for the bandwidth data.
                1: months, 2: weeks, 3: days, 4: hours, 6: seconds.
    """
    TAG = 'StatisticsBandwidth'

    def _loadData(self, data):
        self._data = data
        self.at = utils.toDatetime(data.attrib.get('at'))
        self.accountID = utils.cast(int, data.attrib.get('accountID'))
        self.deviceID = utils.cast(int, data.attrib.get('deviceID'))
        self.bytes = utils.cast(int, data.attrib.get('bytes'))
        self.lan = utils.cast(bool, data.attrib.get('lan'))
        self.timespan = utils.cast(int, data.attrib.get('timespan'))

    def __repr__(self):
        parts = [
            self.__class__.__name__,
            self._clean(self.accountID),
            self._clean(self.deviceID),
            self._clean(int(self.at.timestamp())),
        ]
        return '<{}>'.format(':'.join(p for p in parts if p))

    def account(self):
        """ Returns the :class:`~plexapi.server.SystemAccount` associated with the bandwidth data. """
        return self._server.systemAccount(self.accountID)

    def device(self):
        """ Returns the :class:`~plexapi.server.SystemDevice` associated with the bandwidth data. """
        return self._server.systemDevice(self.deviceID)
class StatisticsResources(PlexObject):
    """ Represents a single statistics resources data.

        Attributes:
            TAG (str): 'StatisticsResources'
            at (datetime): Datetime of the resource data.
            hostCpuUtilization (float): The system CPU usage %.
            hostMemoryUtilization (float): The system RAM usage %.
            processCpuUtilization (float): The Plex Media Server CPU usage %.
            processMemoryUtilization (float): The Plex Media Server RAM usage %.
            timespan (int): The time span for the resource data (6: seconds).
    """
    # NOTE: the previous docstring swapped the host-memory / process-CPU
    # descriptions; attribute names map host->system and process->PMS.
    TAG = 'StatisticsResources'

    def _loadData(self, data):
        self._data = data
        self.at = utils.toDatetime(data.attrib.get('at'))
        self.hostCpuUtilization = utils.cast(float, data.attrib.get('hostCpuUtilization'))
        self.hostMemoryUtilization = utils.cast(float, data.attrib.get('hostMemoryUtilization'))
        self.processCpuUtilization = utils.cast(float, data.attrib.get('processCpuUtilization'))
        self.processMemoryUtilization = utils.cast(float, data.attrib.get('processMemoryUtilization'))
        self.timespan = utils.cast(int, data.attrib.get('timespan'))

    def __repr__(self):
        return f"<{':'.join([p for p in [self.__class__.__name__, self._clean(int(self.at.timestamp()))] if p])}>"
@utils.registerPlexObject
class ButlerTask(PlexObject):
    """ A single scheduled butler (maintenance) task.

        Attributes:
            TAG (str): 'ButlerTask'
            description (str): The description of the task.
            enabled (bool): Whether the task is enabled.
            interval (int): The interval the task is run in days.
            name (str): The name of the task.
            scheduleRandomized (bool): Whether the task schedule is randomized.
            title (str): The title of the task.
    """
    TAG = 'ButlerTask'

    def _loadData(self, data):
        self._data = data
        self.name = data.attrib.get('name')
        self.title = data.attrib.get('title')
        self.description = data.attrib.get('description')
        self.enabled = utils.cast(bool, data.attrib.get('enabled'))
        self.interval = utils.cast(int, data.attrib.get('interval'))
        self.scheduleRandomized = utils.cast(bool, data.attrib.get('scheduleRandomized'))
| bsd-3-clause | 24b93ef0d84092023427f98178904b2a | 48.312808 | 124 | 0.616486 | 4.358708 | false | false | false | false |
pinax/symposion | symposion/schedule/models.py | 2 | 9090 | from __future__ import unicode_literals
import datetime

from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _

from symposion.markdown_parser import parse
from symposion.proposals.models import ProposalBase
from symposion.conference.models import Section
from symposion.speakers.models import Speaker
@python_2_unicode_compatible
class Schedule(models.Model):
    """A published timetable for a single conference :class:`Section`."""

    # One schedule per section.
    section = models.OneToOneField(Section, verbose_name=_("Section"))
    published = models.BooleanField(default=True, verbose_name=_("Published"))
    hidden = models.BooleanField(_("Hide schedule from overall conference view"), default=False)

    def __str__(self):
        return "%s Schedule" % self.section

    class Meta:
        ordering = ["section"]
        verbose_name = _('Schedule')
        verbose_name_plural = _('Schedules')
@python_2_unicode_compatible
class Day(models.Model):
    """A single calendar day belonging to a :class:`Schedule`."""

    schedule = models.ForeignKey(Schedule, verbose_name=_("Schedule"))
    date = models.DateField(verbose_name=_("Date"))

    def __str__(self):
        return "%s" % self.date

    class Meta:
        # A schedule cannot contain the same calendar date twice.
        unique_together = [("schedule", "date")]
        ordering = ["date"]
        verbose_name = _("date")
        verbose_name_plural = _("dates")
@python_2_unicode_compatible
class Room(models.Model):
    """A room in which scheduled slots take place."""

    schedule = models.ForeignKey(Schedule, verbose_name=_("Schedule"))
    name = models.CharField(max_length=65, verbose_name=_("Name"))
    # Relative position used when rendering rooms side by side.
    order = models.PositiveIntegerField(verbose_name=_("Order"))

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _("Room")
        verbose_name_plural = _("Rooms")
@python_2_unicode_compatible
class SlotKind(models.Model):
    """
    A slot kind represents what kind a slot is. For example, a slot can be a
    break, lunch, or X-minute talk.
    """

    schedule = models.ForeignKey(Schedule, verbose_name=_("schedule"))
    # Human-readable name of the kind (e.g. "talk", "break").
    label = models.CharField(max_length=50, verbose_name=_("Label"))

    def __str__(self):
        return self.label

    class Meta:
        verbose_name = _("Slot kind")
        verbose_name_plural = _("Slot kinds")
@python_2_unicode_compatible
class Slot(models.Model):
    """A concrete time span on a day into which content can be scheduled."""

    # Denormalized display name, rebuilt on every save().
    name = models.CharField(max_length=100, editable=False)
    day = models.ForeignKey(Day, verbose_name=_("Day"))
    kind = models.ForeignKey(SlotKind, verbose_name=_("Kind"))
    start = models.TimeField(verbose_name=_("Start"))
    end = models.TimeField(verbose_name=_("End"))
    content_override = models.TextField(blank=True, verbose_name=_("Content override"))
    content_override_html = models.TextField(blank=True)

    def assign(self, content):
        """
        Assign the given content to this slot and if a previous slot content
        was given we need to unlink it to avoid integrity errors.
        """
        self.unassign()
        content.slot = self
        content.save()

    def unassign(self):
        """
        Unassign the associated content with this slot.
        """
        content = self.content
        if content and content.slot_id:
            content.slot = None
            content.save()

    @property
    def content(self):
        """
        Return the content this slot represents.
        @@@ hard-coded for presentation for now
        """
        try:
            return self.content_ptr
        except ObjectDoesNotExist:
            return None

    @property
    def start_datetime(self):
        # Combine the day's date with the slot's start time.
        return datetime.datetime(
            self.day.date.year,
            self.day.date.month,
            self.day.date.day,
            self.start.hour,
            self.start.minute)

    @property
    def end_datetime(self):
        # Combine the day's date with the slot's end time.
        return datetime.datetime(
            self.day.date.year,
            self.day.date.month,
            self.day.date.day,
            self.end.hour,
            self.end.minute)

    @property
    def length_in_minutes(self):
        return int(
            (self.end_datetime - self.start_datetime).total_seconds() / 60)

    @property
    def rooms(self):
        return Room.objects.filter(pk__in=self.slotroom_set.values("room"))

    def save(self, *args, **kwargs):
        # force_text works on both Python 2 and 3; the previous direct
        # r.__unicode__() call raises AttributeError under Python 3, where
        # python_2_unicode_compatible only defines __str__.
        roomlist = ' '.join(force_text(r) for r in self.rooms)
        self.name = "%s %s (%s - %s) %s" % (self.day, self.kind, self.start, self.end, roomlist)
        self.content_override_html = parse(self.content_override)
        super(Slot, self).save(*args, **kwargs)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ["day", "start", "end"]
        verbose_name = _("slot")
        verbose_name_plural = _("slots")
@python_2_unicode_compatible
class SlotRoom(models.Model):
    """
    Links a slot with a room.
    """

    slot = models.ForeignKey(Slot, verbose_name=_("Slot"))
    room = models.ForeignKey(Room, verbose_name=_("Room"))

    def __str__(self):
        return "%s %s" % (self.room, self.slot)

    class Meta:
        # Each room appears at most once per slot.
        unique_together = [("slot", "room")]
        ordering = ["slot", "room__order"]
        verbose_name = _("Slot room")
        verbose_name_plural = _("Slot rooms")
@python_2_unicode_compatible
class Presentation(models.Model):
    """A talk built from an accepted proposal, optionally assigned to a slot."""

    slot = models.OneToOneField(Slot, null=True, blank=True, related_name="content_ptr", verbose_name=_("Slot"))
    title = models.CharField(max_length=100, verbose_name=_("Title"))
    description = models.TextField(verbose_name=_("Description"))
    description_html = models.TextField(blank=True)
    abstract = models.TextField(verbose_name=_("Abstract"))
    abstract_html = models.TextField(blank=True)
    speaker = models.ForeignKey(Speaker, related_name="presentations", verbose_name=_("Speaker"))
    additional_speakers = models.ManyToManyField(Speaker, related_name="copresentations",
                                                 blank=True, verbose_name=_("Additional speakers"))
    cancelled = models.BooleanField(default=False, verbose_name=_("Cancelled"))
    proposal_base = models.OneToOneField(ProposalBase, related_name="presentation", verbose_name=_("Proposal base"))
    section = models.ForeignKey(Section, related_name="presentations", verbose_name=_("Section"))

    def save(self, *args, **kwargs):
        # Keep the rendered HTML copies in sync with the markdown sources.
        self.description_html = parse(self.description)
        self.abstract_html = parse(self.abstract)
        return super(Presentation, self).save(*args, **kwargs)

    @property
    def number(self):
        # Delegates to the underlying proposal's number.
        return self.proposal.number

    @property
    def proposal(self):
        """Return the concrete proposal subclass instance, or None if unset."""
        if self.proposal_base_id is None:
            return None
        return ProposalBase.objects.get_subclass(pk=self.proposal_base_id)

    def speakers(self):
        # Yield the primary speaker unconditionally, then additional
        # speakers that have a linked user account.
        yield self.speaker
        for speaker in self.additional_speakers.all():
            if speaker.user:
                yield speaker

    def __str__(self):
        return "#%s %s (%s)" % (self.number, self.title, self.speaker)

    class Meta:
        ordering = ["slot"]
        verbose_name = _("presentation")
        verbose_name_plural = _("presentations")
@python_2_unicode_compatible
class Session(models.Model):
    """A named grouping of slots on a day (e.g. a morning or afternoon block)."""

    day = models.ForeignKey(Day, related_name="sessions", verbose_name=_("Day"))
    slots = models.ManyToManyField(Slot, related_name="sessions", verbose_name=_("Slots"))

    def sorted_slots(self):
        # Slots ordered by their start time.
        return self.slots.order_by("start")

    def start(self):
        """Return the start time of the earliest slot, or None if empty."""
        slots = self.sorted_slots()
        if slots:
            return list(slots)[0].start
        else:
            return None

    def end(self):
        """Return the end time of the latest slot, or None if empty."""
        slots = self.sorted_slots()
        if slots:
            return list(slots)[-1].end
        else:
            return None

    def __str__(self):
        start = self.start()
        end = self.end()
        if start and end:
            return "%s: %s - %s" % (
                self.day.date.strftime("%a"),
                start.strftime("%X"),
                end.strftime("%X")
            )
        return ""

    class Meta:
        verbose_name = _("Session")
        verbose_name_plural = _("Sessions")
@python_2_unicode_compatible
class SessionRole(models.Model):
    """A user filling a staffing role (chair or runner) within a session."""

    SESSION_ROLE_CHAIR = 1
    SESSION_ROLE_RUNNER = 2

    SESSION_ROLE_TYPES = [
        (SESSION_ROLE_CHAIR, _("Session Chair")),
        (SESSION_ROLE_RUNNER, _("Session Runner")),
    ]

    session = models.ForeignKey(Session, verbose_name=_("Session"))
    user = models.ForeignKey(User, verbose_name=_("User"))
    role = models.IntegerField(choices=SESSION_ROLE_TYPES, verbose_name=_("Role"))
    # NOTE(review): presumably None = pending, True = accepted, False =
    # declined -- confirm against the views that set this field.
    status = models.NullBooleanField(verbose_name=_("Status"))

    submitted = models.DateTimeField(default=datetime.datetime.now)

    class Meta:
        unique_together = [("session", "user", "role")]
        verbose_name = _("Session role")
        verbose_name_plural = _("Session roles")

    def __str__(self):
        # Use Django's choices display lookup instead of indexing
        # SESSION_ROLE_TYPES by (role - 1), which silently depended on the
        # role constants matching their position in the list.
        return "%s %s: %s" % (self.user, self.session, self.get_role_display())
| bsd-3-clause | e2283d1126de40240542b318c85c3be8 | 30.023891 | 116 | 0.616612 | 4.022124 | false | false | false | false |
pinax/symposion | symposion/sponsorship/forms.py | 5 | 2692 | from __future__ import unicode_literals
from django import forms
from django.forms.models import inlineformset_factory, BaseInlineFormSet
from django.contrib.admin.widgets import AdminFileWidget
from django.utils.translation import ugettext_lazy as _
from symposion.sponsorship.models import Sponsor, SponsorBenefit
class SponsorApplicationForm(forms.ModelForm):
    """
    Application form for a prospective sponsor; contact fields are pre-filled
    from the applying user's account.
    """

    def __init__(self, *args, **kwargs):
        # The applying user is required and injected by the view.
        self.user = kwargs.pop("user")
        kwargs.update({
            "initial": {
                # get_full_name is passed as a callable; Django evaluates
                # callable initial values when rendering the form.
                "contact_name": self.user.get_full_name,
                "contact_email": self.user.email,
            }
        })
        super(SponsorApplicationForm, self).__init__(*args, **kwargs)

    class Meta:
        model = Sponsor
        fields = [
            "name",
            "external_url",
            "contact_name",
            "contact_email",
            "level"
        ]

    def save(self, commit=True):
        # Record who applied before saving.
        obj = super(SponsorApplicationForm, self).save(commit=False)
        obj.applicant = self.user
        if commit:
            obj.save()
        return obj
class SponsorDetailsForm(forms.ModelForm):
    """
    Form for a sponsor to edit their basic details (the sponsorship level is
    intentionally not editable here).
    """

    class Meta:
        model = Sponsor
        fields = [
            "name",
            "external_url",
            "contact_name",
            "contact_email"
        ]
class SponsorBenefitsInlineFormSet(BaseInlineFormSet):
    """
    Inline formset for editing a sponsor's benefits; benefits of type
    "option" are excluded from the queryset and each form only exposes the
    data fields relevant to its benefit type.
    """

    def __init__(self, *args, **kwargs):
        # Restrict the default queryset to non-"option" benefits.
        kwargs['queryset'] = kwargs.get('queryset', self.model._default_manager).exclude(benefit__type="option")
        super(SponsorBenefitsInlineFormSet, self).__init__(*args, **kwargs)

    def _construct_form(self, i, **kwargs):
        form = super(SponsorBenefitsInlineFormSet, self)._construct_form(i, **kwargs)

        # only include the relevant data fields for this benefit type
        fields = form.instance.data_fields()
        form.fields = dict((k, v) for (k, v) in form.fields.items() if k in fields + ["id"])

        for field in fields:
            # don't need a label, the form template will label it with the benefit name
            form.fields[field].label = ""

            # provide word limit as help_text
            if form.instance.benefit.type == "text" and form.instance.max_words:
                form.fields[field].help_text = _("maximum %s words") % form.instance.max_words

            # use admin file widget that shows currently uploaded file
            if field == "upload":
                form.fields[field].widget = AdminFileWidget()

        return form
# Formset used to edit a sponsor's benefit details (text and uploads);
# deletion and extra blank forms are disabled.
SponsorBenefitsFormSet = inlineformset_factory(
    Sponsor, SponsorBenefit,
    formset=SponsorBenefitsInlineFormSet,
    can_delete=False, extra=0,
    fields=["text", "upload"]
)
| bsd-3-clause | 7a3e5a4930d64be0a0476cfbc176d944 | 31.433735 | 112 | 0.610327 | 4.035982 | false | false | false | false |
pinax/symposion | symposion/speakers/views.py | 2 | 4402 | from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from account.decorators import login_required
from symposion.proposals.models import ProposalBase
from symposion.speakers.forms import SpeakerForm
from symposion.speakers.models import Speaker
@login_required
def speaker_create(request):
    """
    Create a speaker profile for the logged-in user.

    If the user already has a profile they are redirected to it. If a speaker
    record was pre-created via an email invitation matching the user's email
    address, that record is claimed instead of creating a new one.
    """
    try:
        # Users with an existing profile are sent straight to it.
        return redirect(request.user.speaker_profile)
    except ObjectDoesNotExist:
        pass

    if request.method == "POST":
        try:
            # Claim a speaker record pre-created from an email invite.
            speaker = Speaker.objects.get(invite_email=request.user.email)
            found = True
        except Speaker.DoesNotExist:
            speaker = None
            found = False
        form = SpeakerForm(request.POST, request.FILES, instance=speaker)

        if form.is_valid():
            speaker = form.save(commit=False)
            speaker.user = request.user
            if not found:
                # Freshly created profiles carry no invitation email.
                speaker.invite_email = None
            speaker.save()
            messages.success(request, _("Speaker profile created."))
            return redirect("dashboard")
    else:
        form = SpeakerForm(initial={"name": request.user.get_full_name()})

    return render(request, "symposion/speakers/speaker_create.html", {
        "speaker_form": form,
    })
@login_required
def speaker_create_staff(request, pk):
    """
    Allow a staff member to create a speaker profile on behalf of another user.
    """
    user = get_object_or_404(User, pk=pk)

    # Staff only; hide the page entirely from everyone else.
    if not request.user.is_staff:
        raise Http404

    try:
        # Users with an existing profile are sent straight to it.
        return redirect(user.speaker_profile)
    except ObjectDoesNotExist:
        pass

    if request.method == "POST":
        form = SpeakerForm(request.POST, request.FILES)

        if form.is_valid():
            speaker = form.save(commit=False)
            speaker.user = user
            speaker.save()
            messages.success(request, _("Speaker profile created."))
            return redirect("user_list")
    else:
        form = SpeakerForm(initial={"name": user.get_full_name()})

    return render(request, "symposion/speakers/speaker_create.html", {
        "speaker_form": form,
    })
def speaker_create_token(request, token):
    """
    Entry point for an email invitation link.

    Stores the invite token in the session. An authenticated user with an
    existing speaker profile immediately absorbs the invited speaker's pending
    proposals; otherwise the user is routed to login or profile creation.
    """
    speaker = get_object_or_404(Speaker, invite_token=token)
    request.session["pending-token"] = token
    if request.user.is_authenticated():
        # check for speaker profile
        try:
            existing_speaker = request.user.speaker_profile
        except ObjectDoesNotExist:
            pass
        else:
            del request.session["pending-token"]
            additional_speakers = ProposalBase.additional_speakers.through
            additional_speakers._default_manager.filter(
                speaker=speaker
            ).update(
                speaker=existing_speaker
            )
            messages.info(request, _("You have been associated with all pending "
                                     "talk proposals"))
            return redirect("dashboard")
    else:
        # Previously this branch re-checked is_authenticated(), which is
        # always False here; redirect straight to login. The pending token
        # stored in the session is handled after authentication.
        return redirect("account_login")
    return redirect("speaker_create")
@login_required
def speaker_edit(request, pk=None):
    """
    Edit a speaker profile. Without a pk, users edit their own profile; with
    a pk, only staff members may edit an arbitrary profile.
    """
    if pk is None:
        try:
            speaker = request.user.speaker_profile
        except Speaker.DoesNotExist:
            return redirect("speaker_create")
    else:
        if request.user.is_staff:
            speaker = get_object_or_404(Speaker, pk=pk)
        else:
            raise Http404()

    if request.method == "POST":
        form = SpeakerForm(request.POST, request.FILES, instance=speaker)
        if form.is_valid():
            form.save()
            messages.success(request, "Speaker profile updated.")
            return redirect("dashboard")
    else:
        form = SpeakerForm(instance=speaker)

    return render(request, "symposion/speakers/speaker_edit.html", {
        "speaker_form": form,
    })
def speaker_profile(request, pk):
    """
    Public profile page for a speaker; hidden (404) from non-staff when the
    speaker has no presentations.
    """
    speaker = get_object_or_404(Speaker, pk=pk)
    presentations = speaker.all_presentations
    if not presentations and not request.user.is_staff:
        raise Http404()
    ctx = {
        "speaker": speaker,
        "presentations": presentations,
    }
    return render(request, "symposion/speakers/speaker_profile.html", ctx)
| bsd-3-clause | a90ce88392d60bb92feefeb2d9a720a0 | 30.898551 | 81 | 0.629487 | 4.148916 | false | false | false | false |
pinax/symposion | symposion/reviews/views.py | 2 | 18632 | from django.core.mail import send_mass_mail
from django.db.models import Q
from django.http import HttpResponseBadRequest, HttpResponseNotAllowed
from django.shortcuts import render, redirect, get_object_or_404
from django.template import Context, Template
from django.views.decorators.http import require_POST
from account.decorators import login_required
# @@@ switch to pinax-teams
from symposion.teams.models import Team
from symposion.conf import settings
from symposion.proposals.models import ProposalBase, ProposalSection
from symposion.utils.mail import send_email
from symposion.reviews.forms import ReviewForm, SpeakerCommentForm
from symposion.reviews.forms import BulkPresentationForm
from symposion.reviews.models import (
ReviewAssignment, Review, LatestVote, ProposalResult, NotificationTemplate,
ResultNotification
)
def access_not_permitted(request):
    """Render the standard 'access not permitted' page."""
    template = "symposion/reviews/access_not_permitted.html"
    return render(request, template)
def proposals_generator(request, queryset, user_pk=None, check_speaker=True):
    """
    Yield proposals from ``queryset`` annotated with aggregate review data and
    the relevant user's latest vote.

    Parameters:
        user_pk: annotate votes for this user instead of ``request.user``.
        check_speaker: skip proposals the requesting user speaks on.
    """
    for obj in queryset:
        # @@@ this sucks; we can do better
        if check_speaker:
            if request.user in [s.user for s in obj.speakers()]:
                continue

        # Lazily create the aggregate result row on first access.
        try:
            obj.result
        except ProposalResult.DoesNotExist:
            ProposalResult.objects.get_or_create(proposal=obj)

        obj.comment_count = obj.result.comment_count
        obj.total_votes = obj.result.vote_count
        obj.plus_one = obj.result.plus_one
        obj.plus_zero = obj.result.plus_zero
        obj.minus_zero = obj.result.minus_zero
        obj.minus_one = obj.result.minus_one

        lookup_params = dict(proposal=obj)

        if user_pk:
            lookup_params["user__pk"] = user_pk
        else:
            lookup_params["user"] = request.user

        try:
            # Fetch the vote once instead of issuing the same query twice.
            latest_vote = LatestVote.objects.get(**lookup_params)
        except LatestVote.DoesNotExist:
            obj.user_vote = None
            obj.user_vote_css = "no-vote"
        else:
            obj.user_vote = latest_vote.vote
            obj.user_vote_css = latest_vote.css_class()

        yield obj
# Returns a list of all proposals, proposals reviewed by the user, or the proposals the user has
# yet to review depending on the link user clicks in dashboard
@login_required
def review_section(request, section_slug, assigned=False, reviewed="all"):
    """
    List proposals of a section for reviewers: all proposals, only those
    assigned to the user, only those the user has reviewed, or only those
    the user has not yet reviewed.
    """
    if not request.user.has_perm("reviews.can_review_%s" % section_slug):
        return access_not_permitted(request)

    section = get_object_or_404(ProposalSection, section__slug=section_slug)
    queryset = ProposalBase.objects.filter(kind__section=section.section)

    if assigned:
        assignments = ReviewAssignment.objects.filter(user=request.user)\
            .values_list("proposal__id")
        queryset = queryset.filter(id__in=assignments)

    # passing reviewed in from reviews.urls and out to review_list for
    # appropriate template header rendering
    if reviewed == "all":
        queryset = queryset.select_related("result").select_subclasses()
        reviewed = "all_reviews"
    elif reviewed == "reviewed":
        queryset = queryset.filter(reviews__user=request.user)
        reviewed = "user_reviewed"
    else:
        # Not yet reviewed by the user; also drop their own proposals.
        queryset = queryset.exclude(reviews__user=request.user).exclude(
            speaker__user=request.user)
        reviewed = "user_not_reviewed"

    proposals = proposals_generator(request, queryset)

    ctx = {
        "proposals": proposals,
        "section": section,
        "reviewed": reviewed,
    }

    return render(request, "symposion/reviews/review_list.html", ctx)
@login_required
def review_list(request, section_slug, user_pk):
    """
    List the proposals a given user has voted on. Accessible to section
    managers, or to the user viewing their own list.
    """
    # if they're not a reviewer admin and they aren't the person whose
    # review list is being asked for, don't let them in
    if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
        # user_pk is captured from the URL as a string; compare as strings so
        # users viewing their own list are not rejected by an int != str
        # comparison (the old `request.user.pk == user_pk` was always False).
        if str(request.user.pk) != str(user_pk):
            return access_not_permitted(request)

    queryset = ProposalBase.objects.select_related("speaker__user", "result")
    reviewed = LatestVote.objects.filter(user__pk=user_pk).values_list("proposal", flat=True)
    queryset = queryset.filter(pk__in=reviewed)
    proposals = queryset.order_by("submitted")

    admin = request.user.has_perm("reviews.can_manage_%s" % section_slug)

    proposals = proposals_generator(request, proposals, user_pk=user_pk, check_speaker=not admin)

    ctx = {
        "proposals": proposals,
    }
    return render(request, "symposion/reviews/review_list.html", ctx)
@login_required
def review_admin(request, section_slug):
    """
    Show per-reviewer comment and vote statistics for a section; restricted
    to users with the section's manage permission.
    """
    if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
        return access_not_permitted(request)

    def reviewers():
        # Yield each reviewer of the section exactly once, annotated with
        # their comment count and a breakdown of their latest votes.
        already_seen = set()

        for team in Team.objects.filter(permissions__codename="can_review_%s" % section_slug):
            for membership in team.memberships.filter(Q(state="member") | Q(state="manager")):
                user = membership.user
                if user.pk in already_seen:
                    continue
                already_seen.add(user.pk)

                user.comment_count = Review.objects.filter(user=user).count()
                user.total_votes = LatestVote.objects.filter(user=user).count()
                user.plus_one = LatestVote.objects.filter(
                    user=user,
                    vote=LatestVote.VOTES.PLUS_ONE
                ).count()
                user.plus_zero = LatestVote.objects.filter(
                    user=user,
                    vote=LatestVote.VOTES.PLUS_ZERO
                ).count()
                user.minus_zero = LatestVote.objects.filter(
                    user=user,
                    vote=LatestVote.VOTES.MINUS_ZERO
                ).count()
                user.minus_one = LatestVote.objects.filter(
                    user=user,
                    vote=LatestVote.VOTES.MINUS_ONE
                ).count()

                yield user

    ctx = {
        "section_slug": section_slug,
        "reviewers": reviewers(),
    }
    return render(request, "symposion/reviews/review_admin.html", ctx)
# FIXME: This view is too complex according to flake8
@login_required
def review_detail(request, pk):
    """
    Review page for a single proposal: shows aggregate votes and messages,
    and handles three POST actions -- casting a vote ("vote_submit"),
    messaging the speakers ("message_submit"), and, for staff, setting the
    accept/reject/standby result ("result_submit").
    """
    proposals = ProposalBase.objects.select_related("result").select_subclasses()
    proposal = get_object_or_404(proposals, pk=pk)

    if not request.user.has_perm("reviews.can_review_%s" % proposal.kind.section.slug):
        return access_not_permitted(request)

    speakers = [s.user for s in proposal.speakers()]

    # Speakers may not review their own proposal (superusers excepted).
    if not request.user.is_superuser and request.user in speakers:
        return access_not_permitted(request)

    admin = request.user.is_staff

    try:
        latest_vote = LatestVote.objects.get(proposal=proposal, user=request.user)
    except LatestVote.DoesNotExist:
        latest_vote = None

    if request.method == "POST":
        if request.user in speakers:
            return access_not_permitted(request)

        if "vote_submit" in request.POST:
            review_form = ReviewForm(request.POST)
            if review_form.is_valid():

                review = review_form.save(commit=False)
                review.user = request.user
                review.proposal = proposal
                review.save()

                return redirect(request.path)
            else:
                # Invalid vote: fall through and re-render with errors.
                message_form = SpeakerCommentForm()
        elif "message_submit" in request.POST:
            message_form = SpeakerCommentForm(request.POST)
            if message_form.is_valid():

                message = message_form.save(commit=False)
                message.user = request.user
                message.proposal = proposal
                message.save()

                # Notify every speaker with an email address.
                for speaker in speakers:
                    if speaker and speaker.email:
                        ctx = {
                            "proposal": proposal,
                            "message": message,
                            "reviewer": False,
                        }
                        send_email(
                            [speaker.email], "proposal_new_message",
                            context=ctx
                        )

                return redirect(request.path)
            else:
                # Invalid message: rebuild the (unbound) review form.
                initial = {}
                if latest_vote:
                    initial["vote"] = latest_vote.vote
                if request.user in speakers:
                    review_form = None
                else:
                    review_form = ReviewForm(initial=initial)
        elif "result_submit" in request.POST:
            # Only staff may change the proposal's result status.
            if admin:
                result = request.POST["result_submit"]

                if result == "accept":
                    proposal.result.status = "accepted"
                    proposal.result.save()
                elif result == "reject":
                    proposal.result.status = "rejected"
                    proposal.result.save()
                elif result == "undecide":
                    proposal.result.status = "undecided"
                    proposal.result.save()
                elif result == "standby":
                    proposal.result.status = "standby"
                    proposal.result.save()

            return redirect(request.path)
    else:
        initial = {}
        if latest_vote:
            initial["vote"] = latest_vote.vote
        if request.user in speakers:
            review_form = None
        else:
            review_form = ReviewForm(initial=initial)
        message_form = SpeakerCommentForm()

    # Copy aggregate counts onto the proposal for the template.
    proposal.comment_count = proposal.result.comment_count
    proposal.total_votes = proposal.result.vote_count
    proposal.plus_one = proposal.result.plus_one
    proposal.plus_zero = proposal.result.plus_zero
    proposal.minus_zero = proposal.result.minus_zero
    proposal.minus_one = proposal.result.minus_one

    reviews = Review.objects.filter(proposal=proposal).order_by("-submitted_at")
    messages = proposal.messages.order_by("submitted_at")

    return render(request, "symposion/reviews/review_detail.html", {
        "proposal": proposal,
        "latest_vote": latest_vote,
        "reviews": reviews,
        "review_messages": messages,
        "review_form": review_form,
        "message_form": message_form
    })
@login_required
@require_POST
def review_delete(request, pk):
    """Delete a single review, if the user can manage its section.

    Redirects back to the review-detail page of the associated proposal.
    """
    review = get_object_or_404(Review, pk=pk)
    section_slug = review.section.slug

    if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
        return access_not_permitted(request)

    # The original code fetched the same Review a second time here; one
    # lookup is enough. Capture the proposal pk before deleting so the
    # redirect does not depend on the deleted instance's state.
    proposal_pk = review.proposal.pk
    review.delete()

    return redirect("review_detail", pk=proposal_pk)
@login_required
def review_status(request, section_slug=None, key=None):
    """Summarise proposals of a section by review outcome.

    Proposals with at least ``SYMPOSION_VOTE_THRESHOLD`` reviews are bucketed
    into positive / negative / indifferent / controversial groups; anything
    below the threshold lands in ``too_few``.
    """
    if not request.user.has_perm("reviews.can_review_%s" % section_slug):
        return access_not_permitted(request)

    threshold = settings.SYMPOSION_VOTE_THRESHOLD

    ctx = {
        "section_slug": section_slug,
        "vote_threshold": threshold,
    }

    queryset = ProposalBase.objects.select_related("speaker__user", "result").select_subclasses()
    if section_slug:
        queryset = queryset.filter(kind__section__slug=section_slug)

    # Proposals that have reached the review threshold.
    reviewed = queryset.filter(result__vote_count__gte=threshold)

    proposals = {
        # At least one +1 and no -1s: best score first.
        "positive": reviewed.filter(
            result__plus_one__gt=0, result__minus_one=0
        ).order_by("-result__score"),
        # At least one -1 and no +1s: lowest score first.
        "negative": reviewed.filter(
            result__minus_one__gt=0, result__plus_one=0
        ).order_by("result__score"),
        # Neither a +1 nor a -1: fewest total votes first.
        "indifferent": reviewed.filter(
            result__minus_one=0, result__plus_one=0
        ).order_by("result__vote_count"),
        # Both a +1 and a -1: most total votes first.
        "controversial": reviewed.filter(
            result__plus_one__gt=0, result__minus_one__gt=0
        ).order_by("-result__vote_count"),
        # Fewer reviews than the threshold.
        "too_few": queryset.filter(
            result__vote_count__lt=threshold
        ).order_by("result__vote_count"),
    }

    admin = request.user.has_perm("reviews.can_manage_%s" % section_slug)

    for status in proposals:
        if key and key != status:
            continue
        proposals[status] = list(
            proposals_generator(request, proposals[status], check_speaker=not admin)
        )

    if key:
        ctx.update({
            "key": key,
            "proposals": proposals[key],
        })
    else:
        ctx["proposals"] = proposals

    return render(request, "symposion/reviews/review_stats.html", ctx)
@login_required
def review_assignments(request):
    """List the current user's active (not opted-out) review assignments."""
    # Only members of the "reviewers" group may see assignments.
    if not request.user.groups.filter(name="reviewers").exists():
        return access_not_permitted(request)

    active_assignments = ReviewAssignment.objects.filter(
        user=request.user,
        opted_out=False,
    )

    return render(
        request,
        "symposion/reviews/review_assignment.html",
        {"assignments": active_assignments},
    )
@login_required
@require_POST
def review_assignment_opt_out(request, pk):
    """Opt the current user out of one of their review assignments.

    Opting out triggers automatic re-assignment of the proposal so the
    review workload is redistributed.
    """
    assignment = get_object_or_404(ReviewAssignment, pk=pk, user=request.user)

    # Only act the first time; an already opted-out assignment is a no-op.
    if not assignment.opted_out:
        assignment.opted_out = True
        assignment.save()
        ReviewAssignment.create_assignments(
            assignment.proposal, origin=ReviewAssignment.AUTO_ASSIGNED_LATER
        )

    return redirect("review_assignments")
@login_required
def review_bulk_accept(request, section_slug):
    """Accept a comma-separated list of proposals in a single action."""
    if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
        return access_not_permitted(request)

    if request.method == "POST":
        form = BulkPresentationForm(request.POST)
        if form.is_valid():
            ids = form.cleaned_data["talk_ids"].split(",")
            accepted = ProposalBase.objects.filter(id__in=ids).select_related("result")
            for talk in accepted:
                talk.result.status = "accepted"
                talk.result.save()
            return redirect("review_section", section_slug=section_slug)
        # Invalid form falls through and is re-rendered with errors.
    else:
        form = BulkPresentationForm()

    return render(
        request,
        "symposion/reviews/review_bulk_accept.html",
        {"form": form},
    )
@login_required
def result_notification(request, section_slug, status):
    """Show proposals carrying a given result status, plus the notification
    templates available for mailing their speakers."""
    if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
        return access_not_permitted(request)

    matching_proposals = (
        ProposalBase.objects
        .filter(kind__section__slug=section_slug, result__status=status)
        .select_related("speaker__user", "result")
        .select_subclasses()
    )

    ctx = {
        "section_slug": section_slug,
        "status": status,
        "proposals": matching_proposals,
        "notification_templates": NotificationTemplate.objects.all(),
    }
    return render(request, "symposion/reviews/result_notification.html", ctx)
@login_required
def result_notification_prepare(request, section_slug, status):
    """Build the notification preview for a set of selected proposals."""
    if request.method != "POST":
        return HttpResponseNotAllowed(["POST"])
    if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
        return access_not_permitted(request)

    # Selected proposal pks come from the admin-style checkbox widget; a
    # non-integer value means a malformed request.
    try:
        proposal_pks = [int(pk) for pk in request.POST.getlist("_selected_action")]
    except ValueError:
        return HttpResponseBadRequest()

    proposals = ProposalBase.objects.filter(
        kind__section__slug=section_slug,
        result__status=status,
    )
    proposals = (
        proposals.filter(pk__in=proposal_pks)
        .select_related("speaker__user", "result")
        .select_subclasses()
    )

    template_pk = request.POST.get("notification_template", "")
    if template_pk:
        notification_template = NotificationTemplate.objects.get(pk=template_pk)
    else:
        notification_template = None

    ctx = {
        "section_slug": section_slug,
        "status": status,
        "notification_template": notification_template,
        "proposals": proposals,
        "proposal_pks": ",".join(str(pk) for pk in proposal_pks),
    }
    return render(request, "symposion/reviews/result_notification_prepare.html", ctx)
@login_required
def result_notification_send(request, section_slug, status):
    """Render and send result-notification emails for selected proposals."""
    if request.method != "POST":
        return HttpResponseNotAllowed(["POST"])
    if not request.user.has_perm("reviews.can_manage_%s" % section_slug):
        return access_not_permitted(request)

    required_fields = ["proposal_pks", "from_address", "subject", "body"]
    if not all(field in request.POST for field in required_fields):
        return HttpResponseBadRequest()

    try:
        proposal_pks = [int(pk) for pk in request.POST["proposal_pks"].split(",")]
    except ValueError:
        return HttpResponseBadRequest()

    proposals = ProposalBase.objects.filter(
        kind__section__slug=section_slug,
        result__status=status,
    )
    proposals = (
        proposals.filter(pk__in=proposal_pks)
        .select_related("speaker__user", "result")
        .select_subclasses()
    )

    template_pk = request.POST.get("notification_template", "")
    if template_pk:
        notification_template = NotificationTemplate.objects.get(pk=template_pk)
    else:
        notification_template = None

    # Persist one ResultNotification per proposal and collect the rendered
    # email tuples for a single bulk send.
    emails = []
    for proposal in proposals:
        notification = ResultNotification()
        notification.proposal = proposal
        notification.template = notification_template
        notification.to_address = proposal.speaker_email
        notification.from_address = request.POST["from_address"]
        notification.subject = request.POST["subject"]
        # The body is a Django template rendered against the proposal's
        # notification context.
        notification.body = Template(request.POST["body"]).render(
            Context({
                "proposal": proposal.notification_email_context()
            })
        )
        notification.save()
        emails.append(notification.email_args)

    send_mass_mail(emails)

    return redirect("result_notification", section_slug=section_slug, status=status)
| bsd-3-clause | 669676fa08c2f5fae7b939aa009239e0 | 35.319688 | 162 | 0.625805 | 3.967632 | false | false | false | false |
tryolabs/luminoth | luminoth/utils/anchors.py | 1 | 1818 | import numpy as np
def generate_anchors_reference(base_size, aspect_ratios, scales):
    """Generate base anchors to be used as reference when generating all anchors.

    Anchors vary only in width and height. Using the base_size and the
    different ratios we can calculate the wanted widths and heights.

    Scales apply to area of object.

    Args:
        base_size (int): Base size of the base anchor (square).
        aspect_ratios: Ratios to use to generate different anchors. The ratio
            is the value of height / width.
        scales: Scaling ratios applied to area.

    Returns:
        anchors: Numpy array with shape (total_aspect_ratios * total_scales, 4)
            with the corner points of the reference base anchors using the
            convention (x_min, y_min, x_max, y_max).

    Raises:
        ValueError: If ``base_size`` is so small that some anchor would have
            zero (truncated) width or height.
    """
    scales_grid, aspect_ratios_grid = np.meshgrid(scales, aspect_ratios)
    base_scales = scales_grid.reshape(-1)
    base_aspect_ratios = aspect_ratios_grid.reshape(-1)

    aspect_ratio_sqrts = np.sqrt(base_aspect_ratios)
    heights = base_scales * aspect_ratio_sqrts * base_size
    widths = base_scales / aspect_ratio_sqrts * base_size

    # Center point has the same X, Y value.
    center_xy = 0

    # Create anchor reference.
    anchors = np.column_stack([
        center_xy - (widths - 1) / 2,
        center_xy - (heights - 1) / 2,
        center_xy + (widths - 1) / 2,
        center_xy + (heights - 1) / 2,
    ])

    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented replacement for this truncating cast.
    real_heights = (anchors[:, 3] - anchors[:, 1]).astype(int)
    real_widths = (anchors[:, 2] - anchors[:, 0]).astype(int)

    if (real_widths == 0).any() or (real_heights == 0).any():
        raise ValueError(
            'base_size {} is too small for aspect_ratios and scales.'.format(
                base_size
            )
        )

    return anchors
| bsd-3-clause | 12fb1fcdb1e86445d0c491200af87174 | 33.961538 | 79 | 0.622662 | 3.695122 | false | false | false | false |
tryolabs/luminoth | luminoth/models/base/truncated_base_network.py | 1 | 6666 | import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import resnet_utils, resnet_v1
from luminoth.models.base import BaseNetwork
# Default layer at which to truncate each supported base architecture when
# the configuration does not specify an explicit `endpoint`.
DEFAULT_ENDPOINTS = {
    'resnet_v1_50': 'block3',
    'resnet_v1_101': 'block3',
    'resnet_v1_152': 'block3',
    'resnet_v2_50': 'block3',
    'resnet_v2_101': 'block3',
    'resnet_v2_152': 'block3',
    'vgg_16': 'conv5/conv5_3',
}
class TruncatedBaseNetwork(BaseNetwork):
    """
    Feature extractor for images using a regular CNN.

    By using the notion of an "endpoint", we truncate a classification CNN at
    a certain layer output, and return this partial feature map to be used as
    a good image representation for other ML tasks.
    """

    def __init__(self, config, name='truncated_base_network', **kwargs):
        """
        Args:
            config: Configuration object carrying (at least) `architecture`,
                an optional `endpoint` override, and the `use_tail` /
                `freeze_tail` flags.
            name: Name for the Sonnet module.
        """
        super(TruncatedBaseNetwork, self).__init__(config, name=name, **kwargs)
        # Layer at which to truncate: the explicit config value, or the
        # per-architecture default from DEFAULT_ENDPOINTS.
        self._endpoint = (
            config.endpoint or DEFAULT_ENDPOINTS[config.architecture]
        )
        # Fully-scoped endpoint name as it appears among the end points
        # produced by the base network's forward pass.
        self._scope_endpoint = '{}/{}/{}'.format(
            self.module_name, config.architecture, self._endpoint
        )
        self._freeze_tail = config.freeze_tail
        self._use_tail = config.use_tail

    def _build(self, inputs, is_training=False):
        """
        Args:
            inputs: A Tensor of shape `(batch_size, height, width, channels)`.

        Returns:
            feature_map: A Tensor of shape
                `(batch_size, feature_map_height, feature_map_width, depth)`.
                The resulting dimensions depend on the CNN architecture, the
                endpoint used, and the dimensions of the input images.
        """
        pred = super(TruncatedBaseNetwork, self)._build(
            inputs, is_training=is_training
        )
        # Only the activations at the configured endpoint are returned; the
        # layers past the endpoint are effectively discarded.
        return self._get_endpoint(dict(pred['end_points']))

    def _build_tail(self, inputs, is_training=False):
        """Build the "tail" of the network on top of `inputs`.

        For `resnet_v1_101` this re-creates `block4` (three bottleneck units
        of depth 2048, stride 1); for every other architecture — or when
        `use_tail` is disabled — the inputs are returned unchanged.
        """
        if not self._use_tail:
            return inputs

        if self._architecture == 'resnet_v1_101':
            train_batch_norm = (
                is_training and self._config.get('train_batch_norm')
            )
            with self._enter_variable_scope():
                weight_decay = (
                    self._config.get('arg_scope', {}).get('weight_decay', 0)
                )
                # NOTE(review): `reuse=True` implies the block4 variables
                # already exist under this scope — presumably created when
                # the base network was built; confirm against BaseNetwork.
                with tf.variable_scope(self._architecture, reuse=True):
                    resnet_arg_scope = resnet_utils.resnet_arg_scope(
                        batch_norm_epsilon=1e-5,
                        batch_norm_scale=True,
                        weight_decay=weight_decay
                    )
                    with slim.arg_scope(resnet_arg_scope):
                        with slim.arg_scope(
                            [slim.batch_norm], is_training=train_batch_norm
                        ):
                            blocks = [
                                resnet_utils.Block(
                                    'block4',
                                    resnet_v1.bottleneck,
                                    [{
                                        'depth': 2048,
                                        'depth_bottleneck': 512,
                                        'stride': 1
                                    }] * 3
                                )
                            ]
                            proposal_classifier_features = (
                                resnet_utils.stack_blocks_dense(inputs, blocks)
                            )
        else:
            proposal_classifier_features = inputs

        return proposal_classifier_features

    def get_trainable_vars(self):
        """
        Returns a list of the variables that are trainable.

        Trainability is limited to the variables up to (and including) the
        configured endpoint, plus — when the tail is used and not frozen —
        the `block4` tail variables.

        Returns:
            trainable_variables: a tuple of `tf.Variable`.
        """
        all_trainable = super(TruncatedBaseNetwork, self).get_trainable_vars()

        # Get the index of the last endpoint scope variable.
        # For example, if the endpoint for ResNet-50 is set as
        # "block4/unit_3/bottleneck_v1/conv2", then it will get 155,
        # because the variables (with their indexes) are:
        #     153 block4/unit_3/bottleneck_v1/conv2/weights:0
        #     154 block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta:0
        #     155 block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma:0
        var_iter = enumerate(v.name for v in all_trainable)
        scope_var_index_iter = (
            i for i, name in var_iter if self._endpoint in name
        )
        # Exhaust the generator so `index` ends up holding the LAST match.
        index = None
        for index in scope_var_index_iter:
            pass

        if index is None:
            # Resulting `trainable_vars` is empty, possibly due to the
            # `fine_tune_from` starting after the endpoint.
            trainable_vars = tuple()
        else:
            trainable_vars = all_trainable[:index + 1]

        if self._use_tail and not self._freeze_tail:
            if self._architecture == 'resnet_v1_101':
                # Retrieve the trainable vars out of the tail.
                # TODO: Tail should be configurable too, to avoid hard-coding
                # the trainable portion to `block4` and allow using something
                # in block4 as endpoint.
                var_iter = enumerate(v.name for v in all_trainable)
                try:
                    index = next(i for i, name in var_iter if 'block4' in name)
                except StopIteration:
                    raise ValueError(
                        '"block4" not present in the trainable vars retrieved '
                        'from base network.'
                    )
                trainable_vars += all_trainable[index:]

        return trainable_vars

    def _get_endpoint(self, endpoints):
        """
        Returns the endpoint tensor from the list of possible endpoints.

        Since we already have a dictionary with variable names we should be
        able to get the desired tensor directly. Unfortunately the variable
        names change with scope and the scope changes between TensorFlow
        versions. We opted to just select the tensor for which the variable
        name ends with the endpoint name we want (it should be just one).

        Args:
            endpoints: a dictionary with {variable_name: tensor}.

        Returns:
            endpoint_value: a tensor.

        Raises:
            ValueError: when no endpoint key matches `self._scope_endpoint`.
        """
        for endpoint_key, endpoint_value in endpoints.items():
            if endpoint_key.endswith(self._scope_endpoint):
                return endpoint_value

        raise ValueError(
            '"{}" is an invalid value of endpoint for this '
            'architecture.'.format(self._scope_endpoint)
        )
| bsd-3-clause | 394ca1017b5ce0b619437d37456ec0ed | 38.443787 | 79 | 0.539904 | 4.559508 | false | true | false | false |
tryolabs/luminoth | luminoth/models/fasterrcnn/rcnn_target.py | 1 | 12823 | import tensorflow as tf
import sonnet as snt
from luminoth.utils.bbox_transform_tf import encode
from luminoth.utils.bbox_overlap import bbox_overlap_tf
class RCNNTarget(snt.AbstractModule):
    """Generate RCNN target tensors for both probabilities and bounding boxes.

    Targets for RCNN are based upon the results of the RPN, this can get tricky
    in the sense that RPN results might not be the best and it might not be
    possible to have the ideal amount of targets for all the available ground
    truth boxes.

    There are two types of targets, class targets and bounding box targets.

    Class targets are used both for background and foreground, while bounding
    box targets are only used for foreground (since it's not possible to create
    a bounding box of "background objects").

    A minibatch size determines how many targets are going to be generated and
    how many are going to be ignored. RCNNTarget is responsible for choosing
    which proposals and corresponding targets are included in the minibatch and
    which ones are completely ignored.
    """

    def __init__(self, num_classes, config, seed=None, variances=None,
                 name='rcnn_proposal'):
        """
        Args:
            num_classes: Number of possible classes.
            config: Configuration object for RCNNTarget.
            seed: Seed for the random shuffles used when sub-sampling
                foreground/background proposals (for reproducibility).
            variances: Variances forwarded to `encode` when computing the
                bounding box regression targets.
            name: Name of the Sonnet module.
        """
        super(RCNNTarget, self).__init__(name=name)
        self._num_classes = num_classes
        self._variances = variances
        # Ratio of foreground vs background for the minibatch.
        self._foreground_fraction = config.foreground_fraction
        self._minibatch_size = config.minibatch_size
        # IoU lower threshold with a ground truth box to be considered that
        # specific class.
        self._foreground_threshold = config.foreground_threshold
        # High and low threshold to be considered background.
        self._background_threshold_high = config.background_threshold_high
        self._background_threshold_low = config.background_threshold_low
        self._seed = seed

    def _build(self, proposals, gt_boxes):
        """
        Args:
            proposals: A Tensor with the RPN bounding boxes proposals.
                The shape of the Tensor is (num_proposals, 4).
            gt_boxes: A Tensor with the ground truth boxes for the image.
                The shape of the Tensor is (num_gt, 5), having the truth label
                as the last value for each box.
        Returns:
            proposals_label: Either a truth value of the proposals (a value
                between 0 and num_classes, with 0 being background), or -1 when
                the proposal is to be ignored in the minibatch.
                The shape of the Tensor is (num_proposals, 1).
            bbox_targets: A bounding box regression target for each of the
                proposals that have and greater than zero label. For every
                other proposal we return zeros.
                The shape of the Tensor is (num_proposals, 4).
        """
        overlaps = bbox_overlap_tf(proposals, gt_boxes[:, :4])
        # overlaps now contains (num_proposals, num_gt_boxes) with the IoU of
        # proposal P and ground truth box G in overlaps[P, G]

        # We are going to label each proposal based on the IoU with
        # `gt_boxes`. Start by filling the labels with -1, marking them as
        # ignored.
        proposals_label_shape = tf.gather(tf.shape(proposals), [0])
        proposals_label = tf.fill(
            dims=proposals_label_shape,
            value=-1.
        )
        # For each overlap there is three possible outcomes for labelling:
        #  if max(iou) < config.background_threshold_low then we ignore.
        #  elif max(iou) <= config.background_threshold_high then we label
        #      background.
        #  elif max(iou) > config.foreground_threshold then we label with
        #      the highest IoU in overlap.
        #
        # max_overlaps gets, for each proposal, the index in which we can
        # find the gt_box with which it has the highest overlap.
        max_overlaps = tf.reduce_max(overlaps, axis=1)

        iou_is_high_enough_for_bg = tf.greater_equal(
            max_overlaps, self._background_threshold_low
        )
        iou_is_not_too_high_for_bg = tf.less(
            max_overlaps, self._background_threshold_high
        )
        bg_condition = tf.logical_and(
            iou_is_high_enough_for_bg, iou_is_not_too_high_for_bg
        )
        proposals_label = tf.where(
            condition=bg_condition,
            x=tf.zeros_like(proposals_label, dtype=tf.float32),
            y=proposals_label
        )

        # Get the index of the best gt_box for each proposal.
        overlaps_best_gt_idxs = tf.argmax(overlaps, axis=1)
        # Having the index of the gt bbox with the best label we need to get
        # the label for each gt box and sum it one because 0 is used for
        # background.
        best_fg_labels_for_proposals = tf.add(
            tf.gather(gt_boxes[:, 4], overlaps_best_gt_idxs),
            1.
        )
        iou_is_fg = tf.greater_equal(
            max_overlaps, self._foreground_threshold
        )
        best_proposals_idxs = tf.argmax(overlaps, axis=0)

        # Set the indices in best_proposals_idxs to True, and the rest to
        # false.
        # tf.sparse_to_dense is used because we know the set of indices which
        # we want to set to True, and we know the rest of the indices
        # should be set to False. That's exactly the use case of
        # tf.sparse_to_dense.
        is_best_box = tf.sparse_to_dense(
            sparse_indices=tf.reshape(best_proposals_idxs, [-1]),
            sparse_values=True, default_value=False,
            output_shape=tf.cast(proposals_label_shape, tf.int64),
            validate_indices=False
        )
        # We update proposals_label with the value in
        # best_fg_labels_for_proposals only when the box is foreground.
        proposals_label = tf.where(
            condition=iou_is_fg,
            x=best_fg_labels_for_proposals,
            y=proposals_label
        )
        # Now we need to find the proposals that are the best for each of the
        # gt_boxes. We overwrite the previous proposals_label with this
        # because setting the best proposal for each gt_box has priority.
        best_proposals_gt_labels = tf.sparse_to_dense(
            sparse_indices=tf.reshape(best_proposals_idxs, [-1]),
            sparse_values=gt_boxes[:, 4] + 1,
            default_value=0.,
            output_shape=tf.cast(proposals_label_shape, tf.int64),
            validate_indices=False,
            name="get_right_labels_for_bestboxes"
        )
        proposals_label = tf.where(
            condition=is_best_box,
            x=best_proposals_gt_labels,
            y=proposals_label,
            name="update_labels_for_bestbox_proposals"
        )

        # proposals_label now has a value in [0, num_classes + 1] for
        # proposals we are going to use and -1 for the ones we should ignore.
        # But we still need to make sure we don't have a number of proposals
        # higher than minibatch_size * foreground_fraction.
        max_fg = int(self._foreground_fraction * self._minibatch_size)
        fg_condition = tf.logical_or(
            iou_is_fg, is_best_box
        )
        fg_inds = tf.where(
            condition=fg_condition
        )

        def disable_some_fgs():
            # We want to delete a randomly-selected subset of fg_inds of
            # size `fg_inds.shape[0] - max_fg`.
            # We shuffle along the dimension 0 and then we get the first
            # num_fg_inds - max_fg indices and we disable them.
            shuffled_inds = tf.random_shuffle(fg_inds, seed=self._seed)
            disable_place = (tf.shape(fg_inds)[0] - max_fg)
            # This function should never run if num_fg_inds <= max_fg, so we
            # add an assertion to catch the wrong behaviour if it happens.
            integrity_assertion = tf.assert_positive(
                disable_place,
                message="disable_place in disable_some_fgs is negative."
            )
            with tf.control_dependencies([integrity_assertion]):
                disable_inds = shuffled_inds[:disable_place]
            is_disabled = tf.sparse_to_dense(
                sparse_indices=disable_inds,
                sparse_values=True, default_value=False,
                output_shape=tf.cast(proposals_label_shape, tf.int64),
                # We are shuffling the indices, so they may not be ordered.
                validate_indices=False
            )
            return tf.where(
                condition=is_disabled,
                # We set it to -label for debugging purposes.
                x=tf.negative(proposals_label),
                y=proposals_label
            )

        # Disable some fgs if we have too many foregrounds.
        proposals_label = tf.cond(
            tf.greater(tf.shape(fg_inds)[0], max_fg),
            true_fn=disable_some_fgs,
            false_fn=lambda: proposals_label
        )

        total_fg_in_batch = tf.shape(
            tf.where(
                condition=tf.greater(proposals_label, 0)
            )
        )[0]

        # Now we want to do the same for backgrounds.
        # We calculate up to how many backgrounds we desire based on the
        # final number of foregrounds and the total desired batch size.
        max_bg = self._minibatch_size - total_fg_in_batch

        # We can't use bg_condition because some of the proposals that satisfy
        # the IoU conditions to be background may have been labeled as
        # foreground due to them being the best proposal for a certain gt_box.
        bg_mask = tf.equal(proposals_label, 0)
        bg_inds = tf.where(
            condition=bg_mask,
        )

        def disable_some_bgs():
            # Mutatis mutandis, all comments from disable_some_fgs apply.
            shuffled_inds = tf.random_shuffle(bg_inds, seed=self._seed)
            disable_place = (tf.shape(bg_inds)[0] - max_bg)
            integrity_assertion = tf.assert_non_negative(
                disable_place,
                message="disable_place in disable_some_bgs is negative."
            )
            with tf.control_dependencies([integrity_assertion]):
                disable_inds = shuffled_inds[:disable_place]
            is_disabled = tf.sparse_to_dense(
                sparse_indices=disable_inds,
                sparse_values=True, default_value=False,
                output_shape=tf.cast(proposals_label_shape, tf.int64),
                validate_indices=False
            )
            return tf.where(
                condition=is_disabled,
                x=tf.fill(
                    dims=proposals_label_shape,
                    value=-1.
                ),
                y=proposals_label
            )

        proposals_label = tf.cond(
            tf.greater_equal(tf.shape(bg_inds)[0], max_bg),
            true_fn=disable_some_bgs,
            false_fn=lambda: proposals_label
        )

        """
        Next step is to calculate the proper targets for the proposals labeled
        based on the values of the ground-truth boxes.
        We have to use only the proposals labeled >= 1, each matching with
        the proper gt_boxes
        """

        # Get the ids of the proposals that matter for bbox_target comparisson.
        is_proposal_with_target = tf.greater(
            proposals_label, 0
        )
        proposals_with_target_idx = tf.where(
            condition=is_proposal_with_target
        )
        # Get the corresponding ground truth box only for the proposals with
        # target.
        gt_boxes_idxs = tf.gather(
            overlaps_best_gt_idxs,
            proposals_with_target_idx
        )
        # Get the values of the ground truth boxes.
        proposals_gt_boxes = tf.gather_nd(
            gt_boxes[:, :4], gt_boxes_idxs
        )
        # We create the same array but with the proposals
        proposals_with_target = tf.gather_nd(
            proposals,
            proposals_with_target_idx
        )
        # We create our targets with bbox_transform.
        bbox_targets_nonzero = encode(
            proposals_with_target,
            proposals_gt_boxes,
            variances=self._variances,
        )

        # We unmap targets to proposal_labels (containing the length of
        # proposals)
        bbox_targets = tf.scatter_nd(
            indices=proposals_with_target_idx,
            updates=bbox_targets_nonzero,
            shape=tf.cast(tf.shape(proposals), tf.int64)
        )

        # (The original code had two no-op self-assignments here; removed.)
        return proposals_label, bbox_targets
| bsd-3-clause | 2768b13189b64bd3eecf7561ddf69662 | 41.886288 | 79 | 0.59908 | 4.019749 | false | false | false | false |
cclib/cclib | cclib/method/bader.py | 3 | 9345 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of Bader's QTAIM charges based on data parsed by cclib."""
import copy
import random
import numpy
import logging
import math
from cclib.method.calculationmethod import Method
from cclib.method.volume import electrondensity_spin
from cclib.parser.utils import convertor
# Distance between two adjacent grids (sqrt[2] or sqrt[3] for uniform Cartesian grid).
# Each entry is the Euclidean distance, in grid units, from the center of a
# 3x3x3 neighborhood to the corresponding neighbor: 1 for face neighbors,
# sqrt(2) (~1.41421) for edge neighbors, and sqrt(3) (~1.73205) for corners.
# The center entry is 1 rather than 0 — presumably to avoid a 0/0 when this
# array divides the (zero) central density difference in Bader.calculate.
_griddist = numpy.array(
    [
        [[1.73205, 1.41421, 1.73205], [1.41421, 1, 1.41421], [1.73205, 1.41421, 1.73205],],
        [[1.41421, 1, 1.41421], [1, 1, 1], [1.41421, 1, 1.41421]],
        [[1.73205, 1.41421, 1.73205], [1.41421, 1, 1.41421], [1.73205, 1.41421, 1.73205],],
    ],
    dtype=float,
)
class MissingInputError(Exception):
    """Error for missing required input (not raised in this module's visible code)."""
    pass
def __cartesian_dist(pt1, pt2):
    """Return the Euclidean distance between corresponding points.

    ``pt1`` and ``pt2`` are numpy arrays of Cartesian coordinates with one
    point per column; the result is a 1-D array of per-column distances.
    """
    diff = pt1 - pt2
    return numpy.sqrt((diff * diff).sum(axis=0))
class Bader(Method):
    """Bader's QTAIM charges."""

    # All of these are required for QTAIM charges.
    required_attrs = ("homos", "mocoeffs", "nbasis", "gbasis")

    def __init__(self, data, volume, progress=None, loglevel=logging.INFO, logname="Log"):
        """
        Args:
            data: parsed cclib data object (must provide `required_attrs`,
                plus `coreelectrons`, `atomnos` and `atomcoords`).
            volume: a Volume object defining the grid; if it already carries
                charge densities they are reused, otherwise densities are
                computed onto it in `calculate`.
            progress, loglevel, logname: forwarded to the Method base class.
        """
        super().__init__(data, progress, loglevel, logname)

        self.volume = volume
        # Grid of Bader-region indices; filled in by `calculate`.
        self.fragresults = None

        if numpy.sum(self.data.coreelectrons) != 0:
            # Pseudopotentials can cause Bader spaces to be inaccurate, as suggested by the
            # original paper.
            self.logger.info(
                "It looks like pseudopotentials were used to generate this output. Please note that the Bader charges may not be accurate and may report unexpected results. Consult the original paper (doi:10.1016/j.commatsci.2005.04.010) for more information."
            )

    def __str__(self):
        """Return a string representation of the object."""
        return f"Bader's QTAIM charges of {self.data}"

    def __repr__(self):
        """Return a representation of the object."""
        return f"Bader({self.data})"

    def _check_required_attributes(self):
        # No checks beyond the base class; kept for symmetry with other
        # Method subclasses.
        super()._check_required_attributes()

    def calculate(self, indices=None, fupdate=0.05):
        """Calculate Bader's QTAIM charges using on-grid algorithm proposed by Henkelman group
        in doi:10.1016/j.commatsci.2005.04.010

        Cartesian, uniformly spaced grids are assumed for this function.

        NOTE(review): the `indices` parameter is never used — it is rebound
        to a grid-point generator below — and `fupdate` is unused as well;
        both appear to be kept only for interface compatibility.

        Side effects: sets `chgdensity`, `fragresults`, `matches` and
        `fragcharges`. Returns True on completion.
        """
        # Obtain charge densities on the grid if it does not contain one.
        if not numpy.any(self.volume.data):
            self.logger.info("Calculating charge densities on the provided empty grid.")
            # NOTE(review): the slices below use homos[i] as an EXCLUSIVE
            # bound — confirm whether the orbital at index homos[i] should
            # be included.
            if len(self.data.mocoeffs) == 1:
                # Restricted case: double the alpha-spin density.
                self.chgdensity = electrondensity_spin(
                    self.data, self.volume, [self.data.mocoeffs[0][: self.data.homos[0]]]
                )
                self.chgdensity.data *= 2
            else:
                # Unrestricted case: alpha and beta densities are summed.
                self.chgdensity = electrondensity_spin(
                    self.data,
                    self.volume,
                    [
                        self.data.mocoeffs[0][: self.data.homos[0]],
                        self.data.mocoeffs[1][: self.data.homos[1]],
                    ],
                )

        # If charge densities are provided beforehand, log this information
        # `Volume` object does not contain (nor rely on) information about the constituent atoms.
        else:
            self.logger.info("Using charge densities from the provided Volume object.")
            self.chgdensity = self.volume

        # Assign each grid point to Bader areas
        self.fragresults = numpy.zeros(self.chgdensity.data.shape, "d")
        # Next unused Bader-region index (0 means "unassigned").
        next_index = 1

        self.logger.info("Partitioning space into Bader areas.")

        # Generator to iterate over the elements excluding the outermost positions
        xshape, yshape, zshape = self.chgdensity.data.shape
        indices = (
            (x, y, z)
            for x in range(1, xshape - 1)
            for y in range(1, yshape - 1)
            for z in range(1, zshape - 1)
        )

        for xindex, yindex, zindex in indices:
            if self.fragresults[xindex, yindex, zindex] != 0:
                # index has already been assigned for this grid point
                continue
            else:
                # Walk the steepest-ascent path starting at this grid point,
                # collecting every visited point in `listcoord`.
                listcoord = []
                local_max_reached = False

                while not local_max_reached:
                    # Here, `delta_rho` corresponds to equation 2,
                    # and `grad_rho_dot_r` corresponds to equation 1 in the aforementioned
                    # paper (doi:10.1016/j.commatsci.2005.04.010)
                    delta_rho = (
                        self.chgdensity.data[
                            xindex - 1 : xindex + 2,
                            yindex - 1 : yindex + 2,
                            zindex - 1 : zindex + 2,
                        ]
                        - self.chgdensity.data[xindex, yindex, zindex]
                    )
                    grad_rho_dot_r = delta_rho / _griddist
                    maxat = numpy.where(grad_rho_dot_r == numpy.amax(grad_rho_dot_r))
                    directions = list(zip(maxat[0], maxat[1], maxat[2]))
                    # Offsets are in [-1, 0, 1] after shifting from the 3x3x3
                    # neighborhood indices.
                    next_direction = [ind - 1 for ind in directions[0]]

                    if len(directions) > 1:
                        # when one or more directions indicate max grad (of 0), prioritize
                        # to include all points in the Bader space
                        # NOTE(review): `directions[0]` is a tuple (from zip),
                        # so comparing it to the list [1, 1, 1] is always
                        # False — confirm the intended behavior here.
                        if directions[0] == [1, 1, 1]:
                            next_direction = [ind - 1 for ind in directions[1]]

                    listcoord.append((xindex, yindex, zindex))
                    bader_candidate_index = self.fragresults[
                        xindex + next_direction[0],
                        yindex + next_direction[1],
                        zindex + next_direction[2],
                    ]

                    if bader_candidate_index != 0:
                        # Path arrived at a point that has already been assigned with an index
                        bader_index = bader_candidate_index
                        listcoord = tuple(numpy.array(listcoord).T)
                        self.fragresults[listcoord] = bader_index
                        local_max_reached = True
                    elif (
                        next_direction == [0, 0, 0]
                        or xindex + next_direction[0] == 0
                        or xindex + next_direction[0] == (len(self.chgdensity.data) - 1)
                        or yindex + next_direction[1] == 0
                        or yindex + next_direction[1] == (len(self.chgdensity.data[0]) - 1)
                        or zindex + next_direction[2] == 0
                        or zindex + next_direction[2] == (len(self.chgdensity.data[0][0]) - 1)
                    ):
                        # When next_direction is [0, 0, 0] -- local maximum
                        # Other conditions indicate that the path is heading out to edge of
                        # the grid. Here, assign new Bader space to avoid exiting the grid.
                        bader_index = next_index
                        next_index += 1
                        listcoord = tuple(numpy.array(listcoord).T)
                        self.fragresults[listcoord] = bader_index
                        local_max_reached = True
                    else:
                        # Advance to the next point according to the direction of
                        # maximum gradient
                        xindex += next_direction[0]
                        yindex += next_direction[1]
                        zindex += next_direction[2]

        # Now try to identify each Bader region to individual atom.
        # Try to find an area that captures enough representation
        self.matches = numpy.zeros_like(self.data.atomnos)
        for pos in range(len(self.data.atomcoords[-1])):
            # Nearest grid point to the atom position (in grid units).
            gridpt = numpy.round(
                (self.data.atomcoords[-1][pos] - self.volume.origin) / self.volume.spacing
            )
            xgrid = int(gridpt[0])
            ygrid = int(gridpt[1])
            zgrid = int(gridpt[2])
            self.matches[pos] = self.fragresults[xgrid, ygrid, zgrid]

        assert (
            0 not in self.matches
        ), f"Failed to assign Bader regions to atoms. Try with a finer grid. Content of Bader area matches: {self.matches}"
        # NOTE(review): the parenthesization below looks wrong — it takes
        # len() of a boolean comparison array, which is truthy whenever the
        # array is non-empty, so this uniqueness check never fires. Likely
        # intended: len(numpy.unique(self.matches)) == len(self.data.atomnos).
        assert len(
            numpy.unique(self.matches) != len(self.data.atomnos)
        ), "Failed to assign unique Bader regions to each atom. Try with a finer grid."

        # Finally integrate the assigned Bader areas
        self.logger.info("Creating fragcharges: array[1]")
        self.fragcharges = numpy.zeros(len(self.data.atomcoords[-1]), "d")

        for atom_index, baderarea_index in enumerate(self.matches):
            self.fragcharges[atom_index] = self.chgdensity.integrate(
                weights=(self.fragresults == baderarea_index)
            )

        return True
| bsd-3-clause | a46efcbb9981b845ea6219abe29b36aa | 41.866972 | 260 | 0.547352 | 4.063043 | false | false | false | false |
cclib/cclib | cclib/parser/nwchemparser.py | 2 | 64319 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for NWChem output files"""
import itertools
import re
import numpy
from cclib.parser import logfileparser
from cclib.parser import utils
class NWChem(logfileparser.Logfile):
"""An NWChem log file."""
    def __init__(self, *args, **kwargs):
        """Initialize the NWChem parser.

        All positional and keyword arguments are forwarded to the
        `logfileparser.Logfile` base class, with `logname` fixed to "NWChem".
        """
        super().__init__(logname="NWChem", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return f"NWChem log file {self.filename}"
def __repr__(self):
"""Return a representation of the object."""
return f'NWChem("{self.filename}")'
def normalisesym(self, label):
    """Return *label* unchanged.

    NWChem already prints symmetry labels in the form cclib expects,
    so no normalization is necessary.
    """
    return label
def name2element(self, lbl):
    """Return the leading alphabetic prefix of an atom tag.

    NWChem allows atom tags such as "H1" or "C12"; the element symbol
    is the alphabetic prefix (e.g. "H", "C"), which this extracts by
    taking characters while they are alphabetic.

    Converted from a lambda assignment to a proper method (PEP 8 E731);
    behavior and calling interface are unchanged.
    """
    return "".join(itertools.takewhile(str.isalpha, str(lbl)))
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# search for No. of atoms :
if line[:22] == " No. of atoms":
if not hasattr(self, 'natom'):
natom = int(line[28:])
self.set_attribute('natom', natom)
# Extract the version number and the version control information, if
# it exists.
if "nwchem branch" in line:
base_package_version = line.split()[3]
self.metadata["legacy_package_version"] = base_package_version
self.metadata["package_version"] = base_package_version
line = next(inputfile)
if "nwchem revision" in line:
self.metadata[
"package_version"
] = f"{self.metadata['package_version']}+{line.split()[3].split('-')[-1]}"
# This is printed in the input module, so should always be the first coordinates,
# and contains some basic information we want to parse as well. However, this is not
# the only place where the coordinates are printed during geometry optimization,
# since the gradients module has a separate coordinate printout, which happens
# alongside the coordinate gradients. This geometry printout happens at the
# beginning of each optimization step only.
if line.strip() == 'Geometry "geometry" -> ""' or line.strip() == 'Geometry "geometry" -> "geometry"':
self.skip_lines(inputfile, ['dashes', 'blank', 'units', 'blank', 'header', 'dashes'])
if not hasattr(self, 'atomcoords'):
self.atomcoords = []
line = next(inputfile)
coords = []
atomnos = []
while line.strip():
# The column labeled 'tag' is usually empty, but I'm not sure whether it can have spaces,
# so for now assume that it can and that there will be seven columns in that case.
if len(line.split()) == 6:
index, atomname, nuclear, x, y, z = line.split()
else:
index, atomname, tag, nuclear, x, y, z = line.split()
coords.append(list(map(float, [x, y, z])))
atomnos.append(int(float(nuclear)))
line = next(inputfile)
self.atomcoords.append(coords)
self.set_attribute('atomnos', atomnos)
self.set_attribute('natom', len(atomnos))
if line.strip() == "Symmetry information":
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
assert line[1:11] == "Group name"
point_group_full = line.split()[2].lower()
line = next(inputfile)
assert line[1:13] == "Group number"
line = next(inputfile)
assert line[1:12] == "Group order"
self.pg_order = int(line.split()[2])
# TODO It is unclear what class of point group NWChem
# prints, since it can handle crystallographic groups in
# some places:
# http://www.nwchem-sw.org/index.php/Release66:Geometry
point_group_abelian = point_group_full
self.metadata['symmetry_detected'] = point_group_full
self.metadata['symmetry_used'] = point_group_abelian
# If the geometry is printed in XYZ format, it will have the number of atoms.
if line[12:31] == "XYZ format geometry":
self.skip_line(inputfile, 'dashes')
natom = int(next(inputfile).strip())
self.set_attribute('natom', natom)
if line.strip() == "NWChem Geometry Optimization":
# see cclib/cclib#1057
self.skip_lines(inputfile, ['d', 'b', 'b'])
line = next(inputfile)
if "maximum gradient threshold" not in line:
self.skip_lines(inputfile, ['b', 'title', 'b', 'b'])
line = next(inputfile)
assert "maximum gradient threshold" in line
while line.strip():
if "maximum gradient threshold" in line:
gmax = float(line.split()[-1])
if "rms gradient threshold" in line:
grms = float(line.split()[-1])
if "maximum cartesian step threshold" in line:
xmax = float(line.split()[-1])
if "rms cartesian step threshold" in line:
xrms = float(line.split()[-1])
line = next(inputfile)
self.set_attribute('geotargets', [gmax, grms, xmax, xrms])
# NWChem does not normally print the basis set for each atom, but rather
# chooses the concise option of printing Gaussian coefficients for each
# atom type/element only once. Therefore, we need to first parse those
# coefficients and afterwards build the appropriate gbasis attribute based
# on that and atom types/elements already parsed (atomnos). However, if atom
# are given different names (number after element, like H1 and H2), then NWChem
# generally prints the gaussian parameters for all unique names, like this:
#
# Basis "ao basis" -> "ao basis" (cartesian)
# -----
# O (Oxygen)
# ----------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 1.30709320E+02 0.154329
# 1 S 2.38088610E+01 0.535328
# (...)
#
# H1 (Hydrogen)
# -------------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 3.42525091E+00 0.154329
# (...)
#
# H2 (Hydrogen)
# -------------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 3.42525091E+00 0.154329
# (...)
#
# This current parsing code below assumes all atoms of the same element
# use the same basis set, but that might not be true, and this will probably
# need to be considered in the future when such a logfile appears.
if line.strip() == """Basis "ao basis" -> "ao basis" (cartesian)""":
self.skip_line(inputfile, 'dashes')
gbasis_dict = {}
line = next(inputfile)
while line.strip():
atomname = line.split()[0]
atomelement = self.name2element(atomname)
gbasis_dict[atomelement] = []
self.skip_lines(inputfile, ['d', 'labels', 'd'])
shells = []
line = next(inputfile)
while line.strip() and line.split()[0].isdigit():
shell = None
while line.strip():
nshell, type, exp, coeff = line.split()
nshell = int(nshell)
assert len(shells) == nshell - 1
if not shell:
shell = (type, [])
else:
assert shell[0] == type
exp = float(exp)
coeff = float(coeff)
shell[1].append((exp, coeff))
line = next(inputfile)
shells.append(shell)
line = next(inputfile)
gbasis_dict[atomelement].extend(shells)
gbasis = []
for i in range(self.natom):
atomtype = self.table.element[self.atomnos[i]]
gbasis.append(gbasis_dict[atomtype])
self.set_attribute('gbasis', gbasis)
# Normally the indexes of AOs assigned to specific atoms are also not printed,
# so we need to infer that. We could do that from the previous section,
# it might be worthwhile to take numbers from two different places, hence
# the code below, which builds atombasis based on the number of functions
# listed in this summary of the AO basis. Similar to previous section, here
# we assume all atoms of the same element have the same basis sets, but
# this will probably need to be revised later.
# The section we can glean info about aonmaes looks like:
#
# Summary of "ao basis" -> "ao basis" (cartesian)
# ------------------------------------------------------------------------------
# Tag Description Shells Functions and Types
# ---------------- ------------------------------ ------ ---------------------
# C sto-3g 3 5 2s1p
# H sto-3g 1 1 1s
#
# However, we need to make sure not to match the following entry lines:
#
# * Summary of "ao basis" -> "" (cartesian)
# * Summary of allocated global arrays
#
# Unfortantely, "ao basis" isn't unique because it can be renamed to anything for
# later reference: http://www.nwchem-sw.org/index.php/Basis
# It also appears that we have to handle cartesian vs. spherical
if line[1:11] == "Summary of":
match = re.match(r' Summary of "([^\"]*)" -> "([^\"]*)" \((.+)\)', line)
if match and match.group(1) == match.group(2):
self.skip_lines(inputfile, ['d', 'title', 'd'])
self.shells = {}
self.shells["type"] = match.group(3)
atombasis_dict = {}
line = next(inputfile)
while line.strip():
atomname, desc, shells, funcs, types = line.split()
atomelement = self.name2element(atomname)
self.metadata["basis_set"] = desc
self.shells[atomname] = types
atombasis_dict[atomelement] = int(funcs)
line = next(inputfile)
last = 0
atombasis = []
for atom in self.atomnos:
atomelement = self.table.element[atom]
nfuncs = atombasis_dict[atomelement]
atombasis.append(list(range(last, last+nfuncs)))
last = atombasis[-1][-1] + 1
self.set_attribute('atombasis', atombasis)
if line.strip() == "Symmetry analysis of basis":
self.skip_lines(inputfile, ['d', 'b'])
if not hasattr(self, 'symlabels'):
self.symlabels = []
for _ in range(self.pg_order):
line = next(inputfile)
self.symlabels.append(self.normalisesym(line.split()[0]))
# This section contains general parameters for Hartree-Fock calculations,
# which do not contain the 'General Information' section like most jobs.
if line.strip() == "NWChem SCF Module":
# If the calculation doesn't have a title specified, there
# aren't as many lines to skip here.
self.skip_lines(inputfile, ['d', 'b', 'b'])
line = next(inputfile)
if line.strip():
self.skip_lines(inputfile, ['b', 'b', 'b'])
line = next(inputfile)
while line.strip():
if line[2:8] == "charge":
charge = int(float(line.split()[-1]))
self.set_attribute('charge', charge)
if line[2:13] == "open shells":
unpaired = int(line.split()[-1])
self.set_attribute('mult', 2*unpaired + 1)
if line[2:7] == "atoms":
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
if line[2:11] == "functions":
nfuncs = int(line.split()[-1])
self.set_attribute("nbasis", nfuncs)
line = next(inputfile)
# This section contains general parameters for DFT calculations, as well as
# for the many-electron theory module.
if line.strip() == "General Information":
if hasattr(self, 'linesearch') and self.linesearch:
return
while line.strip():
if "No. of atoms" in line:
self.set_attribute('natom', int(line.split()[-1]))
if "Charge" in line:
self.set_attribute('charge', int(line.split()[-1]))
if "Spin multiplicity" in line:
mult = line.split()[-1]
if mult == "singlet":
mult = 1
self.set_attribute('mult', int(mult))
if "AO basis - number of function" in line:
nfuncs = int(line.split()[-1])
self.set_attribute('nbasis', nfuncs)
# These will be present only in the DFT module.
if "Convergence on energy requested" in line:
target_energy = utils.float(line.split()[-1])
if "Convergence on density requested" in line:
target_density = utils.float(line.split()[-1])
if "Convergence on gradient requested" in line:
target_gradient = utils.float(line.split()[-1])
line = next(inputfile)
# Pretty nasty temporary hack to set scftargets only in the SCF module.
if "target_energy" in dir() and "target_density" in dir() and "target_gradient" in dir():
if not hasattr(self, 'scftargets'):
self.scftargets = []
self.scftargets.append([target_energy, target_density, target_gradient])
#DFT functional information
if "XC Information" in line:
line = next(inputfile)
line = next(inputfile)
self.metadata["functional"] = line.split()[0]
# If the full overlap matrix is printed, it looks like this:
#
# global array: Temp Over[1:60,1:60], handle: -996
#
# 1 2 3 4 5 6
# ----------- ----------- ----------- ----------- ----------- -----------
# 1 1.00000 0.24836 -0.00000 -0.00000 0.00000 0.00000
# 2 0.24836 1.00000 0.00000 -0.00000 0.00000 0.00030
# 3 -0.00000 0.00000 1.00000 0.00000 0.00000 -0.00014
# ...
if "global array: Temp Over[" in line:
self.set_attribute('nbasis', int(line.split('[')[1].split(',')[0].split(':')[1]))
self.set_attribute('nmo', int(line.split(']')[0].split(',')[1].split(':')[1]))
aooverlaps = []
while len(aooverlaps) < self.nbasis:
self.skip_line(inputfile, 'blank')
indices = [int(i) for i in inputfile.next().split()]
assert indices[0] == len(aooverlaps) + 1
self.skip_line(inputfile, "dashes")
data = [inputfile.next().split() for i in range(self.nbasis)]
indices = [int(d[0]) for d in data]
assert indices == list(range(1, self.nbasis+1))
for i in range(1, len(data[0])):
vector = [float(d[i]) for d in data]
aooverlaps.append(vector)
self.set_attribute('aooverlaps', aooverlaps)
if line.strip() in ("The SCF is already converged", "The DFT is already converged"):
if hasattr(self, 'linesearch') and self.linesearch:
return
if hasattr(self, 'scftargets'):
self.scftargets.append(self.scftargets[-1])
if hasattr(self, 'scfvalues'):
self.scfvalues.append(self.scfvalues[-1])
# The default (only?) SCF algorithm for Hartree-Fock is a preconditioned conjugate
# gradient method that apparently "always" converges, so this header should reliably
# signal a start of the SCF cycle. The convergence targets are also printed here.
if line.strip() == "Quadratically convergent ROHF":
if hasattr(self, 'linesearch') and self.linesearch:
return
while not "Final" in line:
# Only the norm of the orbital gradient is used to test convergence.
if line[:22] == " Convergence threshold":
target = float(line.split()[-1])
if not hasattr(self, "scftargets"):
self.scftargets = []
self.scftargets.append([target])
# This is critical for the stop condition of the section,
# because the 'Final Fock-matrix accuracy' is along the way.
# It would be prudent to find a more robust stop condition.
while list(set(line.strip())) != ["-"]:
line = next(inputfile)
if line.split() == ['iter', 'energy', 'gnorm', 'gmax', 'time']:
values = []
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
while line.strip():
it, energy, gnorm, gmax, time = line.split()
gnorm = utils.float(gnorm)
values.append([gnorm])
try:
line = next(inputfile)
# Is this the end of the file for some reason?
except StopIteration:
self.logger.warning(
f"File terminated before end of last SCF! Last gradient norm: {gnorm}"
)
break
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append(values)
try:
line = next(inputfile)
except StopIteration:
self.logger.warning('File terminated?')
break
# The SCF for DFT does not use the same algorithm as Hartree-Fock, but always
# seems to use the following format to report SCF convergence:
# convergence iter energy DeltaE RMS-Dens Diis-err time
# ---------------- ----- ----------------- --------- --------- --------- ------
# d= 0,ls=0.0,diis 1 -382.2544324446 -8.28D+02 1.42D-02 3.78D-01 23.2
# d= 0,ls=0.0,diis 2 -382.3017298534 -4.73D-02 6.99D-03 3.82D-02 39.3
# d= 0,ls=0.0,diis 3 -382.2954343173 6.30D-03 4.21D-03 7.95D-02 55.3
# ...
if line.split() == ['convergence', 'iter', 'energy', 'DeltaE', 'RMS-Dens', 'Diis-err', 'time']:
if hasattr(self, 'linesearch') and self.linesearch:
return
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
values = []
while line.strip():
# Sometimes there are things in between iterations with fewer columns,
# and we want to skip those lines, most probably. An exception might
# unrestricted calcualtions, which show extra RMS density and DIIS
# errors, although it is not clear yet whether these are for the
# beta orbitals or somethine else. The iterations look like this in that case:
# convergence iter energy DeltaE RMS-Dens Diis-err time
# ---------------- ----- ----------------- --------- --------- --------- ------
# d= 0,ls=0.0,diis 1 -382.0243202601 -8.28D+02 7.77D-03 1.04D-01 30.0
# 7.68D-03 1.02D-01
# d= 0,ls=0.0,diis 2 -382.0647539758 -4.04D-02 4.64D-03 1.95D-02 59.2
# 5.39D-03 2.36D-02
# ...
if len(line[17:].split()) == 6:
iter, energy, deltaE, dens, diis, time = line[17:].split()
val_energy = utils.float(deltaE)
val_density = utils.float(dens)
val_gradient = utils.float(diis)
values.append([val_energy, val_density, val_gradient])
try:
line = next(inputfile)
# Is this the end of the file for some reason?
except StopIteration:
self.logger.warning(
f"File terminated before end of last SCF! Last error: {diis}"
)
break
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append(values)
# These triggers are supposed to catch the current step in a geometry optimization search
# and determine whether we are currently in the main (initial) SCF cycle of that step
# or in the subsequent line search. The step is printed between dashes like this:
#
# --------
# Step 0
# --------
#
# and the summary lines that describe the main SCF cycle for the frsit step look like this:
#
#@ Step Energy Delta E Gmax Grms Xrms Xmax Walltime
#@ ---- ---------------- -------- -------- -------- -------- -------- --------
#@ 0 -379.76896249 0.0D+00 0.04567 0.01110 0.00000 0.00000 4.2
# ok ok
#
# However, for subsequent step the format is a bit different:
#
# Step Energy Delta E Gmax Grms Xrms Xmax Walltime
# ---- ---------------- -------- -------- -------- -------- -------- --------
#@ 2 -379.77794602 -7.4D-05 0.00118 0.00023 0.00440 0.01818 14.8
# ok
#
# There is also a summary of the line search (which we don't use now), like this:
#
# Line search:
# step= 1.00 grad=-1.8D-05 hess= 8.9D-06 energy= -379.777955 mode=accept
# new step= 1.00 predicted energy= -379.777955
#
if line[10:14] == "Step":
self.geostep = int(line.split()[-1])
self.skip_line(inputfile, 'dashes')
self.linesearch = False
if line[0] == "@" and line.split()[1] == "Step":
at_and_dashes = next(inputfile)
line = next(inputfile)
tokens = line.split()
assert int(tokens[1]) == self.geostep == 0
gmax = float(tokens[4])
grms = float(tokens[5])
xrms = float(tokens[6])
xmax = float(tokens[7])
self.append_attribute("geovalues", [gmax, grms, xmax, xrms])
self.linesearch = True
if line[2:6] == "Step":
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
tokens = line.split()
assert int(tokens[1]) == self.geostep
gmax = float(tokens[4])
grms = float(tokens[5])
xrms = float(tokens[6])
xmax = float(tokens[7])
if hasattr(self, 'linesearch') and self.linesearch:
return
self.append_attribute("geovalues", [gmax, grms, xmax, xrms])
self.linesearch = True
# There is a clear message when the geometry optimization has converged:
#
# ----------------------
# Optimization converged
# ----------------------
#
if line.strip() == "Optimization converged":
self.skip_line(inputfile, 'dashes')
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues) - 1)
if "Failed to converge" in line and hasattr(self, 'geovalues'):
if not hasattr(self, 'optdone'):
self.optdone = []
# extract the theoretical method
if "Total SCF energy" in line:
self.metadata["methods"].append("HF")
if "Total DFT energy" in line:
self.metadata["methods"].append("DFT")
# The line containing the final SCF energy seems to be always identifiable like this.
if "Total SCF energy" in line or "Total DFT energy" in line:
# NWChem often does a line search during geometry optimization steps, reporting
# the SCF information but not the coordinates (which are not necessarily 'intermediate'
# since the step size can become smaller). We want to skip these SCF cycles,
# unless the coordinates can also be extracted (possibly from the gradients?).
if hasattr(self, 'linesearch') and self.linesearch:
return
if not hasattr(self, "scfenergies"):
self.scfenergies = []
energy = float(line.split()[-1])
energy = utils.convertor(energy, "hartree", "eV")
self.scfenergies.append(energy)
if "Dispersion correction" in line:
dispersion = utils.convertor(float(line.split()[-1]), "hartree", "eV")
self.append_attribute("dispersionenergies", dispersion)
# The final MO orbitals are printed in a simple list, but apparently not for
# DFT calcs, and often this list does not contain all MOs, so make sure to
# parse them from the MO analysis below if possible. This section will be like this:
#
# Symmetry analysis of molecular orbitals - final
# -----------------------------------------------
#
# Numbering of irreducible representations:
#
# 1 ag 2 au 3 bg 4 bu
#
# Orbital symmetries:
#
# 1 bu 2 ag 3 bu 4 ag 5 bu
# 6 ag 7 bu 8 ag 9 bu 10 ag
# ...
if line.strip() == "Symmetry analysis of molecular orbitals - final":
self.skip_lines(inputfile, ['d', 'b', 'numbering', 'b', 'reps', 'b', 'syms', 'b'])
if not hasattr(self, 'mosyms'):
self.mosyms = [[None]*self.nbasis]
line = next(inputfile)
while line.strip():
ncols = len(line.split())
assert ncols % 2 == 0
for i in range(ncols//2):
index = int(line.split()[i*2]) - 1
sym = line.split()[i*2+1]
sym = sym[0].upper() + sym[1:]
if self.mosyms[0][index]:
if self.mosyms[0][index] != sym:
self.logger.warning(
f"Symmetry of MO {int(index + 1)} has changed"
)
self.mosyms[0][index] = sym
line = next(inputfile)
# The same format is used for HF and DFT molecular orbital analysis. We want to parse
# the MO energies from this section, although it is printed already before this with
# less precision (might be useful to parse that if this is not available). Also, this
# section contains coefficients for the leading AO contributions, so it might also
# be useful to parse and use those values if the full vectors are not printed.
#
# The block looks something like this (two separate alpha/beta blocks in the unrestricted case):
#
# ROHF Final Molecular Orbital Analysis
# -------------------------------------
#
# Vector 1 Occ=2.000000D+00 E=-1.104059D+01 Symmetry=bu
# MO Center= 1.4D-17, 0.0D+00, -6.5D-37, r^2= 2.1D+00
# Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
# ----- ------------ --------------- ----- ------------ ---------------
# 1 0.701483 1 C s 6 -0.701483 2 C s
#
# Vector 2 Occ=2.000000D+00 E=-1.104052D+01 Symmetry=ag
# ...
# Vector 12 Occ=2.000000D+00 E=-1.020253D+00 Symmetry=bu
# MO Center= -1.4D-17, -5.6D-17, 2.9D-34, r^2= 7.9D+00
# Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
# ----- ------------ --------------- ----- ------------ ---------------
# 36 -0.298699 11 C s 41 0.298699 12 C s
# 2 0.270804 1 C s 7 -0.270804 2 C s
# 48 -0.213655 15 C s 53 0.213655 16 C s
# ...
#
if "Final" in line and "Molecular Orbital Analysis" in line:
# Unrestricted jobs have two such blocks, for alpha and beta orbitals, and
# we need to keep track of which one we're parsing (always alpha in restricted case).
unrestricted = ("Alpha" in line) or ("Beta" in line)
alphabeta = int("Beta" in line)
self.skip_lines(inputfile, ['dashes', 'blank'])
nvectors = []
mooccnos = []
energies = []
symmetries = [None]*self.nbasis
line = next(inputfile)
while line[:7] == " Vector":
# Note: the vector count starts from 1 in NWChem.
nvector = int(line[7:12])
nvectors.append(nvector)
# A nonzero occupancy for SCF jobs means the orbital is occupied.
mooccno = int(utils.float(line[18:30]))
mooccnos.append(mooccno)
# If the printout does not start from the first MO, assume None for all previous orbitals.
if len(energies) == 0 and nvector > 1:
for i in range(1, nvector):
energies.append(None)
energy = utils.float(line[34:47])
energy = utils.convertor(energy, "hartree", "eV")
energies.append(energy)
# When symmetry is not used, this part of the line is missing.
if line[47:58].strip() == "Symmetry=":
sym = line[58:].strip()
sym = sym[0].upper() + sym[1:]
symmetries[nvector-1] = sym
line = next(inputfile)
if "MO Center" in line:
line = next(inputfile)
if "Bfn." in line:
line = next(inputfile)
if "-----" in line:
line = next(inputfile)
while line.strip():
line = next(inputfile)
line = next(inputfile)
self.set_attribute('nmo', nvector)
if not hasattr(self, 'moenergies') or (len(self.moenergies) > alphabeta):
self.moenergies = []
self.moenergies.append(energies)
if not hasattr(self, 'mosyms') or (len(self.mosyms) > alphabeta):
self.mosyms = []
self.mosyms.append(symmetries)
if not hasattr(self, 'homos') or (len(self.homos) > alphabeta):
self.homos = []
nvector_index = mooccnos.index(0) - 1
if nvector_index > -1:
self.homos.append(nvectors[nvector_index] - 1)
else:
self.homos.append(-1)
# If this was a restricted open-shell calculation, append
# to HOMOs twice since only one Molecular Orbital Analysis
# section is in the output file.
if (not unrestricted) and (1 in mooccnos):
nvector_index = mooccnos.index(1) - 1
if nvector_index > -1:
self.homos.append(nvectors[nvector_index] - 1)
else:
self.homos.append(-1)
# This is where the full MO vectors are printed, but a special
# directive is needed for it in the `scf` or `dft` block:
# print "final vectors" "final vectors analysis"
# which gives:
#
# Final MO vectors
# ----------------
#
#
# global array: alpha evecs[1:60,1:60], handle: -995
#
# 1 2 3 4 5 6
# ----------- ----------- ----------- ----------- ----------- -----------
# 1 -0.69930 -0.69930 -0.02746 -0.02769 -0.00313 -0.02871
# 2 -0.03156 -0.03135 0.00410 0.00406 0.00078 0.00816
# 3 0.00002 -0.00003 0.00067 0.00065 -0.00526 -0.00120
# ...
#
if line.strip() == "Final MO vectors":
if not hasattr(self, 'mocoeffs'):
self.mocoeffs = []
self.skip_lines(inputfile, ['d', 'b', 'b'])
# The columns are MOs, rows AOs, but that's and educated guess since no
# atom information is printed alongside the indices. This next line gives
# the dimensions, which we can check. if set before this. Also, this line
# specifies whether we are dealing with alpha or beta vectors.
array_info = next(inputfile)
while ("global array" in array_info):
alphabeta = int(line.split()[2] == "beta")
size = array_info.split('[')[1].split(']')[0]
nbasis = int(size.split(',')[0].split(':')[1])
nmo = int(size.split(',')[1].split(':')[1])
self.set_attribute('nbasis', nbasis)
self.set_attribute('nmo', nmo)
self.skip_line(inputfile, 'blank')
mocoeffs = []
while len(mocoeffs) < self.nmo:
nmos = list(map(int, next(inputfile).split()))
assert len(mocoeffs) == nmos[0] - 1
for n in nmos:
mocoeffs.append([])
self.skip_line(inputfile, 'dashes')
for nb in range(nbasis):
line = next(inputfile)
index = int(line.split()[0])
assert index == nb+1
coefficients = list(map(float, line.split()[1:]))
assert len(coefficients) == len(nmos)
for i, c in enumerate(coefficients):
mocoeffs[nmos[i]-1].append(c)
self.skip_line(inputfile, 'blank')
self.mocoeffs.append(mocoeffs)
array_info = next(inputfile)
# For Hartree-Fock, the atomic Mulliken charges are typically printed like this:
#
# Mulliken analysis of the total density
# --------------------------------------
#
# Atom Charge Shell Charges
# ----------- ------ -------------------------------------------------------
# 1 C 6 6.00 1.99 1.14 2.87
# 2 C 6 6.00 1.99 1.14 2.87
# ...
if line.strip() == "Mulliken analysis of the total density":
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
self.skip_lines(inputfile, ['d', 'b', 'header', 'd'])
charges = []
line = next(inputfile)
while line.strip():
index, atomname, nuclear, atom = line.split()[:4]
shells = line.split()[4:]
charges.append(float(nuclear)-float(atom))
line = next(inputfile)
self.atomcharges['mulliken'] = charges
# Note the 'overlap population' as printed in the Mulliken population analysis
# is not the same thing as the 'overlap matrix'. In fact, it is the overlap matrix
# multiplied elementwise times the density matrix.
#
# ----------------------------
# Mulliken population analysis
# ----------------------------
#
# ----- Total overlap population -----
#
# 1 2 3 4 5 6 7
#
# 1 1 C s 2.0694818227 -0.0535883400 -0.0000000000 -0.0000000000 -0.0000000000 -0.0000000000 0.0000039991
# 2 1 C s -0.0535883400 0.8281341291 0.0000000000 -0.0000000000 0.0000000000 0.0000039991 -0.0009906747
# ...
#
# DFT does not seem to print the separate listing of Mulliken charges
# by default, but they are printed by this modules later on. They are also print
# for Hartree-Fock runs, though, so in that case make sure they are consistent.
if line.strip() == "Mulliken population analysis":
self.skip_lines(inputfile, ['d', 'b', 'total_overlap_population', 'b'])
overlaps = []
line = next(inputfile)
while all([c.isdigit() for c in line.split()]):
# There is always a line with the MO indices printed in thie block.
indices = [int(i)-1 for i in line.split()]
for i in indices:
overlaps.append([])
# There is usually a blank line after the MO indices, but
# there are exceptions, so check if line is blank first.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
# Now we can iterate or atomic orbitals.
for nao in range(self.nbasis):
data = list(map(float, line.split()[4:]))
for i, d in enumerate(data):
overlaps[indices[i]].append(d)
line = next(inputfile)
line = next(inputfile)
# This header should be printed later, before the charges are print, which of course
# are just sums of the overlaps and could be calculated. But we just go ahead and
# parse them, make sure they're consistent with previously parsed values and
# use these since they are more precise (previous precision could have been just 0.01).
while "Total gross population on atoms" not in line:
line = next(inputfile)
self.skip_line(inputfile, 'blank')
charges = []
for i in range(self.natom):
line = next(inputfile)
iatom, element, ncharge, epop = line.split()
iatom = int(iatom)
ncharge = float(ncharge)
epop = float(epop)
assert iatom == (i+1)
charges.append(ncharge-epop)
if not hasattr(self, 'atomcharges'):
self.atomcharges = {}
if "mulliken" in self.atomcharges:
assert max(self.atomcharges['mulliken'] - numpy.array(charges)) < 0.01
# This is going to be higher precision than "Mulliken analysis of
# the total density".
self.atomcharges['mulliken'] = charges
# NWChem prints the dipole moment in atomic units first, and we could just fast forward
# to the values in Debye, which are also printed. But we can also just convert them
# right away and so parse a little bit less. Note how the reference point is print
# here within the block nicely, as it is for all moment later.
#
# -------------
# Dipole Moment
# -------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# Dipole moment 0.0000000000 Debye(s)
# DMX 0.0000000000 DMXEFC 0.0000000000
# DMY 0.0000000000 DMYEFC 0.0000000000
# DMZ -0.0000000000 DMZEFC 0.0000000000
#
# ...
#
if line.strip() == "Dipole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_line(inputfile, 'blank')
magnitude = next(inputfile)
assert magnitude.split()[-1] == "A.U."
dipole = []
for i in range(3):
line = next(inputfile)
dipole.append(float(line.split()[1]))
dipole = utils.convertor(numpy.array(dipole), "ebohr", "Debye")
if not hasattr(self, 'moments'):
self.moments = [self.reference, dipole]
else:
self.moments[1] == dipole
# The quadrupole moment is pretty straightforward to parse. There are several
# blocks printed, and the first one called 'second moments' contains the raw
# moments, and later traceless values are printed. The moments, however, are
# not in lexicographical order, so we need to sort them. Also, the first block
# is in atomic units, so remember to convert to Buckinghams along the way.
#
# -----------------
# Quadrupole Moment
# -----------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# < R**2 > = ********** a.u. ( 1 a.u. = 0.280023 10**(-16) cm**2 )
# ( also called diamagnetic susceptibility )
#
# Second moments in atomic units
#
# Component Electronic+nuclear Point charges Total
# --------------------------------------------------------------------------
# XX -38.3608511210 0.0000000000 -38.3608511210
# YY -39.0055467347 0.0000000000 -39.0055467347
# ...
#
if line.strip() == "Quadrupole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_lines(inputfile, ['b', 'units', 'susc', 'b'])
line = next(inputfile)
assert line.strip() == "Second moments in atomic units"
self.skip_lines(inputfile, ['b', 'header', 'd'])
# Parse into a dictionary and then sort by the component key.
quadrupole = {}
for i in range(6):
line = next(inputfile)
quadrupole[line.split()[0]] = float(line.split()[-1])
lex = sorted(quadrupole.keys())
quadrupole = [quadrupole[key] for key in lex]
quadrupole = utils.convertor(numpy.array(quadrupole), "ebohr2", "Buckingham")
# The checking of potential previous values if a bit more involved here,
# because it turns out NWChem has separate keywords for dipole, quadrupole
# and octupole output. So, it is perfectly possible to print the quadrupole
# and not the dipole... if that is the case set the former to None and
# issue a warning. Also, a regression has been added to cover this case.
if not hasattr(self, 'moments') or len(self.moments) < 2:
self.logger.warning("Found quadrupole moments but no previous dipole")
self.moments = [self.reference, None, quadrupole]
else:
if len(self.moments) == 2:
self.moments.append(quadrupole)
else:
assert self.moments[2] == quadrupole
# The octupole moment is analogous to the quadrupole, but there are more components
# and the checking of previously parsed dipole and quadrupole moments is more involved,
# with a corresponding test also added to regressions.
#
# ---------------
# Octupole Moment
# ---------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# Third moments in atomic units
#
# Component Electronic+nuclear Point charges Total
# --------------------------------------------------------------------------
# XXX -0.0000000000 0.0000000000 -0.0000000000
# YYY -0.0000000000 0.0000000000 -0.0000000000
# ...
#
if line.strip() == "Octupole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_line(inputfile, 'blank')
line = next(inputfile)
assert line.strip() == "Third moments in atomic units"
self.skip_lines(inputfile, ['b', 'header', 'd'])
octupole = {}
for i in range(10):
line = next(inputfile)
octupole[line.split()[0]] = float(line.split()[-1])
lex = sorted(octupole.keys())
octupole = [octupole[key] for key in lex]
octupole = utils.convertor(numpy.array(octupole), "ebohr3", "Debye.ang2")
if not hasattr(self, 'moments') or len(self.moments) < 2:
self.logger.warning("Found octupole moments but no previous dipole or quadrupole moments")
self.moments = [self.reference, None, None, octupole]
elif len(self.moments) == 2:
self.logger.warning("Found octupole moments but no previous quadrupole moments")
self.moments.append(None)
self.moments.append(octupole)
else:
if len(self.moments) == 3:
self.moments.append(octupole)
else:
assert self.moments[3] == octupole
if "Total MP2 energy" in line:
self.metadata["methods"].append("MP2")
mpenerg = float(line.split()[-1])
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
self.mpenergies[-1].append(utils.convertor(mpenerg, "hartree", "eV"))
if line.strip() == "NWChem Extensible Many-Electron Theory Module":
ccenergies = []
while "Parallel integral file used" not in line:
line = next(inputfile)
if "CCSD total energy / hartree" in line or "total CCSD energy:" in line:
self.metadata["methods"].append("CCSD")
ccenergies.append(float(line.split()[-1]))
if "CCSD(T) total energy / hartree" in line:
self.metadata["methods"].append("CCSD(T)")
ccenergies.append(float(line.split()[-1]))
if ccenergies:
self.append_attribute(
"ccenergies", utils.convertor(ccenergies[-1], "hartree", "eV")
)
# Static and dynamic polarizability.
if "Linear Response polarizability / au" in line:
if not hasattr(self, "polarizabilities"):
self.polarizabilities = []
polarizability = []
line = next(inputfile)
assert line.split()[0] == "Frequency"
line = next(inputfile)
assert line.split()[0] == "Wavelength"
self.skip_lines(inputfile, ['coordinates', 'd'])
for _ in range(3):
line = next(inputfile)
polarizability.append(line.split()[1:])
self.polarizabilities.append(numpy.array(polarizability))
if line[:18] == ' Total times cpu:':
self.metadata['success'] = True
if line.strip() == "NWChem QMD Module":
self.is_BOMD = True
# Born-Oppenheimer molecular dynamics (BOMD): time.
if "QMD Run Information" in line:
self.skip_line(inputfile, 'd')
line = next(inputfile)
assert "Time elapsed (fs)" in line
time = float(line.split()[4])
self.append_attribute('time', time)
# BOMD: geometry coordinates when `print low`.
if line.strip() == "DFT ENERGY GRADIENTS":
if self.is_BOMD:
self.skip_lines(inputfile, ['b', 'atom coordinates gradient', 'xyzxyz'])
line = next(inputfile)
atomcoords_step = []
while line.strip():
tokens = line.split()
assert len(tokens) == 8
atomcoords_step.append([float(c) for c in tokens[2:5]])
line = next(inputfile)
self.atomcoords.append(atomcoords_step)
# Extract Thermochemistry in au (Hartree)
#
# have to deal with :
# Temperature = 298.15K
# frequency scaling parameter = 1.0000
# Zero-Point correction to Energy = 259.352 kcal/mol ( 0.413304 au)
# Thermal correction to Energy = 275.666 kcal/mol ( 0.439302 au)
# Thermal correction to Enthalpy = 276.258 kcal/mol ( 0.440246 au)
# Total Entropy = 176.764 cal/mol-K
# - Translational = 44.169 cal/mol-K (mol. weight = 448.1245)
# - Rotational = 37.018 cal/mol-K (symmetry # = 1)
# - Vibrational = 95.577 cal/mol-K
# Cv (constant volume heat capacity) = 103.675 cal/mol-K
# - Translational = 2.979 cal/mol-K
# - Rotational = 2.979 cal/mol-K
# - Vibrational = 97.716 cal/mol-K
if line[1:12] == "Temperature":
self.set_attribute("temperature", utils.float(line.split()[2][:-1]))
if line[1:28] == "frequency scaling parameter":
self.set_attribute("pressure", utils.float(line.split()[4]))
if line[1:31] == "Thermal correction to Enthalpy" and hasattr(self, "scfenergies"):
self.set_attribute(
"enthalpy",
utils.float(line.split()[8])
+ utils.convertor(self.scfenergies[-1], "eV", "hartree"),
)
if line[1:32] == "Zero-Point correction to Energy" and hasattr(self, "scfenergies"):
self.set_attribute("zpve", utils.float(line.split()[8]))
if line[1:29] == "Thermal correction to Energy" and hasattr(self, "scfenergies"):
self.set_attribute(
"electronic_thermal_energy",
utils.float(line.split()[8])
+ utils.convertor(self.scfenergies[-1], "eV", "hartree"),
)
if line[1:14] == "Total Entropy":
self.set_attribute(
"entropy",
utils.convertor(1e-3 * utils.float(line.split()[3]), "kcal/mol", "hartree"),
)
# extract vibrational frequencies (in cm-1)
if line.strip() == "Normal Eigenvalue || Projected Infra Red Intensities":
self.skip_lines(inputfile, ["units", "d"]) # units, dashes
line = next(inputfile) # first line of data
while set(line.strip()[:-1]) != {"-"}:
self.append_attribute("vibfreqs", utils.float(line.split()[1]))
self.append_attribute("vibirs", utils.float(line.split()[5]))
line = next(inputfile) # next line
# NWChem TD-DFT excited states transitions
#
# Have to deal with :
# ----------------------------------------------------------------------------
# Root 1 singlet a 0.105782828 a.u. 2.8785 eV
# ----------------------------------------------------------------------------
# Transition Moments X -1.88278 Y -0.46346 Z -0.05660
# Transition Moments XX -5.63612 XY 4.57009 XZ -0.38291
# Transition Moments YY 6.48024 YZ -1.50109 ZZ -0.17430
# Dipole Oscillator Strength 0.2653650650
# Electric Quadrupole 0.0000003789
# Magnetic Dipole 0.0000001767
# Total Oscillator Strength 0.2653656206
#
# Occ. 117 a --- Virt. 118 a 0.98676 X
# Occ. 117 a --- Virt. 118 a -0.08960 Y
# Occ. 117 a --- Virt. 119 a 0.08235 X
# ----------------------------------------------------------------------------
# Root 2 singlet a 0.127858653 a.u. 3.4792 eV
# ----------------------------------------------------------------------------
# Transition Moments X -0.02031 Y 0.11238 Z -0.09893
# Transition Moments XX -0.23065 XY -0.35697 XZ -0.11250
# Transition Moments YY 0.16402 YZ -0.01716 ZZ 0.16705
# Dipole Oscillator Strength 0.0019460560
# Electric Quadrupole 0.0000000021
# Magnetic Dipole 0.0000002301
# Total Oscillator Strength 0.0019462882
#
# Occ. 110 a --- Virt. 118 a -0.05918 X
# Occ. 110 a --- Virt. 119 a -0.06022 X
# Occ. 110 a --- Virt. 124 a 0.05962 X
# Occ. 114 a --- Virt. 118 a 0.87840 X
# Occ. 114 a --- Virt. 119 a -0.12213 X
# Occ. 114 a --- Virt. 123 a 0.07120 X
# Occ. 114 a --- Virt. 124 a -0.05022 X
# Occ. 114 a --- Virt. 125 a 0.06104 X
# Occ. 114 a --- Virt. 126 a 0.05065 X
# Occ. 115 a --- Virt. 118 a 0.12907 X
# Occ. 116 a --- Virt. 118 a -0.40137 X
if line[:6] == " Root":
self.append_attribute(
"etenergies", utils.convertor(utils.float(line.split()[-2]), "eV", "wavenumber")
)
self.append_attribute("etsyms", str.join(" ", line.split()[2:-4]))
self.skip_lines(inputfile, ["dashes"])
line = next(inputfile)
if "Spin forbidden" not in line:
# find Dipole Oscillator Strength
while not ("Dipole Oscillator Strength" in line):
line = next(inputfile)
etoscs = utils.float(line.split()[-1])
# in case of magnetic contribution replace, replace Dipole Oscillator Strength with Total Oscillator Strength
while not (line.find("Occ.") >= 0):
if "Total Oscillator Strength" in line:
etoscs = utils.float(line.split()[-1])
line = next(inputfile)
self.append_attribute("etoscs", etoscs)
CIScontrib = []
while line.find("Occ.") >= 0:
if len(line.split()) == 9: # restricted
_, occ, _, _, _, virt, _, coef, direction = line.split()
type1 = "alpha"
type2 = "alpha"
else: # unrestricted: len(line.split()) should be 11
_, occ, type1, _, _, _, virt, type2, _, coef, direction = line.split()
occ = int(occ) - 1 # subtract 1 so that it is an index into moenergies
virt = int(virt) - 1 # subtract 1 so that it is an index into moenergies
coef = utils.float(coef)
if direction == "Y":
# imaginary or negative excitation (denoted Y)
tmp = virt
virt = occ
occ = tmp
tmp = type1
type1 = type2
type2 = tmp
frommoindex = 0 # For restricted or alpha unrestricted
if type1 == "beta":
frommoindex = 1
tomoindex = 0 # For restricted or alpha unrestricted
if type2 == "beta":
tomoindex = 1
CIScontrib.append([(occ, frommoindex), (virt, tomoindex), coef])
line = next(inputfile)
self.append_attribute("etsecs", CIScontrib)
else:
self.append_attribute("etoscs", 0.0)
    def before_parsing(self):
        """NWChem-specific routines performed before parsing a file.

        Resets the flag that marks Born-Oppenheimer molecular dynamics
        (BOMD) output; the flag is switched on later, while parsing, when
        the 'NWChem QMD Module' header is encountered.
        """
        # The only reason we need this identifier is if `print low` is
        # set in the input file, which we assume is likely for a BOMD
        # trajectory. This will enable parsing coordinates from the
        # 'DFT ENERGY GRADIENTS' section.
        self.is_BOMD = False
    def after_parsing(self):
        """NWChem-specific routines for after parsing a file.

        Currently, expands self.shells into self.aonames: the per-element
        shell strings collected during parsing (e.g. "2s1p") are expanded
        into one label per basis function, such as C1_1S or C1_2PX.
        """
        super(NWChem, self).after_parsing()

        # setup a few necessary things, including a regular expression
        # for matching the shells
        table = utils.PeriodicTable()
        elements = [table.element[x] for x in self.atomnos]
        pattern = re.compile(r"(\ds)+(\dp)*(\dd)*(\df)*(\dg)*")

        # Template labels per angular momentum; the %i placeholder receives
        # the shell number when the templates are expanded below.
        labels = {}
        labels['s'] = ["%iS"]
        labels['p'] = ["%iPX", "%iPY", "%iPZ"]
        # The d/f/g component labels depend on whether spherical or
        # cartesian basis functions were used (stored in self.shells).
        if self.shells['type'] == 'spherical':
            labels['d'] = ['%iD-2', '%iD-1', '%iD0', '%iD1', '%iD2']
            labels['f'] = ['%iF-3', '%iF-2', '%iF-1', '%iF0',
                           '%iF1', '%iF2', '%iF3']
            labels['g'] = ['%iG-4', '%iG-3', '%iG-2', '%iG-1', '%iG0',
                           '%iG1', '%iG2', '%iG3', '%iG4']
        elif self.shells['type'] == 'cartesian':
            labels['d'] = ['%iDXX', '%iDXY', '%iDXZ',
                           '%iDYY', '%iDYZ',
                           '%iDZZ']
            labels['f'] = ['%iFXXX', '%iFXXY', '%iFXXZ',
                           '%iFXYY', '%iFXYZ', '%iFXZZ',
                           '%iFYYY', '%iFYYZ', '%iFYZZ',
                           '%iFZZZ']
            labels['g'] = ['%iGXXXX', '%iGXXXY', '%iGXXXZ',
                           '%iGXXYY', '%iGXXYZ', '%iGXXZZ',
                           '%iGXYYY', '%iGXYYZ', '%iGXYZZ',
                           '%iGXZZZ', '%iGYYYY', '%iGYYYZ',
                           '%iGYYZZ', '%iGYZZZ', '%iGZZZZ']
        else:
            self.logger.warning("Found a non-standard aoname representation type.")
            return

        # now actually build aonames
        # involves expanding 2s1p into appropriate types
        self.aonames = []
        for i, element in enumerate(elements):
            try:
                shell_text = self.shells[element]
            except KeyError:
                # No shell information for this element; aonames would be
                # incomplete, so drop the attribute entirely.
                del self.aonames
                msg = "Cannot determine aonames for at least one atom."
                self.logger.warning(msg)
                break
            prefix = f"{element}{int(i + 1)}_"  # (e.g. C1_)
            matches = pattern.match(shell_text)
            for j, group in enumerate(matches.groups()):
                if group is None:
                    continue
                count = int(group[:-1])
                label = group[-1]
                for k in range(count):
                    # The shell number substituted into the label (e.g. the
                    # 2 in 2PX) is derived from the group position j and the
                    # repetition index k.
                    temp = [x % (j + k + 1) for x in labels[label]]
                    self.aonames.extend([prefix + x for x in temp])

        # If we parsed a BOMD trajectory, the first two parsed
        # geometries are identical, and all from the second onward are
        # in Bohr. Delete the first one and perform the unit
        # conversion.
        if self.is_BOMD:
            self.atomcoords = utils.convertor(numpy.asarray(self.atomcoords)[1:, ...],
                                              'bohr', 'Angstrom')
| bsd-3-clause | 6f46ae2fc91aae66bcf3d2a6c37c99cf | 45.92772 | 137 | 0.475505 | 4.16412 | false | false | false | false |
cclib/cclib | cclib/method/mbo.py | 3 | 4085 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of Mayer's bond orders based on data parsed by cclib."""
import random
import numpy
from cclib.method.density import Density
class MBO(Density):
    """Mayer's bond orders.

    Computed from the density matrix and the (AO or FO) overlap matrix.
    After a successful :meth:`calculate` call the bond orders are stored in
    ``self.fragresults``, an array of shape [nspins, nfragments, nfragments].
    """

    def __init__(self, *args):
        super().__init__(logname="MBO", *args)

    def __str__(self):
        """Return a string representation of the object."""
        return f"Mayer's bond order of {self.data}"

    def __repr__(self):
        """Return a representation of the object."""
        return f'Mayer\'s bond order("{self.data}")'

    def calculate(self, indices=None, fupdate=0.05):
        """Calculate Mayer's bond orders.

        Keyword arguments:
          indices -- list of lists of basis-function indices, one sublist
                     per fragment; when falsy, one fragment per atom is
                     built from aonames (or fonames).
          fupdate -- approximate fraction of outer-loop iterations at which
                     the progress indicator is refreshed.

        Returns True on success, False when the density could not be made
        or the overlap matrix is missing.
        """

        retval = super().calculate(fupdate)
        if not retval: #making density didn't work
            return False

        # Do we have the needed info in the ccData object?
        if not (hasattr(self.data, "aooverlaps")
                    or hasattr(self.data, "fooverlaps")):
            self.logger.error("Missing overlap matrix")
            return False #let the caller of function know we didn't finish

        # Pick the overlap matrix up front, so that it is defined even when
        # explicit fragment indices were passed in (previously `overlaps`
        # was only assigned inside the branch below, raising a NameError
        # for caller-supplied indices).
        if hasattr(self.data, "aooverlaps"):
            overlaps = self.data.aooverlaps
        else:
            overlaps = self.data.fooverlaps

        if not indices:

            # Build list of groups of orbitals in each atom for atomresults.
            if hasattr(self.data, "aonames"):
                names = self.data.aonames
                overlaps = self.data.aooverlaps
            elif hasattr(self.data, "fonames"):
                names = self.data.fonames
                overlaps = self.data.fooverlaps
            else:
                self.logger.error("Missing aonames or fonames")
                return False

            atoms = []
            indices = []

            # Group basis functions by the atom prefix of their name
            # (e.g. "C1_2PX" belongs to atom "C1").
            name = names[0].split('_')[0]
            atoms.append(name)
            indices.append([0])

            for i in range(1, len(names)):
                name = names[i].split('_')[0]
                try:
                    index = atoms.index(name)
                except ValueError: #not found in atom list
                    atoms.append(name)
                    indices.append([i])
                else:
                    indices[index].append(i)

        self.logger.info("Creating attribute fragresults: array[3]")
        size = len(indices)

        # Determine number of steps, and whether process involves beta orbitals.
        PS = []
        PS.append(numpy.dot(self.density[0], overlaps))
        nstep = size**2 #approximately quadratic in size
        unrestricted = (len(self.data.mocoeffs) == 2)
        if unrestricted:
            self.fragresults = numpy.zeros([2, size, size], "d")
            PS.append(numpy.dot(self.density[1], overlaps))
        else:
            self.fragresults = numpy.zeros([1, size, size], "d")

        # Initialize progress if available.
        if self.progress:
            self.progress.initialize(nstep)

        step = 0
        for i in range(len(indices)):

            if self.progress and random.random() < fupdate:
                self.progress.update(step, "Mayer's Bond Order")

            for j in range(i+1, len(indices)):

                tempsumA = 0
                tempsumB = 0

                for a in indices[i]:
                    for b in indices[j]:
                        if unrestricted:
                            tempsumA += 2 * PS[0][a][b] * PS[0][b][a]
                            tempsumB += 2 * PS[1][a][b] * PS[1][b][a]
                        else:
                            tempsumA += PS[0][a][b] * PS[0][b][a]

                self.fragresults[0][i, j] = tempsumA
                self.fragresults[0][j, i] = tempsumA

                if unrestricted:
                    self.fragresults[1][i, j] = tempsumB
                    self.fragresults[1][j, i] = tempsumB

                # Advance the progress counter once per fragment pair
                # (previously `step` was never incremented, so the progress
                # indicator was stuck at 0). nstep = size**2 is an upper
                # bound on the number of pairs.
                step += 1

        if self.progress:
            self.progress.update(nstep, "Done")

        return True
| bsd-3-clause | 37718e29bb38a9dac6fa2edea90f506d | 31.211382 | 80 | 0.507711 | 4.151423 | false | false | false | false |
feincms/feincms | feincms/module/page/extensions/titles.py | 2 | 2115 | """
Sometimes a single title is not enough: you may want subtitles, and perhaps
different titles in the navigation and in the <title> tag. This extension
lets you do that.
"""
from django.db import models
from django.utils.translation import gettext_lazy as _
from feincms import extensions
from feincms._internal import monkeypatch_property
class Extension(extensions.Extension):
    """Add separate content/page title fields plus convenience properties."""

    def handle_model(self):
        # Field definitions are built first, then attached to the model, so
        # the add_to_class calls below stay easy to scan.
        content_title_field = models.TextField(
            _("content title"),
            blank=True,
            help_text=_(
                "The first line is the main title, the following"
                " lines are subtitles."
            ),
        )
        page_title_field = models.CharField(
            _("page title"),
            max_length=69,
            blank=True,
            help_text=_(
                "Page title for browser window. Same as title by"
                " default. Must be 69 characters or fewer."
            ),
        )
        self.model.add_to_class("_content_title", content_title_field)
        self.model.add_to_class("_page_title", page_title_field)

        @monkeypatch_property(self.model)
        def page_title(self):
            """Title for the browser window (the <title> tag inside <head>).

            Falls back to the content title when no explicit page title
            has been entered.
            """
            return self._page_title or self.content_title

        @monkeypatch_property(self.model)
        def content_title(self):
            """Main title for the content area, e.g. the <h1> tag."""
            if self._content_title:
                return self._content_title.splitlines()[0]
            return self.title

        @monkeypatch_property(self.model)
        def content_subtitle(self):
            """Every line of the content title except the first one."""
            return "\n".join(self._content_title.splitlines()[1:])

    def handle_modeladmin(self, modeladmin):
        modeladmin.add_extension_options(
            _("Titles"),
            {"fields": ("_content_title", "_page_title"), "classes": ("collapse",)},
        )
| bsd-3-clause | 6dcd71e2a188c6567ddaa8cca7e3aaad | 28.375 | 84 | 0.521513 | 4.597826 | false | false | false | false |
feincms/feincms | feincms/templatetags/feincms_page_tags.py | 2 | 17283 | # ------------------------------------------------------------------------
# ------------------------------------------------------------------------
import logging
import sys
import traceback
from django import template
from django.apps import apps
from django.conf import settings
from django.http import HttpRequest
from feincms import settings as feincms_settings
from feincms.module.page.extensions.navigation import PagePretender
from feincms.utils.templatetags import (
SimpleAssignmentNodeWithVarAndArgs,
do_simple_assignment_node_with_var_and_args_helper,
)
# Module-level logger, and the library against which every template tag and
# filter defined below is registered.
logger = logging.getLogger("feincms.templatetags.page")
register = template.Library()
def _get_page_model():
    """Return the Page model class named by the ``FEINCMS_DEFAULT_PAGE_MODEL``
    setting (an ``"app_label.ModelName"`` string)."""
    model_path = feincms_settings.FEINCMS_DEFAULT_PAGE_MODEL
    return apps.get_model(*model_path.split("."))
# ------------------------------------------------------------------------
# TODO: Belongs in some utility module
def format_exception(e):
    """Return a compact ``'<error>' in <file> line <n>`` summary for *e*.

    Must be called from within an ``except`` block, since the innermost
    traceback frame is taken from ``sys.exc_info()``.
    """
    frame = traceback.extract_tb(sys.exc_info()[2])[-1]
    return f"'{e}' in {frame.filename} line {frame.lineno:d}"
# ------------------------------------------------------------------------
@register.simple_tag(takes_context=True)
def feincms_nav(context, feincms_page, level=1, depth=1, group=None):
    """
    Return a list of pages for use in navigation menus::

        {% feincms_nav feincms_page level=1 depth=1 as navitems %}

    ``feincms_page`` may also be the current request, in which case the
    best-matching page is looked up first. ``level`` is the 1-based
    navigation level at which the returned pages start, ``depth`` the
    number of levels to include, and ``group`` optionally restricts the
    result to one navigation group (navigationgroups extension).
    """
    page_class = _get_page_model()

    if not feincms_page:
        return []

    if isinstance(feincms_page, HttpRequest):
        try:
            feincms_page = page_class.objects.for_request(feincms_page, best_match=True)
        except page_class.DoesNotExist:
            return []

    mptt_opts = feincms_page._mptt_meta

    # mptt starts counting at zero
    mptt_level_range = [level - 1, level + depth - 1]

    queryset = feincms_page.__class__._default_manager.in_navigation().filter(
        **{
            "%s__gte" % mptt_opts.level_attr: mptt_level_range[0],
            "%s__lt" % mptt_opts.level_attr: mptt_level_range[1],
        }
    )

    page_level = getattr(feincms_page, mptt_opts.level_attr)

    # Used for subset filtering (level>1)
    parent = None

    if level > 1:
        # A subset of the pages is requested. Determine it depending
        # upon the passed page instance
        if level - 2 == page_level:
            # The requested pages start directly below the current page
            parent = feincms_page

        elif level - 2 < page_level:
            # The requested pages start somewhere higher up in the tree
            parent = feincms_page.get_ancestors()[level - 2]

        elif level - 1 > page_level:
            # The requested pages are grandchildren of the current page
            # (or even deeper in the tree). If we would continue processing,
            # this would result in pages from different subtrees being
            # returned directly adjacent to each other.
            queryset = page_class.objects.none()

    if parent:
        if getattr(parent, "navigation_extension", None):
            # Special case for navigation extensions
            return list(
                parent.extended_navigation(
                    depth=depth, request=context.get("request")
                )
            )

        # Apply descendant filter
        queryset &= parent.get_descendants()

    if depth > 1:
        # Filter out children with inactive parents
        # None (no parent) is always allowed
        parents = {None}
        if parent:
            # Subset filtering; allow children of parent as well
            parents.add(parent.id)

        def _parentactive_filter(iterable):
            for elem in iterable:
                if elem.parent_id in parents:
                    yield elem
                    parents.add(elem.id)

        queryset = _parentactive_filter(queryset)

    if group is not None:
        # navigationgroups extension support
        def _navigationgroup_filter(iterable):
            for elem in iterable:
                if getattr(elem, "navigation_group", None) == group:
                    yield elem

        queryset = _navigationgroup_filter(queryset)

    if hasattr(feincms_page, "navigation_extension"):
        # Filter out children of nodes which have a navigation extension
        def _navext_filter(iterable):
            current_navextension_node = None
            for elem in iterable:
                # Eliminate all subitems of last processed nav extension
                if (
                    current_navextension_node is not None
                    and current_navextension_node.is_ancestor_of(elem)
                ):
                    continue

                yield elem
                if getattr(elem, "navigation_extension", None):
                    current_navextension_node = elem
                    try:
                        for extended in elem.extended_navigation(
                            depth=depth, request=context.get("request")
                        ):
                            # Only return items from the extended navigation
                            # which are inside the requested level+depth
                            # values. The "-1" accounts for the differences in
                            # MPTT and navigation level counting
                            this_level = getattr(extended, mptt_opts.level_attr, 0)
                            if this_level < level + depth - 1:
                                yield extended
                    except Exception as e:
                        # Logger.warn() is a deprecated alias of warning().
                        logger.warning(
                            "feincms_nav caught exception in navigation"
                            " extension for page %d: %s",
                            current_navextension_node.id,
                            format_exception(e),
                        )
                else:
                    current_navextension_node = None

        queryset = _navext_filter(queryset)

    # Return a list, not a generator so that it can be consumed
    # several times in a template.
    return list(queryset)
# ------------------------------------------------------------------------
class LanguageLinksNode(SimpleAssignmentNodeWithVarAndArgs):
    """
    ::
        {% feincms_languagelinks for feincms_page as links [args] %}

    This template tag needs the translations extension.

    Arguments can be any combination of:

    * all or existing: Return all languages or only those where a translation
      exists
    * excludecurrent: Excludes the item in the current language from the list
    * request=request: The current request object, only needed if you are
      using AppContents and need to append the "extra path"

    The default behavior is to return an entry for all languages including
    the current language.

    Example::
        {% feincms_languagelinks for feincms_page as links all,excludecurrent %}
        {% for key, name, link in links %}
            <a href="{% if link %}{{ link }}{% else %}/{{ key }}/{% endif %}">
                {% trans name %}</a>
        {% endfor %}
    """

    def what(self, page, args):
        only_existing = args.get("existing", False)
        exclude_current = args.get("excludecurrent", False)

        # Preserve the trailing path when switching languages if extra_path
        # exists (mostly the case inside an ApplicationContent-managed page
        # subtree).
        trailing_path = ""
        request = args.get("request", None)
        if request:
            # Trailing path without the leading slash.
            trailing_path = request._feincms_extra_context.get("extra_path", "")[1:]

        translations = {t.language: t for t in page.available_translations()}
        translations[page.language] = page

        links = []
        for code, name in settings.LANGUAGES:
            if exclude_current and code == page.language:
                continue

            translation = translations.get(code)
            if translation is not None:
                links.append(
                    (code, name, translation.get_absolute_url() + trailing_path)
                )
            elif not only_existing:
                links.append((code, name, None))

        return links
# Register the assignment-style tag:
# {% feincms_languagelinks for feincms_page as links [args] %}
register.tag(
    "feincms_languagelinks",
    do_simple_assignment_node_with_var_and_args_helper(LanguageLinksNode),
)
# ------------------------------------------------------------------------
def _translate_page_into(page, language, default=None):
"""
Return the translation for a given page
"""
# Optimisation shortcut: No need to dive into translations if page already
# what we want
try:
if page.language == language:
return page
if language is not None:
translations = {t.language: t for t in page.available_translations()}
if language in translations:
return translations[language]
except AttributeError:
pass
if hasattr(default, "__call__"):
return default(page=page)
return default
# ------------------------------------------------------------------------
class TranslatedPageNode(SimpleAssignmentNodeWithVarAndArgs):
    """
    ::
        {% feincms_translatedpage for feincms_page as feincms_transpage
            language=en %}
        {% feincms_translatedpage for feincms_page as originalpage %}
        {% feincms_translatedpage for some_page as translatedpage
            language=feincms_page.language %}

    This template tag needs the translations extension.

    Returns the requested translation of the page if it exists. If the
    language argument is omitted the primary language will be returned (the
    first language specified in settings.LANGUAGES).

    Note: To distinguish between a bare language code and a variable we check
    whether settings LANGUAGES contains that code -- so naming a variable "en"
    will probably not do what is intended.
    """

    def what(self, page, args, default=None):
        language = args.get("language", None)
        if language is None:
            # No language given at all: use the site's primary language.
            language = settings.LANGUAGES[0][0]
        else:
            if language not in (x[0] for x in settings.LANGUAGES):
                # Not a known language code, so treat the argument as a
                # template variable and resolve it against the context.
                try:
                    language = template.Variable(language).resolve(self.render_context)
                except template.VariableDoesNotExist:
                    # Unresolvable variable: fall back to the primary language.
                    language = settings.LANGUAGES[0][0]
        return _translate_page_into(page, language, default=default)
# Register {% feincms_translatedpage for page as var [language=...] %}.
register.tag(
    "feincms_translatedpage",
    do_simple_assignment_node_with_var_and_args_helper(TranslatedPageNode),
)
# ------------------------------------------------------------------------
class TranslatedPageNodeOrBase(TranslatedPageNode):
    """Like ``feincms_translatedpage``, but falls back to the page's original
    translation (instead of ``None``) when the requested translation does not
    exist."""

    def what(self, page, args):
        fallback = getattr(page, "get_original_translation", page)
        return super().what(page, args, default=fallback)
# Register {% feincms_translatedpage_or_base for page as var [language=...] %}.
register.tag(
    "feincms_translatedpage_or_base",
    do_simple_assignment_node_with_var_and_args_helper(TranslatedPageNodeOrBase),
)
# ------------------------------------------------------------------------
@register.filter
def feincms_translated_or_base(pages, language=None):
    """Yield the requested translation for each page, falling back to the
    page's original translation when none exists in ``language``.

    Accepts either a single page or an iterable of pages.
    """
    # Normalize a single page into a one-element list.
    if not hasattr(pages, "__iter__"):
        pages = [pages]
    for candidate in pages:
        yield _translate_page_into(
            candidate, language, default=candidate.get_original_translation
        )
# ------------------------------------------------------------------------
@register.inclusion_tag("breadcrumbs.html")
def feincms_breadcrumbs(page, include_self=True):
    """
    Generate a list of the page's ancestors suitable for use as breadcrumb
    navigation.

    By default, generates an unordered list with the id "breadcrumbs" -
    override breadcrumbs.html to change this.
    ::
        {% feincms_breadcrumbs feincms_page %}
    """
    trail = [
        (ancestor.get_absolute_url(), ancestor.short_title())
        for ancestor in page.get_ancestors()
    ]
    if include_self:
        # The current page itself is rendered without a link.
        trail.append((None, page.short_title()))
    return {"trail": trail}
# ------------------------------------------------------------------------
@register.filter
def is_parent_of(page1, page2):
    """
    Determines whether a given page is the parent of another page
    Example::
        {% if page|is_parent_of:feincms_page %} ... {% endif %}
    """
    try:
        return page1.is_ancestor_of(page2)
    except (AttributeError, ValueError):
        # Objects without tree information (or from different trees)
        # simply never match.
        return False
# ------------------------------------------------------------------------
@register.filter
def is_equal_or_parent_of(page1, page2):
    """
    Determines whether a given page is equal to or the parent of another page.
    This is especially handy when generating the navigation. The following
    example adds a CSS class ``current`` to the current main navigation entry::
        {% for page in navigation %}
            <a
                {% if page|is_equal_or_parent_of:feincms_page %}
                class="current"
                {% endif %}
            >{{ page.title }}</a>
        {% endfor %}
    """
    try:
        return page1.is_ancestor_of(page2, include_self=True)
    except (AttributeError, ValueError):
        # Objects without tree information simply never match.
        return False
# ------------------------------------------------------------------------
def _is_sibling_of(page1, page2):
    # Two pages are siblings when they share the same parent; two root pages
    # (both with parent None) also count as siblings.
    return page1.parent_id == page2.parent_id
@register.filter
def is_sibling_of(page1, page2):
    """
    Determines whether a given page is a sibling of another page
    ::
        {% if page|is_sibling_of:feincms_page %} ... {% endif %}
    """
    try:
        return _is_sibling_of(page1, page2)
    except AttributeError:
        # Non-page objects have no parent_id and therefore no siblings.
        return False
# ------------------------------------------------------------------------
@register.filter
def siblings_along_path_to(page_list, page2):
    """
    Filters a list of pages so that only those remain that are either:

        * An ancestor of the current page
        * A sibling of an ancestor of the current page

    A typical use case is building a navigation menu with the active
    path to the current page expanded::

        {% feincms_nav feincms_page level=1 depth=3 as navitems %}
        {% with navitems|siblings_along_path_to:feincms_page as navtree %}
            ... whatever ...
        {% endwith %}

    Returns an empty tuple when ``page_list`` is empty or the filtering
    fails (e.g. for objects without tree information).
    """

    if page_list:
        try:
            # Try to avoid hitting the database: If the current page is
            # in_navigation, then all relevant pages are already in the
            # incoming list, no need to fetch ancestors or children.

            # NOTE: This assumes that the input list actually is complete (ie.
            # comes from feincms_nav). We'll cope with the fall-out of that
            # assumption when it happens...
            ancestors = [
                a_page
                for a_page in page_list
                if a_page.is_ancestor_of(page2, include_self=True)
            ]
            top_level = min(a_page.level for a_page in page_list)
            if not ancestors:
                # Happens when we sit on a page outside the navigation tree so
                # fake an active root page to avoid a get_ancestors() db call
                # which would only give us a non-navigation root page anyway.
                page_class = _get_page_model()
                p = page_class(
                    title="dummy", tree_id=-1, parent_id=None, in_navigation=False
                )
                ancestors = (p,)

            siblings = [
                a_page
                for a_page in page_list
                if (
                    a_page.parent_id == page2.id
                    or a_page.level == top_level
                    or any(_is_sibling_of(a_page, a) for a in ancestors)
                )
            ]
            return siblings
        except (AttributeError, ValueError) as e:
            # Logger.warn() is a deprecated alias of warning().
            logger.warning(
                "siblings_along_path_to caught exception: %s", format_exception(e)
            )

    return ()
# ------------------------------------------------------------------------
@register.simple_tag(takes_context=True)
def page_is_active(context, page, feincms_page=None, path=None):
    """
    Usage example::
        {% feincms_nav feincms_page level=1 as toplevel %}
        <ul>
        {% for page in toplevel %}
            {% page_is_active page as is_active %}
            <li {% if is_active %}class="active"{% endif %}>
                <a href="{{ page.get_navigation_url }}">{{ page.title }}</a>
            <li>
        {% endfor %}
        </ul>
    """
    # Pretender pages (from navigation extensions) carry no tree info, so
    # activity is decided by comparing URL prefixes instead.
    if isinstance(page, PagePretender):
        current_path = path if path is not None else context["request"].path_info
        return current_path.startswith(page.get_absolute_url())

    if feincms_page is None:
        feincms_page = context["feincms_page"]
    return page.is_ancestor_of(feincms_page, include_self=True)
# ------------------------------------------------------------------------
@register.simple_tag
def feincms_parentlink(of_, feincms_page, **kwargs):
    """Return the URL of the ancestor of ``feincms_page`` sitting at the given
    navigation ``level`` (1-based), or ``"#"`` when no such ancestor exists."""
    level = int(kwargs.get("level", 1))
    nav_level = feincms_page.level + 1

    if nav_level == level:
        return feincms_page.get_absolute_url()
    if nav_level < level:
        # The page itself sits above the requested level.
        return "#"

    try:
        return feincms_page.get_ancestors()[level - 1].get_absolute_url()
    except IndexError:
        return "#"
| bsd-3-clause | fa902370a08528c27d8ce762c0c3a599 | 32.300578 | 88 | 0.55083 | 4.467046 | false | false | false | false |
feincms/feincms | feincms/content/section/models.py | 2 | 3662 | from django.conf import settings as django_settings
from django.contrib import admin
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import gettext_lazy as _
from feincms import settings
from feincms.admin.item_editor import FeinCMSInline
from feincms.contrib.richtext import RichTextField
from feincms.module.medialibrary.fields import MediaFileForeignKey
from feincms.module.medialibrary.models import MediaFile
from feincms.utils.tuple import AutoRenderTuple
class SectionContentInline(FeinCMSInline):
    # Use a raw ID widget for the media file (media libraries can be large)
    # and vertical radio buttons instead of a select box for the type field.
    raw_id_fields = ("mediafile",)
    radio_fields = {"type": admin.VERTICAL}
class SectionContent(models.Model):
    """
    Title, media file and rich text fields in one content block.

    ``initialize_type`` must be called with a non-empty ``TYPE_CHOICES``
    list when the concrete content type is created; it adds the ``type``
    field. An optional ``cleanse`` callable is applied to the rich text
    on every save.
    """

    feincms_item_editor_inline = SectionContentInline
    feincms_item_editor_context_processors = (
        lambda x: settings.FEINCMS_RICHTEXT_INIT_CONTEXT,
    )
    feincms_item_editor_includes = {"head": [settings.FEINCMS_RICHTEXT_INIT_TEMPLATE]}

    title = models.CharField(_("title"), max_length=200, blank=True)
    richtext = RichTextField(_("text"), blank=True)
    mediafile = MediaFileForeignKey(
        MediaFile,
        on_delete=models.CASCADE,
        verbose_name=_("media file"),
        related_name="+",
        blank=True,
        null=True,
    )

    class Meta:
        abstract = True
        verbose_name = _("section")
        verbose_name_plural = _("sections")

    @classmethod
    def initialize_type(cls, TYPE_CHOICES=None, cleanse=None):
        """Add the ``type`` field (and optional ``cleanse`` hook) to *cls*.

        Raises ``ImproperlyConfigured`` when the media library app is not
        installed or no ``TYPE_CHOICES`` were provided.
        """
        if "feincms.module.medialibrary" not in django_settings.INSTALLED_APPS:
            raise ImproperlyConfigured(
                "You have to add 'feincms.module.medialibrary' to your"
                " INSTALLED_APPS before creating a %s" % cls.__name__
            )
        if TYPE_CHOICES is None:
            raise ImproperlyConfigured(
                "You need to set TYPE_CHOICES when creating a" " %s" % cls.__name__
            )
        cls.add_to_class(
            "type",
            models.CharField(
                _("type"),
                max_length=10,
                choices=TYPE_CHOICES,
                default=TYPE_CHOICES[0][0],
            ),
        )
        if cleanse:
            cls.cleanse = cleanse

    @classmethod
    def get_queryset(cls, filter_args):
        # Explicitly add nullable FK mediafile to minimize the DB query count
        return cls.objects.select_related("parent", "mediafile").filter(filter_args)

    def render(self, **kwargs):
        """Render using the most specific matching section template.

        Template lookup order: mediafile-type + section-type combined,
        mediafile type only, section type only, then the default.
        """
        if self.mediafile:
            mediafile_type = self.mediafile.type
        else:
            mediafile_type = "nomedia"
        return AutoRenderTuple(
            (
                [
                    f"content/section/{mediafile_type}_{self.type}.html",
                    "content/section/%s.html" % mediafile_type,
                    "content/section/%s.html" % self.type,
                    "content/section/default.html",
                ],
                {"content": self},
            )
        )

    def save(self, *args, **kwargs):
        """Run the optional ``cleanse`` hook over the rich text, then save."""
        if getattr(self, "cleanse", None):
            try:
                # Passes the rich text content as first argument because
                # the passed callable has been converted into a bound method
                self.richtext = self.cleanse(self.richtext)
            except TypeError:
                # Call the underlying function without the implicit ``self``.
                # Bug fix: ``im_func`` was Python 2 only and raises
                # AttributeError on Python 3; bound methods expose the
                # plain function as ``__func__``.
                self.richtext = self.cleanse.__func__(self.richtext)
        super().save(*args, **kwargs)

    save.alters_data = True
| bsd-3-clause | 9abb9cbf59172a9b5dddd2c0bfb9d8f1 | 32.59633 | 86 | 0.604042 | 4.263097 | false | false | false | false |
feincms/feincms | feincms/utils/managers.py | 2 | 1997 | # ------------------------------------------------------------------------
class ActiveAwareContentManagerMixin:
    """
    Mixin adding an "active" notion to content managers.

    An object counts as active when it satisfies every registered filter
    rule (logical AND). A rule is either a Q-like object handed to
    ``queryset.filter`` or a callable mapping a queryset to a new
    queryset. A manager for a content class using the "datepublisher"
    extension should adopt this mixin or implement the same interface.
    """

    # Registry of filter rules keyed by an arbitrary identifier.  It is
    # created lazily in add_to_active_filters() so that each derived
    # manager gets its own dict instead of sharing this one -- do not
    # replace the None with {} here!
    active_filters = None

    @classmethod
    def apply_active_filters(cls, queryset):
        """Run every registered rule over *queryset* and return the result."""
        if cls.active_filters is None:
            return queryset
        for rule in cls.active_filters.values():
            queryset = rule(queryset) if callable(rule) else queryset.filter(rule)
        return queryset

    @classmethod
    def add_to_active_filters(cls, filter, key=None):
        """
        Register an additional rule.

        *filter* is either a Q object applied to the content class or a
        callable taking a queryset and returning a new one. Registering
        under an existing *key* replaces the previous rule; without a
        *key*, the rule itself is used as its own key.
        """
        if cls.active_filters is None:
            cls.active_filters = {}
        cls.active_filters[filter if key is None else key] = filter

    def active(self):
        """Return only currently active objects."""
        return self.apply_active_filters(self)
| bsd-3-clause | cecc58b6d08d7e64f14f5b4ceafb4b36 | 36.679245 | 79 | 0.604907 | 4.732227 | false | false | false | false |
jupyter/nbgrader | nbgrader/exchange/default/list.py | 2 | 10184 | import os
import glob
import shutil
import re
import hashlib
from nbgrader.exchange.abc import ExchangeList as ABCExchangeList
from nbgrader.utils import notebook_hash, make_unique_key
from .exchange import Exchange
def _checksum(path):
    """Return the hex MD5 digest of the file at *path*.

    MD5 is used here only as a cheap change detector for feedback files,
    not for any security purpose.
    """
    m = hashlib.md5()
    # Bug fix: the original ``open(path, 'rb').read()`` never closed the
    # file handle; use a context manager so it is always released.
    with open(path, 'rb') as fh:
        m.update(fh.read())
    return m.hexdigest()
class ExchangeList(ABCExchangeList, Exchange):
    """List (and optionally remove) assignments in the filesystem exchange.

    Depending on the ``inbound``/``cached`` flags this lists submitted
    assignments in the exchange or submissions cached locally; otherwise
    it lists released (outbound) assignments.
    """
    def init_src(self):
        # Listing reads directly from the exchange; nothing to prepare.
        pass
    def init_dest(self):
        """Collect the paths of all matching assignments into ``self.assignments``."""
        # Fall back to glob wildcards for any id that was not configured.
        course_id = self.coursedir.course_id if self.coursedir.course_id else '*'
        assignment_id = self.coursedir.assignment_id if self.coursedir.assignment_id else '*'
        student_id = self.coursedir.student_id if self.coursedir.student_id else '*'
        if self.inbound:
            pattern = os.path.join(self.root, course_id, 'inbound', '{}+{}+*'.format(student_id, assignment_id))
        elif self.cached:
            pattern = os.path.join(self.cache, course_id, '{}+{}+*'.format(student_id, assignment_id))
        else:
            pattern = os.path.join(self.root, course_id, 'outbound', '{}'.format(assignment_id))
        self.assignments = sorted(glob.glob(pattern))
    def parse_assignment(self, assignment):
        """Split an exchange path into its course/student/assignment/timestamp parts.

        Raises RuntimeError when the path does not match the expected layout.
        """
        if self.inbound:
            regexp = r".*/(?P<course_id>.*)/inbound/(?P<student_id>[^+]*)\+(?P<assignment_id>[^+]*)\+(?P<timestamp>[^+]*)(?P<random_string>\+.*)?"
        elif self.cached:
            regexp = r".*/(?P<course_id>.*)/(?P<student_id>.*)\+(?P<assignment_id>.*)\+(?P<timestamp>.*)"
        else:
            regexp = r".*/(?P<course_id>.*)/outbound/(?P<assignment_id>.*)"
        m = re.match(regexp, assignment)
        if m is None:
            # Bug fix: the original passed the values as extra RuntimeError
            # arguments (logging style), so the '%s' placeholders were never
            # interpolated into the message.
            raise RuntimeError("Could not match '%s' with regexp '%s'" % (assignment, regexp))
        return m.groupdict()
    def format_inbound_assignment(self, info):
        """One-line human-readable summary of a submitted assignment."""
        msg = "{course_id} {student_id} {assignment_id} {timestamp}".format(**info)
        if info['status'] == 'submitted':
            if info['has_local_feedback'] and not info['feedback_updated']:
                msg += " (feedback already fetched)"
            elif info['has_exchange_feedback']:
                msg += " (feedback ready to be fetched)"
            else:
                msg += " (no feedback available)"
        return msg
    def format_outbound_assignment(self, info):
        """One-line human-readable summary of a released assignment."""
        msg = "{course_id} {assignment_id}".format(**info)
        if os.path.exists(info['assignment_id']):
            msg += " (already downloaded)"
        return msg
    def copy_files(self):
        # Listing never copies anything.
        pass
    def parse_assignments(self):
        """Build a list of info dicts for every matching assignment.

        For submitted assignments this also resolves local and exchange
        feedback state per notebook, and groups the results per
        (course, student, assignment).
        """
        if self.coursedir.student_id:
            courses = self.authenticator.get_student_courses(self.coursedir.student_id)
        else:
            courses = None
        assignments = []
        for path in self.assignments:
            info = self.parse_assignment(path)
            # Skip courses the student is not enrolled in.
            if courses is not None and info['course_id'] not in courses:
                continue
            if self.path_includes_course:
                assignment_dir = os.path.join(self.assignment_dir, info['course_id'], info['assignment_id'])
            else:
                assignment_dir = os.path.join(self.assignment_dir, info['assignment_id'])
            if self.inbound or self.cached:
                info['status'] = 'submitted'
                info['path'] = path
            elif os.path.exists(assignment_dir):
                info['status'] = 'fetched'
                info['path'] = os.path.abspath(assignment_dir)
            else:
                info['status'] = 'released'
                info['path'] = path
            if self.remove:
                info['status'] = 'removed'
            notebooks = sorted(glob.glob(os.path.join(info['path'], '*.ipynb')))
            if not notebooks:
                self.log.warning("No notebooks found in {}".format(info['path']))
            info['notebooks'] = []
            for notebook in notebooks:
                nb_info = {
                    'notebook_id': os.path.splitext(os.path.split(notebook)[1])[0],
                    'path': os.path.abspath(notebook)
                }
                # Feedback state only applies to submitted assignments.
                if info['status'] != 'submitted':
                    info['notebooks'].append(nb_info)
                    continue
                nb_info['has_local_feedback'] = False
                nb_info['has_exchange_feedback'] = False
                nb_info['local_feedback_path'] = None
                nb_info['feedback_updated'] = False
                # Check whether feedback has been fetched already.
                local_feedback_dir = os.path.join(
                    assignment_dir, 'feedback', info['timestamp'])
                local_feedback_path = os.path.join(
                    local_feedback_dir, '{0}.html'.format(nb_info['notebook_id']))
                has_local_feedback = os.path.isfile(local_feedback_path)
                if has_local_feedback:
                    local_feedback_checksum = _checksum(local_feedback_path)
                else:
                    local_feedback_checksum = None
                # Also look to see if there is feedback available to fetch.
                unique_key = make_unique_key(
                    info['course_id'],
                    info['assignment_id'],
                    nb_info['notebook_id'],
                    info['student_id'],
                    info['timestamp'])
                self.log.debug("Unique key is: {}".format(unique_key))
                nb_hash = notebook_hash(notebook, unique_key)
                exchange_feedback_path = os.path.join(
                    self.root, info['course_id'], 'feedback', '{0}.html'.format(nb_hash))
                has_exchange_feedback = os.path.isfile(exchange_feedback_path)
                if not has_exchange_feedback:
                    # Try looking for legacy feedback (hashed without the key).
                    nb_hash = notebook_hash(notebook)
                    exchange_feedback_path = os.path.join(
                        self.root, info['course_id'], 'feedback', '{0}.html'.format(nb_hash))
                    has_exchange_feedback = os.path.isfile(exchange_feedback_path)
                if has_exchange_feedback:
                    exchange_feedback_checksum = _checksum(exchange_feedback_path)
                else:
                    exchange_feedback_checksum = None
                nb_info['has_local_feedback'] = has_local_feedback
                nb_info['has_exchange_feedback'] = has_exchange_feedback
                if has_local_feedback:
                    nb_info['local_feedback_path'] = local_feedback_path
                if has_local_feedback and has_exchange_feedback:
                    # Differing checksums mean newer feedback is available.
                    nb_info['feedback_updated'] = exchange_feedback_checksum != local_feedback_checksum
                info['notebooks'].append(nb_info)
            if info['status'] == 'submitted':
                # Aggregate per-notebook feedback state up to the assignment.
                if info['notebooks']:
                    has_local_feedback = all([nb['has_local_feedback'] for nb in info['notebooks']])
                    has_exchange_feedback = all([nb['has_exchange_feedback'] for nb in info['notebooks']])
                    feedback_updated = any([nb['feedback_updated'] for nb in info['notebooks']])
                else:
                    has_local_feedback = False
                    has_exchange_feedback = False
                    feedback_updated = False
                info['has_local_feedback'] = has_local_feedback
                info['has_exchange_feedback'] = has_exchange_feedback
                info['feedback_updated'] = feedback_updated
                if has_local_feedback:
                    info['local_feedback_path'] = os.path.join(
                        assignment_dir, 'feedback', info['timestamp'])
                else:
                    info['local_feedback_path'] = None
            assignments.append(info)
        # partition the assignments into groups for course/student/assignment
        if self.inbound or self.cached:
            _get_key = lambda info: (info['course_id'], info['student_id'], info['assignment_id'])
            _match_key = lambda info, key: (
                info['course_id'] == key[0] and
                info['student_id'] == key[1] and
                info['assignment_id'] == key[2])
            assignment_keys = sorted(list(set([_get_key(info) for info in assignments])))
            assignment_submissions = []
            for key in assignment_keys:
                submissions = [x for x in assignments if _match_key(x, key)]
                submissions = sorted(submissions, key=lambda x: x['timestamp'])
                info = {
                    'course_id': key[0],
                    'student_id': key[1],
                    'assignment_id': key[2],
                    'status': submissions[0]['status'],
                    'submissions': submissions
                }
                assignment_submissions.append(info)
            assignments = assignment_submissions
        return assignments
    def list_files(self):
        """List files."""
        assignments = self.parse_assignments()
        if self.inbound or self.cached:
            self.log.info("Submitted assignments:")
            for assignment in assignments:
                for info in assignment['submissions']:
                    self.log.info(self.format_inbound_assignment(info))
        else:
            self.log.info("Released assignments:")
            for info in assignments:
                self.log.info(self.format_outbound_assignment(info))
        return assignments
    def remove_files(self):
        """List and remove files."""
        assignments = self.parse_assignments()
        if self.inbound or self.cached:
            self.log.info("Removing submitted assignments:")
            for assignment in assignments:
                for info in assignment['submissions']:
                    self.log.info(self.format_inbound_assignment(info))
        else:
            self.log.info("Removing released assignments:")
            for info in assignments:
                self.log.info(self.format_outbound_assignment(info))
        for assignment in self.assignments:
            shutil.rmtree(assignment)
        return assignments
| bsd-3-clause | e57319dbadf88fe7646ef6df181625fd | 41.610879 | 146 | 0.542714 | 4.404844 | false | false | false | false |
jupyter/nbgrader | nbgrader/converters/generate_feedback.py | 2 | 2383 | import os
from traitlets.config import Config
from traitlets import List, default
from nbconvert.exporters import HTMLExporter
from nbconvert.preprocessors import CSSHTMLHeaderPreprocessor
from .base import BaseConverter
from ..preprocessors import GetGrades
class GenerateFeedback(BaseConverter):
    """Convert autograded notebooks into HTML feedback pages.

    Reads from the course's ``autograded`` directory and writes rendered
    HTML (with grades inserted) into the ``feedback`` directory.
    """
    @property
    def _input_directory(self):
        # Feedback is generated from the already-autograded submissions.
        return self.coursedir.autograded_directory
    @property
    def _output_directory(self):
        return self.coursedir.feedback_directory
    # GetGrades pulls scores from the database into the notebook metadata;
    # CSSHTMLHeaderPreprocessor inlines the stylesheet for the HTML export.
    preprocessors = List([
        GetGrades,
        CSSHTMLHeaderPreprocessor
    ]).tag(config=True)
    @default("classes")
    def _classes_default(self):
        classes = super(GenerateFeedback, self)._classes_default()
        classes.append(HTMLExporter)
        return classes
    @default("export_class")
    def _exporter_class_default(self):
        return HTMLExporter
    @default("permissions")
    def _permissions_default(self):
        # Group-writable output when the course directory is shared.
        return 664 if self.coursedir.groupshared else 644
    def _load_config(self, cfg, **kwargs):
        """Migrate the deprecated ``Feedback`` config section, warning loudly."""
        if 'Feedback' in cfg:
            self.log.warning(
                "Use GenerateFeedback in config, not Feedback. Outdated config:\n%s",
                '\n'.join(
                    'Feedback.{key} = {value!r}'.format(key=key, value=value)
                    # Bug fix: report the entries of the deprecated ``Feedback``
                    # section itself; the original iterated
                    # ``cfg.GenerateFeedbackApp``, an unrelated (and typically
                    # empty) section, so the outdated values never appeared.
                    for key, value in cfg.Feedback.items()
                )
            )
            cfg.GenerateFeedback.merge(cfg.Feedback)
            del cfg.Feedback
        super(GenerateFeedback, self)._load_config(cfg, **kwargs)
    def __init__(self, coursedir=None, **kwargs):
        super(GenerateFeedback, self).__init__(coursedir=coursedir, **kwargs)
        # Provide defaults for the HTML exporter without clobbering any
        # user-supplied configuration.
        c = Config()
        if 'template_name' not in self.config.HTMLExporter:
            c.HTMLExporter.template_name = 'feedback'
        if 'extra_template_basedirs' not in self.config.HTMLExporter:
            template_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'server_extensions', 'formgrader', 'templates'))
            c.HTMLExporter.extra_template_basedirs = [template_path]
        extra_static_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'server_extensions', 'formgrader', 'static', 'components', 'bootstrap', 'css'))
        c.HTMLExporter.extra_template_paths = [extra_static_path]
        self.update_config(c)
| bsd-3-clause | e54eb3305f66ff396362f3100ed5c7f3 | 34.044118 | 169 | 0.641209 | 4.025338 | false | true | false | false |
jupyter/nbgrader | nbgrader/apps/extensionapp.py | 3 | 1166 | # coding: utf-8
from traitlets.config.application import catch_config_error
from .baseapp import NbGrader, format_excepthook
# Migration notice shown verbatim by the deprecated ``nbgrader extension``
# commands, pointing users at the ``jupyter nbextension`` /
# ``jupyter serverextension`` replacements.
_compat_message = """
The installation of the nbgrader extensions is now managed through the
`jupyter nbextension` and `jupyter serverextension` commands.
To install and enable the nbextensions (assignment_list and create_assignment) run:
    $ jupyter nbextension install --sys-prefix --py nbgrader
    $ jupyter nbextension enable --sys-prefix --py nbgrader
To install the server extension (assignment_list) run:
    $ jupyter serverextension enable --sys-prefix --py nbgrader
To install for all users, replace `--sys-prefix` by `--system`.
To install only for the current user replace `--sys-prefix` by `--user`.
"""
class ExtensionApp(NbGrader):
    """Backwards-compatibility stub for ``nbgrader extension``: it only
    logs instructions for the ``jupyter nbextension`` /
    ``jupyter serverextension`` commands that replaced it."""
    name = 'nbgrader extension'
    description = 'Utilities for managing the nbgrader extension'
    examples = ""
    @catch_config_error
    def initialize(self, argv=None):
        super().initialize(argv)
    def start(self):
        # Emit the migration instructions line by line, then continue with
        # the regular application startup.
        for message_line in _compat_message.split('\n'):
            self.log.info(message_line)
        super().start()
| bsd-3-clause | 48567e2322aae3e4fca010c763dfb2db | 30.513514 | 83 | 0.716123 | 3.749196 | false | true | false | false |
jupyter/nbgrader | nbgrader/exchange/default/collect.py | 2 | 6185 | import os
import glob
import shutil
import sys
from collections import defaultdict
from textwrap import dedent
import datetime
from nbgrader.exchange.abc import ExchangeCollect as ABCExchangeCollect
from .exchange import Exchange
from nbgrader.utils import check_mode, parse_utc
from ...api import Gradebook, MissingEntry
from ...utils import check_mode, parse_utc
# pwd is for matching unix user names with student ids, so we shouldn't
# import it on windows machines (the module does not exist there).
if sys.platform != 'win32':
    import pwd
else:
    pwd = None  # sentinel: ownership checks are skipped on Windows
def groupby(l, key=lambda x: x):
    """Group the items of *l* into a defaultdict of lists keyed by
    ``key(item)``, preserving the encounter order within each group."""
    grouped = defaultdict(list)
    for element in l:
        grouped[key(element)].append(element)
    return grouped
class ExchangeCollect(Exchange, ABCExchangeCollect):
    """Collect student submissions from the exchange into ``submitted/``.

    For each student, the newest submission is taken (or the newest one
    before the due date when ``before_duedate`` is set).
    """
    def _path_to_record(self, path):
        """Parse a submission path into ``{'username', 'filename', 'timestamp'}``."""
        filename = os.path.split(path)[1]
        # Split from the right at most three times: submissions are named
        # user+assignment+timestamp(+random), so index 0 is the username
        # (which may itself contain '+') and index 2 the timestamp.
        filename_list = filename.rsplit('+', 3)
        if len(filename_list) < 3:
            self.fail("Invalid filename: {}".format(filename))
        username = filename_list[0]
        timestamp = parse_utc(filename_list[2])
        return {'username': username, 'filename': filename, 'timestamp': timestamp}
    def _sort_by_timestamp(self, records):
        # Newest first.
        return sorted(records, key=lambda item: item['timestamp'], reverse=True)
    def init_src(self):
        """Locate the inbound directory and pick one record per student."""
        if self.coursedir.course_id == '':
            self.fail("No course id specified. Re-run with --course flag.")
        self.course_path = os.path.join(self.root, self.coursedir.course_id)
        self.inbound_path = os.path.join(self.course_path, 'inbound')
        if not os.path.isdir(self.inbound_path):
            self.fail("Course not found: {}".format(self.inbound_path))
        if not check_mode(self.inbound_path, read=True, execute=True):
            self.fail("You don't have read permissions for the directory: {}".format(self.inbound_path))
        student_id = self.coursedir.student_id if self.coursedir.student_id else '*'
        pattern = os.path.join(self.inbound_path, '{}+{}+*'.format(student_id, self.coursedir.assignment_id))
        records = [self._path_to_record(f) for f in glob.glob(pattern)]
        usergroups = groupby(records, lambda item: item['username'])
        # Look up the due date (if any) so --before-duedate can be honored.
        with Gradebook(self.coursedir.db_url, self.coursedir.course_id) as gb:
            try:
                assignment = gb.find_assignment(self.coursedir.assignment_id)
                self.duedate = assignment.duedate
            except MissingEntry:
                self.duedate = None
        if self.duedate is None or not self.before_duedate:
            # Take each student's newest submission.
            self.src_records = [self._sort_by_timestamp(v)[0] for v in usergroups.values()]
        else:
            # Prefer the newest submission at or before the due date, falling
            # back to the newest overall when none qualifies.
            self.src_records = []
            for v in usergroups.values():
                records = self._sort_by_timestamp(v)
                records_before_duedate = [record for record in records if record['timestamp'] <= self.duedate]
                if records_before_duedate:
                    self.src_records.append(records_before_duedate[0])
                else:
                    self.src_records.append(records[0])
    def init_dest(self):
        # Destinations are computed per record in copy_files().
        pass
    def copy_files(self):
        """Copy each selected submission into the course's submitted directory."""
        if len(self.src_records) == 0:
            self.log.warning("No submissions of '{}' for course '{}' to collect".format(
                self.coursedir.assignment_id,
                self.coursedir.course_id))
        else:
            self.log.info("Processing {} submissions of '{}' for course '{}'".format(
                len(self.src_records),
                self.coursedir.assignment_id,
                self.coursedir.course_id))
        for rec in self.src_records:
            student_id = rec['username']
            src_path = os.path.join(self.inbound_path, rec['filename'])
            # Cross check the student id with the owner of the submitted directory
            if self.check_owner and pwd is not None:  # check disabled under windows
                try:
                    owner = pwd.getpwuid(os.stat(src_path).st_uid).pw_name
                except KeyError:
                    owner = "unknown id"
                if student_id != owner:
                    self.log.warning(dedent(
                        """
                        {} claims to be submitted by {} but is owned by {}; cheating attempt?
                        you may disable this warning by unsetting the option CollectApp.check_owner
                        """).format(src_path, student_id, owner))
            dest_path = self.coursedir.format_path(self.coursedir.submitted_directory, student_id, self.coursedir.assignment_id)
            if not os.path.exists(os.path.dirname(dest_path)):
                os.makedirs(os.path.dirname(dest_path))
            copy = False
            updating = False
            if os.path.isdir(dest_path):
                # A submission was collected before; decide whether to replace it.
                existing_timestamp = self.coursedir.get_existing_timestamp(dest_path)
                new_timestamp = rec['timestamp']
                if self.update and (existing_timestamp is None or new_timestamp > existing_timestamp):
                    copy = True
                    updating = True
                elif self.before_duedate and existing_timestamp != new_timestamp:
                    copy = True
                    updating = True
            else:
                copy = True
            if copy:
                if updating:
                    self.log.info("Updating submission: {} {}".format(student_id, self.coursedir.assignment_id))
                    shutil.rmtree(dest_path)
                else:
                    self.log.info("Collecting submission: {} {}".format(student_id, self.coursedir.assignment_id))
                self.do_copy(src_path, dest_path)
            else:
                if self.update:
                    self.log.info("No newer submission to collect: {} {}".format(
                        student_id, self.coursedir.assignment_id
                    ))
                else:
                    self.log.info("Submission already exists, use --update to update: {} {}".format(
                        student_id, self.coursedir.assignment_id
                    ))
| bsd-3-clause | dab6322f176e83ce7e069b0d85e629b9 | 41.951389 | 128 | 0.580275 | 4.233402 | false | false | false | false |
jupyter/nbgrader | demos/demo_multiple_classes/jupyterhub_config.py | 2 | 3458 | c = get_config()
# Our user list
c.Authenticator.allowed_users = [
    'instructor1',
    'instructor2',
    'student1',
    'grader-course101',
    'grader-course123',
]
# instructor1 and instructor2 have access to different shared servers.
# Note that groups providing access to the formgrader *must* start with
# 'formgrade-', and groups providing access to course materials *must*
# start with 'nbgrader-' in order for nbgrader to work correctly.
c.JupyterHub.load_groups = {
    'instructors': [
        'instructor1',
        'instructor2',
    ],
    'formgrade-course101': [
        'instructor1',
        'grader-course101',
    ],
    'formgrade-course123': [
        'instructor2',
        'grader-course123',
    ],
    'nbgrader-course101': [
        'instructor1',
        'student1',
    ],
    'nbgrader-course123': [
        'instructor2',
        'student1',
    ],
}
# Keep a reference to the role list so per-course roles can be appended below.
c.JupyterHub.load_roles = roles = [
    {
        'name': 'instructor',
        'groups': ['instructors'],
        'scopes': [
            # these are the scopes required for the admin UI
            'admin:users',
            'admin:servers',
        ],
    },
    # The class_list extension needs permission to access services
    {
        'name': 'server',
        'scopes': [
            'inherit',
            # in JupyterHub 2.4, this can be a list of permissions
            # greater than the owner and the result will be the intersection;
            # until then, 'inherit' is the only way to have variable permissions
            # for the server token by user
            # "access:services",
            # "list:services",
            # "read:services",
            # "users:activity!user",
            # "access:servers!user",
        ],
    },
]
# One formgrader-access role and one course-materials role per course.
for course in ['course101', 'course123']:
    # access to formgrader
    roles.append(
        {
            'name': f'formgrade-{course}',
            'groups': [f'formgrade-{course}'],
            'scopes': [
                f'access:services!service={course}',
            ],
        }
    )
    # access to course materials
    roles.append(
        {
            'name': f'nbgrader-{course}',
            'groups': [f'nbgrader-{course}'],
            'scopes': [
                # access to the services API to discover the service(s)
                'list:services',
                f'read:services!service={course}',
            ],
        }
    )
# Start the notebook server as a service. The port can be whatever you want
# and the group has to match the name of the group defined above.
# The '{{...}}' api_token values are template placeholders substituted with
# real tokens when this demo config is deployed.
c.JupyterHub.services = [
    {
        'name': 'course101',
        'url': 'http://127.0.0.1:9999',
        'command': [
            'jupyterhub-singleuser',
            '--debug',
        ],
        'user': 'grader-course101',
        'cwd': '/home/grader-course101',
        'environment': {
            # specify formgrader as default landing page
            'JUPYTERHUB_DEFAULT_URL': '/formgrader'
        },
        'api_token': '{{course101_token}}',
    },
    {
        'name': 'course123',
        'url': 'http://127.0.0.1:9998',
        'command': [
            'jupyterhub-singleuser',
            '--debug',
        ],
        'user': 'grader-course123',
        'cwd': '/home/grader-course123',
        'environment': {
            # specify formgrader as default landing page
            'JUPYTERHUB_DEFAULT_URL': '/formgrader'
        },
        'api_token': '{{course123_token}}',
    },
]
| bsd-3-clause | 22b5843b7bcea03c8ad6bf2eccddc69d | 26.887097 | 80 | 0.522846 | 3.974713 | false | false | false | false |
jupyter/nbgrader | nbgrader/apps/api.py | 2 | 42264 | import glob
import re
import sys
import os
import logging
import warnings
from traitlets.config import LoggingConfigurable, Config, get_config
from traitlets import Instance, Enum, Unicode, observe
from ..coursedir import CourseDirectory
from ..converters import GenerateAssignment, Autograde, GenerateFeedback, GenerateSolution
from ..exchange import ExchangeFactory, ExchangeError
from ..api import MissingEntry, Gradebook, Student, SubmittedAssignment
from ..utils import parse_utc, temp_attrs, capture_log, as_timezone, to_numeric_tz
from ..auth import Authenticator
class NbGraderAPI(LoggingConfigurable):
    """A high-level API for using nbgrader."""
    # Collaborator objects; populated in __init__ with defaults when the
    # caller does not provide them.
    coursedir = Instance(CourseDirectory, allow_none=True)
    authenticator = Instance(Authenticator, allow_none=True)
    exchange = Instance(ExchangeFactory, allow_none=True)
    # The log level for the application
    log_level = Enum(
        (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
        default_value=logging.INFO,
        help="Set the log level by value or name."
    ).tag(config=True)
    timezone = Unicode(
        "UTC",
        help="Timezone for displaying timestamps"
    ).tag(config=True)
    timestamp_format = Unicode(
        "%Y-%m-%d %H:%M:%S %Z",
        help="Format string for displaying timestamps"
    ).tag(config=True)
@observe('log_level')
def _log_level_changed(self, change):
"""Adjust the log level when log_level is set."""
new = change.new
if isinstance(new, str):
new = getattr(logging, new)
self.log_level = new
self.log.setLevel(new)
    def __init__(self, coursedir=None, authenticator=None, exchange=None, **kwargs):
        """Initialize the API.

        Arguments
        ---------
        coursedir: :class:`nbgrader.coursedir.CourseDirectory`
            (Optional) A course directory object.
        authenticator : :class:~`nbgrader.auth.BaseAuthenticator`
            (Optional) An authenticator instance for communicating with an
            external database.
        exchange : :class:~`nbgrader.exchange.ExchangeFactory`
            (Optional) A factory for creating the exchange classes used
            for distributing assignments and feedback.
        kwargs:
            Additional keyword arguments (e.g. ``parent``, ``config``)
        """
        # NOTE(review): the level is applied before super().__init__ has
        # processed config, so this uses the trait default unless log_level
        # was set directly -- confirm intended ordering.
        self.log.setLevel(self.log_level)
        super(NbGraderAPI, self).__init__(**kwargs)
        # Fall back to default collaborators when none were supplied.
        if coursedir is None:
            self.coursedir = CourseDirectory(parent=self)
        else:
            self.coursedir = coursedir
        if authenticator is None:
            self.authenticator = Authenticator(parent=self)
        else:
            self.authenticator = authenticator
        if exchange is None:
            self.exchange = ExchangeFactory(parent=self)
        else:
            self.exchange = exchange
        if sys.platform != 'win32':
            # Probe the exchange once so exchange_missing reflects whether
            # it is reachable.
            lister = self.exchange.List(
                coursedir=self.coursedir,
                authenticator=self.authenticator,
                parent=self)
            self.course_id = self.coursedir.course_id
            if hasattr(lister, "root"):
                self.exchange_root = lister.root
            else:
                # For non-fs based exchanges
                self.exchange_root = ''
            try:
                lister.start()
            except ExchangeError:
                self.exchange_missing = True
            else:
                self.exchange_missing = False
        else:
            # The default exchange is not supported on Windows.
            self.course_id = ''
            self.exchange_root = ''
            self.exchange_missing = True
    @property
    def exchange_is_functional(self):
        # True only when a course id is configured, the exchange was reachable
        # at construction time, and we are not on Windows.  Note this returns
        # the first falsy operand (possibly ''), not strictly a bool.
        return self.course_id and not self.exchange_missing and sys.platform != 'win32'
    @property
    def gradebook(self):
        """An instance of :class:`nbgrader.api.Gradebook`.

        Note that each time this property is accessed, a new gradebook is
        created. The user is responsible for destroying the gradebook through
        :func:`~nbgrader.api.Gradebook.close` (or by using it as a context
        manager).
        """
        return Gradebook(self.coursedir.db_url, self.course_id)
def get_source_assignments(self):
"""Get the names of all assignments in the `source` directory.
Returns
-------
assignments: set
A set of assignment names
"""
filenames = glob.glob(self.coursedir.format_path(
self.coursedir.source_directory,
student_id='.',
assignment_id='*'))
assignments = set([])
for filename in filenames:
# skip files that aren't directories
if not os.path.isdir(filename):
continue
# parse out the assignment name
regex = self.coursedir.format_path(
self.coursedir.source_directory,
student_id='.',
assignment_id='(?P<assignment_id>.*)',
escape=True)
matches = re.match(regex, filename)
if matches:
assignments.add(matches.groupdict()['assignment_id'])
return assignments
def get_released_assignments(self):
"""Get the names of all assignments that have been released to the
exchange directory. If the course id is blank, this returns an empty
set.
Returns
-------
assignments: set
A set of assignment names
"""
if self.exchange_is_functional:
lister = self.exchange.List(
coursedir=self.coursedir,
authenticator=self.authenticator,
parent=self)
released = set([x['assignment_id'] for x in lister.start()])
else:
released = set([])
return released
    def get_submitted_students(self, assignment_id):
        """Get the ids of students that have submitted a given assignment
        (determined by whether or not a submission exists in the `submitted`
        directory).

        Arguments
        ---------
        assignment_id: string
            The name of the assignment. May be * to select for all assignments.

        Returns
        -------
        students: set
            A set of student ids
        """
        # get the names of all student submissions in the `submitted` directory
        filenames = glob.glob(self.coursedir.format_path(
            self.coursedir.submitted_directory,
            student_id='*',
            assignment_id=assignment_id))
        students = set([])
        for filename in filenames:
            # skip files that aren't directories
            if not os.path.isdir(filename):
                continue
            # parse out the student id
            # When matching all assignments, translate the glob wildcard into
            # its regex equivalent before building the pattern below.
            if assignment_id == "*":
                assignment_id = ".*"
            regex = self.coursedir.format_path(
                self.coursedir.submitted_directory,
                student_id='(?P<student_id>.*)',
                assignment_id=assignment_id,
                escape=True)
            matches = re.match(regex, filename)
            if matches:
                students.add(matches.groupdict()['student_id'])
        return students
def get_submitted_timestamp(self, assignment_id, student_id):
"""Gets the timestamp of a submitted assignment.
Arguments
---------
assignment_id: string
The assignment name
student_id: string
The student id
Returns
-------
timestamp: datetime.datetime or None
The timestamp of the submission, or None if the timestamp does
not exist
"""
assignment_dir = os.path.abspath(self.coursedir.format_path(
self.coursedir.submitted_directory,
student_id,
assignment_id))
timestamp_pth = os.path.join(assignment_dir, 'timestamp.txt')
if os.path.exists(timestamp_pth):
with open(timestamp_pth, 'r') as fh:
return parse_utc(fh.read().strip())
    def get_autograded_students(self, assignment_id):
        """Get the ids of students whose submission for a given assignment
        has been autograded. This is determined based on satisfying all of the
        following criteria:

        1. There is a directory present in the `autograded` directory.
        2. The submission is present in the database.
        3. The timestamp of the autograded submission is the same as the
           timestamp of the original submission (in the `submitted` directory).

        Returns
        -------
        students: set
            A set of student ids
        """
        # get all autograded submissions
        with self.gradebook as gb:
            ag_timestamps = dict(gb.db\
                .query(Student.id, SubmittedAssignment.timestamp)\
                .join(SubmittedAssignment)\
                .filter(SubmittedAssignment.name == assignment_id)\
                .all())
            ag_students = set(ag_timestamps.keys())
        students = set([])
        for student_id in ag_students:
            # skip files that aren't directories
            filename = self.coursedir.format_path(
                self.coursedir.autograded_directory,
                student_id=student_id,
                assignment_id=assignment_id)
            if not os.path.isdir(filename):
                continue
            # the submitted timestamp must match the autograded one exactly; a
            # mismatch means a newer submission has not been re-autograded yet
            submitted_timestamp = self.get_submitted_timestamp(assignment_id, student_id)
            autograded_timestamp = ag_timestamps[student_id]
            if submitted_timestamp != autograded_timestamp:
                continue
            students.add(student_id)
        return students
    def get_assignment(self, assignment_id, released=None):
        """Get information about an assignment given its name.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        released: list
            (Optional) A set of names of released assignments, obtained via
            self.get_released_assignments().
        Returns
        -------
        assignment: dict or None
            A dictionary containing information about the assignment, or None
            if the assignment has no source directory on disk
        """
        # get the set of released assignments if not given
        # NOTE(review): an explicitly-passed *empty* set is also refetched
        # here, because the check is truthiness-based rather than `is None`.
        if not released:
            released = self.get_released_assignments()
        # check whether there is a source version of the assignment
        sourcedir = os.path.abspath(self.coursedir.format_path(
            self.coursedir.source_directory,
            student_id='.',
            assignment_id=assignment_id))
        if not os.path.isdir(sourcedir):
            # no source version of this assignment: implicitly return None
            return
        # see if there is information about the assignment in the database
        try:
            with self.gradebook as gb:
                db_assignment = gb.find_assignment(assignment_id)
                assignment = db_assignment.to_dict()
                if db_assignment.duedate:
                    ts = as_timezone(db_assignment.duedate, self.timezone)
                    assignment["display_duedate"] = ts.strftime(self.timestamp_format)
                    assignment["duedate_notimezone"] = ts.replace(tzinfo=None).isoformat()
                else:
                    assignment["display_duedate"] = None
                    assignment["duedate_notimezone"] = None
                assignment["duedate_timezone"] = to_numeric_tz(self.timezone)
                assignment["average_score"] = gb.average_assignment_score(assignment_id)
                assignment["average_code_score"] = gb.average_assignment_code_score(assignment_id)
                assignment["average_written_score"] = gb.average_assignment_written_score(assignment_id)
                assignment["average_task_score"] = gb.average_assignment_task_score(assignment_id)
        except MissingEntry:
            # the assignment isn't in the database yet: build a placeholder
            # record with zeroed scores
            assignment = {
                "id": None,
                "name": assignment_id,
                "duedate": None,
                "display_duedate": None,
                "duedate_notimezone": None,
                "duedate_timezone": to_numeric_tz(self.timezone),
                "average_score": 0,
                "average_code_score": 0,
                "average_written_score": 0,
                "average_task_score": 0,
                "max_score": 0,
                "max_code_score": 0,
                "max_written_score": 0,
                "max_task_score": 0
            }
        # get released status; without a functional exchange nothing can be
        # released, so the assignment stays a draft
        if not self.exchange_is_functional:
            assignment["releaseable"] = False
            assignment["status"] = "draft"
        else:
            assignment["releaseable"] = True
            if assignment_id in released:
                assignment["status"] = "released"
            else:
                assignment["status"] = "draft"
        # get source directory
        assignment["source_path"] = os.path.relpath(sourcedir, self.coursedir.root)
        # get release directory
        releasedir = os.path.abspath(self.coursedir.format_path(
            self.coursedir.release_directory,
            student_id='.',
            assignment_id=assignment_id))
        if os.path.exists(releasedir):
            assignment["release_path"] = os.path.relpath(releasedir, self.coursedir.root)
        else:
            assignment["release_path"] = None
        # number of submissions
        assignment["num_submissions"] = len(self.get_submitted_students(assignment_id))
        return assignment
def get_assignments(self):
"""Get a list of information about all assignments.
Returns
-------
assignments: list
A list of dictionaries containing information about each assignment
"""
released = self.get_released_assignments()
assignments = []
for x in self.get_source_assignments():
assignments.append(self.get_assignment(x, released=released))
assignments.sort(key=lambda x: (x["duedate"] if x["duedate"] is not None else "None", x["name"]))
return assignments
    def get_notebooks(self, assignment_id):
        """Get a list of notebooks in an assignment.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        Returns
        -------
        notebooks: list
            A list of dictionaries containing information about each notebook
        """
        with self.gradebook as gb:
            try:
                assignment = gb.find_assignment(assignment_id)
            except MissingEntry:
                assignment = None
            # if the assignment exists in the database, report per-notebook
            # average scores from the gradebook
            if assignment and assignment.notebooks:
                notebooks = []
                for notebook in assignment.notebooks:
                    x = notebook.to_dict()
                    x["average_score"] = gb.average_notebook_score(notebook.name, assignment.name)
                    x["average_code_score"] = gb.average_notebook_code_score(notebook.name, assignment.name)
                    x["average_written_score"] = gb.average_notebook_written_score(notebook.name, assignment.name)
                    x["average_task_score"] = gb.average_notebook_task_score(notebook.name, assignment.name)
                    notebooks.append(x)
            # if it doesn't exist in the database, fall back to scanning the
            # source directory for .ipynb files and return zeroed records
            else:
                sourcedir = self.coursedir.format_path(
                    self.coursedir.source_directory,
                    student_id='.',
                    assignment_id=assignment_id)
                # escaped variant used to build a regex that extracts the
                # notebook name from each matched filename
                escaped_sourcedir = self.coursedir.format_path(
                    self.coursedir.source_directory,
                    student_id='.',
                    assignment_id=assignment_id,
                    escape=True)
                notebooks = []
                for filename in glob.glob(os.path.join(sourcedir, "*.ipynb")):
                    regex = re.escape(os.path.sep).join([escaped_sourcedir, "(?P<notebook_id>.*).ipynb"])
                    matches = re.match(regex, filename)
                    notebook_id = matches.groupdict()['notebook_id']
                    notebooks.append({
                        "name": notebook_id,
                        "id": None,
                        "average_score": 0,
                        "average_code_score": 0,
                        "average_written_score": 0,
                        "average_task_score": 0,
                        "max_score": 0,
                        "max_code_score": 0,
                        "max_written_score": 0,
                        "max_task_score": 0,
                        "needs_manual_grade": False,
                        "num_submissions": 0
                    })
            return notebooks
def get_submission(self, assignment_id, student_id, ungraded=None, students=None):
"""Get information about a student's submission of an assignment.
Arguments
---------
assignment_id: string
The name of the assignment
student_id: string
The student's id
ungraded: set
(Optional) A set of student ids corresponding to students whose
submissions have not yet been autograded.
students: dict
(Optional) A dictionary of dictionaries, keyed by student id,
containing information about students.
Returns
-------
submission: dict
A dictionary containing information about the submission
"""
if ungraded is None:
autograded = self.get_autograded_students(assignment_id)
ungraded = self.get_submitted_students(assignment_id) - autograded
if students is None:
students = {x['id']: x for x in self.get_students()}
if student_id in ungraded:
ts = self.get_submitted_timestamp(assignment_id, student_id)
if ts:
timestamp = ts.isoformat()
display_timestamp = as_timezone(ts, self.timezone).strftime(self.timestamp_format)
else:
timestamp = None
display_timestamp = None
submission = {
"id": None,
"name": assignment_id,
"timestamp": timestamp,
"display_timestamp": display_timestamp,
"score": 0.0,
"max_score": 0.0,
"code_score": 0.0,
"max_code_score": 0.0,
"written_score": 0.0,
"max_written_score": 0.0,
"task_score": 0.0,
"max_task_score": 0.0,
"needs_manual_grade": False,
"autograded": False,
"submitted": True,
"student": student_id,
}
if student_id not in students:
submission["last_name"] = None
submission["first_name"] = None
else:
submission["last_name"] = students[student_id]["last_name"]
submission["first_name"] = students[student_id]["first_name"]
elif student_id in autograded:
with self.gradebook as gb:
try:
db_submission = gb.find_submission(assignment_id, student_id)
submission = db_submission.to_dict()
if db_submission.timestamp:
submission["display_timestamp"] = as_timezone(
db_submission.timestamp, self.timezone).strftime(self.timestamp_format)
else:
submission["display_timestamp"] = None
except MissingEntry:
return None
submission["autograded"] = True
submission["submitted"] = True
else:
submission = {
"id": None,
"name": assignment_id,
"timestamp": None,
"display_timestamp": None,
"score": 0.0,
"max_score": 0.0,
"code_score": 0.0,
"max_code_score": 0.0,
"written_score": 0.0,
"max_written_score": 0.0,
"task_score": 0.0,
"max_task_score": 0.0,
"needs_manual_grade": False,
"autograded": False,
"submitted": False,
"student": student_id,
}
if student_id not in students:
submission["last_name"] = None
submission["first_name"] = None
else:
submission["last_name"] = students[student_id]["last_name"]
submission["first_name"] = students[student_id]["first_name"]
return submission
    def get_submissions(self, assignment_id):
        """Get a list of submissions of an assignment. Each submission
        corresponds to a student.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        Returns
        -------
        submissions: list
            A list of dictionaries containing information about each submission
        """
        with self.gradebook as gb:
            db_submissions = gb.submission_dicts(assignment_id)
        # students with a submission on disk but no autograded result yet
        ungraded = self.get_submitted_students(assignment_id) - self.get_autograded_students(assignment_id)
        students = {x['id']: x for x in self.get_students()}
        submissions = []
        # database records correspond to autograded submissions; skip any
        # student who has since re-submitted (they appear in ``ungraded``)
        for submission in db_submissions:
            if submission["student"] in ungraded:
                continue
            ts = submission["timestamp"]
            if ts:
                submission["timestamp"] = ts.isoformat()
                submission["display_timestamp"] = as_timezone(
                    ts, self.timezone).strftime(self.timestamp_format)
            else:
                submission["timestamp"] = None
                submission["display_timestamp"] = None
            submission["autograded"] = True
            submission["submitted"] = True
            submissions.append(submission)
        # append placeholder records for the not-yet-autograded submissions
        for student_id in ungraded:
            submission = self.get_submission(
                assignment_id, student_id, ungraded=ungraded, students=students)
            submissions.append(submission)
        submissions.sort(key=lambda x: x["student"])
        return submissions
    def _filter_existing_notebooks(self, assignment_id, notebooks):
        """Filters a list of notebooks so that it only includes those notebooks
        which actually exist on disk.
        This functionality is necessary for cases where students delete or
        rename one or more notebooks in their assignment, but still submit the
        assignment.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebooks: list
            List of :class:`~nbgrader.api.SubmittedNotebook` objects
        Returns
        -------
        submissions: list
            List of :class:`~nbgrader.api.SubmittedNotebook` objects
        """
        # Making a filesystem call for every notebook in the assignment
        # can be very slow on certain setups, such as using NFS, see
        # https://github.com/jupyter/nbgrader/issues/929
        #
        # If students are using the exchange and submitting with
        # ExchangeSubmit.strict == True, then all the notebooks we expect
        # should be here already so we don't need to filter for only
        # existing notebooks in that case.
        if self.exchange_is_functional:
            app = self.exchange.Submit(
                coursedir=self.coursedir,
                authenticator=self.authenticator,
                parent=self)
            if app.strict:
                # skip the per-notebook filesystem checks entirely
                return sorted(notebooks, key=lambda x: x.id)
        submissions = list()
        for nb in notebooks:
            # keep the notebook only if its autograded .ipynb file exists
            filename = os.path.join(
                os.path.abspath(self.coursedir.format_path(
                    self.coursedir.autograded_directory,
                    student_id=nb.student.id,
                    assignment_id=assignment_id)),
                "{}.ipynb".format(nb.name))
            if os.path.exists(filename):
                submissions.append(nb)
        return sorted(submissions, key=lambda x: x.id)
def get_notebook_submission_indices(self, assignment_id, notebook_id):
"""Get a dictionary mapping unique submission ids to indices of the
submissions relative to the full list of submissions.
Arguments
---------
assignment_id: string
The name of the assignment
notebook_id: string
The name of the notebook
Returns
-------
indices: dict
A dictionary mapping submission ids to the index of each submission
"""
with self.gradebook as gb:
notebooks = gb.notebook_submissions(notebook_id, assignment_id)
submissions = self._filter_existing_notebooks(assignment_id, notebooks)
return dict([(x.id, i) for i, x in enumerate(submissions)])
    def get_notebook_submissions(self, assignment_id, notebook_id):
        """Get a list of submissions for a particular notebook in an assignment.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        notebook_id: string
            The name of the notebook
        Returns
        -------
        submissions: list
            A list of dictionaries containing information about each submission.
        """
        with self.gradebook as gb:
            try:
                gb.find_notebook(notebook_id, assignment_id)
            except MissingEntry:
                # unknown notebook: nothing to report
                return []
            submissions = gb.notebook_submission_dicts(notebook_id, assignment_id)
        indices = self.get_notebook_submission_indices(assignment_id, notebook_id)
        for nb in submissions:
            nb['index'] = indices.get(nb['id'], None)
        # drop submissions whose notebook file no longer exists on disk
        # (those have no index)
        submissions = [x for x in submissions if x['index'] is not None]
        submissions.sort(key=lambda x: x["id"])
        return submissions
    def get_student(self, student_id, submitted=None):
        """Get a dictionary containing information about the given student.
        Arguments
        ---------
        student_id: string
            The unique id of the student
        submitted: set
            (Optional) A set of unique ids of students who have submitted an assignment
        Returns
        -------
        student: dictionary
            A dictionary containing information about the student, or None if
            the student does not exist
        """
        if submitted is None:
            # "*" matches submissions for any assignment
            submitted = self.get_submitted_students("*")
        try:
            with self.gradebook as gb:
                student = gb.find_student(student_id).to_dict()
        except MissingEntry:
            # not in the database, but a submission exists on disk: report a
            # placeholder record rather than nothing
            if student_id in submitted:
                student = {
                    "id": student_id,
                    "last_name": None,
                    "first_name": None,
                    "email": None,
                    "lms_user_id": None,
                    "score": 0.0,
                    "max_score": 0.0
                }
            else:
                return None
        return student
    def get_students(self):
        """Get a list containing information about all the students in class.
        Returns
        -------
        students: list
            A list of dictionaries containing information about all the students
        """
        with self.gradebook as gb:
            in_db = set([x.id for x in gb.students])
            students = gb.student_dicts()
        # also include students who submitted something ("*" = any
        # assignment) but are not in the database yet, as placeholder records
        submitted = self.get_submitted_students("*")
        for student_id in (submitted - in_db):
            students.append({
                "id": student_id,
                "last_name": None,
                "first_name": None,
                "email": None,
                "lms_user_id": None,
                "score": 0.0,
                "max_score": 0.0
            })
        # missing names sort as the literal string "None"
        students.sort(key=lambda x: (x["last_name"] or "None", x["first_name"] or "None", x["id"]))
        return students
def get_student_submissions(self, student_id):
"""Get information about all submissions from a particular student.
Arguments
---------
student_id: string
The unique id of the student
Returns
-------
submissions: list
A list of dictionaries containing information about all the student's
submissions
"""
# return just an empty list if the student doesn't exist
submissions = []
for assignment_id in self.get_source_assignments():
submission = self.get_submission(assignment_id, student_id)
submissions.append(submission)
submissions.sort(key=lambda x: x["name"])
return submissions
    def get_student_notebook_submissions(self, student_id, assignment_id):
        """Gets information about all notebooks within a submitted assignment.
        Arguments
        ---------
        student_id: string
            The unique id of the student
        assignment_id: string
            The name of the assignment
        Returns
        -------
        submissions: list
            A list of dictionaries containing information about the submissions
        """
        with self.gradebook as gb:
            try:
                assignment = gb.find_submission(assignment_id, student_id)
                student = assignment.student
            except MissingEntry:
                # no submission for this student/assignment pair
                return []
            submissions = []
            for notebook in assignment.notebooks:
                # check whether the autograded notebook file still exists on
                # disk; if not, report a zeroed placeholder record instead
                filename = os.path.join(
                    os.path.abspath(self.coursedir.format_path(
                        self.coursedir.autograded_directory,
                        student_id=student_id,
                        assignment_id=assignment_id)),
                    "{}.ipynb".format(notebook.name))
                if os.path.exists(filename):
                    submissions.append(notebook.to_dict())
                else:
                    submissions.append({
                        "id": None,
                        "name": notebook.name,
                        "student": student_id,
                        "last_name": student.last_name,
                        "first_name": student.first_name,
                        "score": 0,
                        "max_score": notebook.max_score,
                        "code_score": 0,
                        "max_code_score": notebook.max_code_score,
                        "written_score": 0,
                        "max_written_score": notebook.max_written_score,
                        "task_score": 0,
                        "max_task_score": notebook.max_task_score,
                        "needs_manual_grade": False,
                        "failed_tests": False,
                        "flagged": False
                    })
        submissions.sort(key=lambda x: x["name"])
        return submissions
    def assign(self, *args, **kwargs):
        """Deprecated, please use `generate_assignment` instead."""
        # deprecation shim: warn, log, and delegate with arguments unchanged
        msg = (
            "The `assign` method is deprecated, please use `generate_assignment` "
            "instead. This method will be removed in a future version of nbgrader.")
        warnings.warn(msg, DeprecationWarning)
        self.log.warning(msg)
        return self.generate_assignment(*args, **kwargs)
    def generate_assignment(self, assignment_id, force=True, create=True):
        """Run ``nbgrader generate_assignment`` for a particular assignment.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        force: bool
            Whether to force creating the student version, even if it already
            exists.
        create: bool
            Whether to create the assignment in the database, if it doesn't
            already exist.
        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):
            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output
        """
        # temporarily point the course directory at this assignment while the
        # app runs, then restore it
        with temp_attrs(self.coursedir, assignment_id=assignment_id):
            app = GenerateAssignment(coursedir=self.coursedir, parent=self)
            app.force = force
            app.create_assignment = create
            return capture_log(app)
    def unrelease(self, assignment_id):
        """Run ``nbgrader list --remove`` for a particular assignment.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):
            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output
        """
        # NOTE(review): on Windows this silently does nothing and returns
        # None rather than a result dict — callers should be prepared for that
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.List(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                app.remove = True
                return capture_log(app)
    def release(self, *args, **kwargs):
        """Deprecated, please use `release_assignment` instead."""
        # deprecation shim: warn, log, and delegate with arguments unchanged
        msg = (
            "The `release` method is deprecated, please use `release_assignment` "
            "instead. This method will be removed in a future version of nbgrader.")
        warnings.warn(msg, DeprecationWarning)
        self.log.warning(msg)
        return self.release_assignment(*args, **kwargs)
    def release_assignment(self, assignment_id):
        """Run ``nbgrader release_assignment`` for a particular assignment.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):
            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output
        """
        # NOTE(review): on Windows this silently does nothing and returns None
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.ReleaseAssignment(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                return capture_log(app)
    def collect(self, assignment_id, update=True):
        """Run ``nbgrader collect`` for a particular assignment.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        update: bool
            Whether to update already-collected assignments with newer
            submissions, if they exist
        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):
            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output
        """
        # NOTE(review): on Windows this silently does nothing and returns None
        if sys.platform != 'win32':
            with temp_attrs(self.coursedir, assignment_id=assignment_id):
                app = self.exchange.Collect(
                    coursedir=self.coursedir,
                    authenticator=self.authenticator,
                    parent=self)
                app.update = update
                return capture_log(app)
    def autograde(self, assignment_id, student_id, force=True, create=True):
        """Run ``nbgrader autograde`` for a particular assignment and student.
        Arguments
        ---------
        assignment_id: string
            The name of the assignment
        student_id: string
            The unique id of the student
        force: bool
            Whether to autograde the submission, even if it's already been
            autograded
        create: bool
            Whether to create students in the database if they don't already
            exist
        Returns
        -------
        result: dict
            A dictionary with the following keys (error and log may or may not be present):
            - success (bool): whether or not the operation completed successfully
            - error (string): formatted traceback
            - log (string): captured log output
        """
        # narrow the course directory to this student/assignment pair while
        # the autograde app runs
        with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id=student_id):
            app = Autograde(coursedir=self.coursedir, parent=self)
            app.force = force
            app.create_student = create
            return capture_log(app)
def generate_feedback(self, assignment_id, student_id=None, force=True):
"""Run ``nbgrader generate_feedback`` for a particular assignment and student.
Arguments
---------
assignment_id: string
The name of the assignment
student_id: string
The name of the student (optional). If not provided, then generate
feedback from autograded submissions.
force: bool
Whether to force generating feedback, even if it already exists.
Returns
-------
result: dict
A dictionary with the following keys (error and log may or may not be present):
- success (bool): whether or not the operation completed successfully
- error (string): formatted traceback
- log (string): captured log output
"""
# Because we may be using HTMLExporter.template_name in other
# parts of the the UI, we need to make sure that the template
# is explicitply 'feedback` here:
c = Config()
c.HTMLExporter.template_name = 'feedback'
if student_id is not None:
with temp_attrs(self.coursedir,
assignment_id=assignment_id,
student_id=student_id):
app = GenerateFeedback(coursedir=self.coursedir, parent=self)
app.update_config(c)
app.force = force
return capture_log(app)
else:
with temp_attrs(self.coursedir,
assignment_id=assignment_id):
app = GenerateFeedback(coursedir=self.coursedir, parent=self)
app.update_config(c)
app.force = force
return capture_log(app)
def release_feedback(self, assignment_id, student_id=None):
"""Run ``nbgrader release_feedback`` for a particular assignment/student.
Arguments
---------
assignment_id: string
The name of the assignment
assignment_id: string
The name of the student (optional). If not provided, then release
all generated feedback.
Returns
-------
result: dict
A dictionary with the following keys (error and log may or may not be present):
- success (bool): whether or not the operation completed successfully
- error (string): formatted traceback
- log (string): captured log output
"""
if student_id is not None:
with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id=student_id):
app = self.exchange.ReleaseFeedback(
coursedir=self.coursedir,
authentictor=self.authenticator,
parent=self)
return capture_log(app)
else:
with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id='*'):
app = self.exchange.ReleaseFeedback(
coursedir=self.coursedir,
authentictor=self.authenticator,
parent=self)
return capture_log(app)
def fetch_feedback(self, assignment_id, student_id):
"""Run ``nbgrader fetch_feedback`` for a particular assignment/student.
Arguments
---------
assignment_id: string
The name of the assignment
student_id: string
The name of the student.
Returns
-------
result: dict
A dictionary with the following keys (error and log may or may not be present):
- success (bool): whether or not the operation completed successfully
- error (string): formatted traceback
- log (string): captured log output
- value (list of dict): all submitted assignments
"""
with temp_attrs(self.coursedir, assignment_id=assignment_id, student_id=student_id):
app = self.exchange.FetchFeedback(
coursedir=self.coursedir,
authentictor=self.authenticator,
parent=self)
ret_dic = capture_log(app)
# assignment tab needs a 'value' field with the info needed to repopulate
# the tables.
with temp_attrs(self.coursedir, assignment_id='*', student_id=student_id):
lister_rel = self.exchange.List(
inbound=False, cached=True,
coursedir=self.coursedir,
authenticator=self.authenticator,
config=self.config)
assignments = lister_rel.start()
ret_dic["value"] = sorted(assignments, key=lambda x: (x['course_id'], x['assignment_id']))
return ret_dic
| bsd-3-clause | ddc956921211eb654fc281a886055f1b | 35.403101 | 114 | 0.554254 | 4.858489 | false | false | false | false |
jupyter/nbgrader | nbgrader/preprocessors/overwritecells.py | 3 | 4363 | from nbformat.v4.nbbase import validate
from .. import utils
from ..api import Gradebook, MissingEntry
from . import NbGraderPreprocessor
from nbconvert.exporters.exporter import ResourcesDict
from nbformat.notebooknode import NotebookNode
from typing import Tuple, Any
class OverwriteCells(NbGraderPreprocessor):
    """A preprocessor to overwrite information about grade and solution cells.

    Each graded/locked cell in the student notebook is compared against the
    corresponding source cell stored in the gradebook database; protected
    attributes that the student changed are reported and restored.
    """
    def preprocess(self, nb: NotebookNode, resources: ResourcesDict) -> Tuple[NotebookNode, ResourcesDict]:
        # pull information from the resources
        self.notebook_id = resources['nbgrader']['notebook']
        self.assignment_id = resources['nbgrader']['assignment']
        self.db_url = resources['nbgrader']['db_url']
        # connect to the database; keep the connection open while every cell
        # is processed by the parent class
        self.gradebook = Gradebook(self.db_url)
        with self.gradebook:
            nb, resources = super(OverwriteCells, self).preprocess(nb, resources)
        return nb, resources
    def update_cell_type(self, cell: NotebookNode, cell_type: str) -> None:
        # Convert ``cell`` to ``cell_type`` in place, adding or removing the
        # fields required for it to validate against the nbformat schema.
        if cell.cell_type == cell_type:
            return
        elif cell_type == 'code':
            cell.cell_type = 'code'
            cell.outputs = []
            cell.execution_count = None
            validate(cell, 'code_cell')
        elif cell_type == 'markdown':
            cell.cell_type = 'markdown'
            if 'outputs' in cell:
                del cell['outputs']
            if 'execution_count' in cell:
                del cell['execution_count']
            validate(cell, 'markdown_cell')
    def report_change(self, name: str, attr: str, old: Any, new: Any) -> None:
        # Warn that a protected attribute of a cell was modified.
        self.log.warning(
            "Attribute '%s' for cell %s has changed! (should be: %s, got: %s)", attr, name, old, new)
    def preprocess_cell(self,
                        cell: NotebookNode,
                        resources: ResourcesDict,
                        cell_index: int
                        ) -> Tuple[NotebookNode, ResourcesDict]:
        # only cells with an nbgrader grade_id are checked
        grade_id = cell.metadata.get('nbgrader', {}).get('grade_id', None)
        if grade_id is None:
            return cell, resources
        try:
            source_cell = self.gradebook.find_source_cell(
                grade_id,
                self.notebook_id,
                self.assignment_id)
        except MissingEntry:
            # unknown cell: strip the grade_id so it is treated as ungraded
            self.log.warning("Cell '{}' does not exist in the database".format(grade_id))
            del cell.metadata.nbgrader['grade_id']
            return cell, resources
        # check that the cell type hasn't changed
        if cell.cell_type != source_cell.cell_type:
            self.report_change(grade_id, "cell_type", source_cell.cell_type, cell.cell_type)
            self.update_cell_type(cell, source_cell.cell_type)
        # check that the locked status hasn't changed
        if utils.is_locked(cell) != source_cell.locked:
            self.report_change(grade_id, "locked", source_cell.locked, utils.is_locked(cell))
            cell.metadata.nbgrader["locked"] = source_cell.locked
        # if it's a grade cell, check that the max score hasn't changed
        if utils.is_grade(cell):
            grade_cell = self.gradebook.find_graded_cell(
                grade_id,
                self.notebook_id,
                self.assignment_id)
            old_points = float(grade_cell.max_score)
            new_points = float(cell.metadata.nbgrader["points"])
            if old_points != new_points:
                self.report_change(grade_id, "points", old_points, new_points)
                cell.metadata.nbgrader["points"] = old_points
        # always update the checksum, just in case
        cell.metadata.nbgrader["checksum"] = source_cell.checksum
        # if it's locked, check that the checksum hasn't changed; if it has,
        # restore the original source
        if source_cell.locked:
            old_checksum = source_cell.checksum
            new_checksum = utils.compute_checksum(cell)
            if old_checksum != new_checksum:
                self.report_change(grade_id, "checksum", old_checksum, new_checksum)
                cell.source = source_cell.source
                # double check that the checksum is correct now
                if utils.compute_checksum(cell) != source_cell.checksum:
                    raise RuntimeError("Inconsistent checksums for cell {}".format(source_cell.name))
        return cell, resources
| bsd-3-clause | 89c23fd50d135be026e1c6b0d702c92a | 40.951923 | 107 | 0.599587 | 4.19923 | false | false | false | false |
jupyter/nbgrader | tasks.py | 1 | 4400 | #!/usr/bin/env python
import os
import sys
import subprocess as sp
import argparse
def echo(msg):
    # Print *msg* in bold white (ANSI escape codes) so task banners stand out.
    print(f"\033[1;37m{msg}\033[0m")
def run(cmd, **kwargs):
    # Echo the command being executed, then run it through the shell; raises
    # subprocess.CalledProcessError on a non-zero exit status.
    echo(cmd)
    return sp.check_call(cmd, shell=True, **kwargs)
# nbformat is only needed by some tasks; degrade gracefully if it is missing
try:
    from nbformat import read
except ImportError:
    echo("Warning: nbformat could not be imported, some tasks may not work")
# several tasks behave differently (or are skipped) on Windows
WINDOWS = sys.platform == 'win32'
def _check_if_directory_in_path(pth, target):
while pth != '':
pth, dirname = os.path.split(pth)
if dirname == target:
return True
return False
def docs(args):
    # Build and link-check the documentation from a clean tree.
    del args # unused
    run('git clean -fdX nbgrader/docs')
    # the user-guide notebooks cannot be executed on Windows
    if not WINDOWS:
        run('pytest --nbval-lax --current-env nbgrader/docs/source/user_guide/*.ipynb')
        run('python nbgrader/docs/source/build_docs.py')
    run('python nbgrader/docs/source/clear_docs.py')
    run('make -C nbgrader/docs html')
    run('make -C nbgrader/docs linkcheck')
def cleandocs(args):
    # Clear generated documentation output.
    del args # unused
    run('python nbgrader/docs/source/clear_docs.py')
def _run_tests(mark, skip, junitxml, paralell=False):
    # Build and run a pytest command line. ``mark``/``skip`` select/exclude
    # pytest marks; ``junitxml`` writes a JUnit report; ``paralell`` (sic —
    # the misspelling is kept because callers pass it by keyword) enables
    # pytest-xdist parallelism.
    cmd = []
    cmd.append('pytest')
    # coverage is only collected on non-Windows platforms
    if not WINDOWS:
        cmd.append('--cov nbgrader')
        cmd.append('--no-cov-on-fail')
    if junitxml:
        cmd.extend(['--junitxml', junitxml])
    cmd.append('-v')
    cmd.append('-x')
    if paralell:
        cmd.extend(['--numprocesses', 'auto'])
    cmd.extend(['--reruns', '4'])
    # cmd.extend(['--mypy'])
    marks = []
    if mark is not None:
        marks.append(mark)
    if skip is not None:
        marks.append("not {}".format(skip))
    if len(marks) > 0:
        cmd.append('-m "{}"'.format(" and ".join(marks)))
    run(" ".join(cmd))
    # combine per-process coverage files after a parallel run
    if not WINDOWS:
        run("ls -a .coverage*")
        run("coverage combine || true")
def _run_ts_test():
    # Run the Playwright (TypeScript) test suite, retrying flaky tests.
    run("npx playwright test --retries=3")
def tests(args):
    # Dispatch to the requested test group.
    handlers = {
        'python': lambda: _run_tests(
            mark="not nbextensions", skip=args.skip, junitxml=args.junitxml, paralell=True),
        'nbextensions': lambda: _run_tests(
            mark="nbextensions", skip=args.skip, junitxml=args.junitxml),
        'labextensions': _run_ts_test,
        'docs': lambda: docs(args),
    }
    if args.group in handlers:
        handlers[args.group]()
    elif args.group == 'all':
        _run_tests(mark=None, skip=args.skip, junitxml=args.junitxml)
        _run_ts_test()
    else:
        raise ValueError("Invalid test group: {}".format(args.group))
def aftersuccess(args):
    # Upload coverage only for the groups that actually produce it.
    if args.group not in ('python', 'nbextensions'):
        echo('Nothing to do.')
        return
    run('codecov')
def js(args):
    # Install the JavaScript dependencies (npm + bower components).
    run('npm install')
    run('./node_modules/.bin/bower install --config.interactive=false')
    # optionally remove any previously-installed components first
    if args.clean:
        run('git clean -fdX nbgrader/server_extensions/formgrader/static/components')
def install(args):
    # Docs builds need the docs extras; everything else gets an editable
    # install with the test extras.
    if args.group == 'docs':
        cmd = 'pip install .[docs,tests]'
    else:
        cmd = 'pip install -e .[tests]'
    env = dict(os.environ)
    # Building the lab extensions is only needed for 'all'/'labextensions'.
    if args.group not in ('all', 'labextensions'):
        env['SKIP_JUPYTER_BUILDER'] = '1'
    run(cmd, env=env)
if __name__ == '__main__':
    # One subcommand per task; each subparser binds its handler via
    # set_defaults(func=...) and is invoked as args.func(args) below.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    # docs
    docs_parser = subparsers.add_parser('docs')
    docs_parser.set_defaults(func=docs)
    # cleandocs
    cleandocs_parser = subparsers.add_parser('cleandocs')
    cleandocs_parser.set_defaults(func=cleandocs)
    # tests
    tests_parser = subparsers.add_parser('tests')
    tests_parser.add_argument('--group', type=str, default='all')
    tests_parser.add_argument('--skip', type=str, default=None)
    tests_parser.add_argument('--junitxml', type=str, default=None)
    tests_parser.set_defaults(func=tests)
    # aftersuccess
    aftersuccess_parser = subparsers.add_parser('aftersuccess')
    aftersuccess_parser.add_argument('--group', type=str, required=True)
    aftersuccess_parser.set_defaults(func=aftersuccess)
    # js
    js_parser = subparsers.add_parser('js')
    js_parser.add_argument('--clean', type=bool, default=True)
    js_parser.set_defaults(func=js)
    # install
    install_parser = subparsers.add_parser('install')
    install_parser.add_argument('--group', type=str, required=True)
    install_parser.set_defaults(func=install)
    args = parser.parse_args()
    args.func(args)
| bsd-3-clause | a4112b3be78d7626aa4752d771b7f222 | 25.190476 | 91 | 0.624091 | 3.32829 | false | true | false | false |
jupyter/nbgrader | nbgrader/plugins/export.py | 3 | 4603 | from traitlets import Unicode, List
from .base import BasePlugin
from ..api import MissingEntry, Gradebook
class ExportPlugin(BasePlugin):
    """Base class for export plugins."""
    # destination passed on the command line via --to; empty by default so
    # subclasses can choose their own default
    to = Unicode("", help="destination to export to").tag(config=True)
    # optional student-id filter; empty means "all students"
    student = List(
        [], help="list of students to export").tag(config=True)
    # optional assignment-name filter; empty means "all assignments"
    assignment = List(
        [], help="list of assignments to export").tag(config=True)
    def export(self, gradebook: Gradebook) -> None:
        """Export grades to another format.
        This method MUST be implemented by subclasses. Users should be able to
        pass the ``--to`` flag on the command line, which will set the
        ``self.to`` variable. By default, this variable will be an empty string,
        which allows you to specify whatever default you would like.
        Arguments
        ---------
        gradebook:
            An instance of the gradebook
        """
        raise NotImplementedError
class CsvExportPlugin(ExportPlugin):
    """CSV exporter plugin.

    Writes one row per (assignment, student) pair.  The optional
    ``assignment`` and ``student`` traits inherited from ``ExportPlugin``
    restrict which rows are emitted; students with no submission for an
    assignment are recorded with an empty timestamp and zero scores.
    """

    def export(self, gradebook: Gradebook) -> None:
        """Export grades from ``gradebook`` to a CSV file.

        Arguments
        ---------
        gradebook:
            An instance of the gradebook

        The destination defaults to ``grades.csv`` unless the ``--to``
        flag was given on the command line (which populates ``self.to``).
        """
        dest = self.to if self.to else "grades.csv"

        # Normalize the optional filters to lists of strings; an empty
        # list means "do not filter" on that dimension.
        allstudents = [str(item) for item in self.student]
        allassignments = [str(item) for item in self.assignment]

        self.log.info("Exporting grades to %s", dest)
        if allassignments:
            self.log.info("Exporting only assignments: %s", allassignments)
        if allstudents:
            self.log.info("Exporting only students: %s", allstudents)

        # Column order of the CSV output; ``fmt`` renders one row.
        keys = [
            "assignment",
            "duedate",
            "timestamp",
            "student_id",
            "last_name",
            "first_name",
            "email",
            "raw_score",
            "late_submission_penalty",
            "score",
            "max_score"
        ]
        fmt = ",".join(["{" + x + "}" for x in keys]) + "\n"

        # Use a context manager so the file handle is always closed,
        # even if an error occurs mid-export.  (Previously the handle
        # leaked when an exception was raised before ``fh.close()``.)
        with open(dest, "w") as fh:
            fh.write(",".join(keys) + "\n")

            # Loop over each assignment in the database
            for assignment in gradebook.assignments:
                # only continue if assignment is required
                if allassignments and assignment.name not in allassignments:
                    continue

                # Loop over each student in the database
                for student in gradebook.students:
                    # only continue if student is required
                    if allstudents and student.id not in allstudents:
                        continue

                    # Create a dictionary that will store information
                    # about this student's submitted assignment
                    score = {}
                    score['assignment'] = assignment.name
                    score['duedate'] = assignment.duedate
                    score['student_id'] = student.id
                    score['last_name'] = student.last_name
                    score['first_name'] = student.first_name
                    score['email'] = student.email
                    score['max_score'] = assignment.max_score

                    # Try to find the submission in the database. If it
                    # doesn't exist, the `MissingEntry` exception will be
                    # raised, which means the student didn't submit
                    # anything, so we assign them a score of zero.
                    try:
                        submission = gradebook.find_submission(
                            assignment.name, student.id)
                    except MissingEntry:
                        score['timestamp'] = ''
                        score['raw_score'] = 0.0
                        score['late_submission_penalty'] = 0.0
                        score['score'] = 0.0
                    else:
                        penalty = submission.late_submission_penalty
                        score['timestamp'] = submission.timestamp
                        score['raw_score'] = submission.score
                        score['late_submission_penalty'] = penalty
                        score['score'] = max(0.0, submission.score - penalty)

                    # Render every value as a string, mapping None to an
                    # empty field so the row formats cleanly.
                    for key in score:
                        if score[key] is None:
                            score[key] = ''
                        if not isinstance(score[key], str):
                            score[key] = str(score[key])

                    fh.write(fmt.format(**score))
| bsd-3-clause | c1406b0ecaba360614acde17ec61e757 | 33.871212 | 80 | 0.528568 | 4.774896 | false | false | false | false |
jupyter/nbgrader | nbgrader/tests/apps/test_nbgrader_db.py | 3 | 17232 | import pytest
import datetime
import shutil
import os
from textwrap import dedent
from os.path import join
from ...api import Gradebook, MissingEntry
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderDb(BaseTestApp):
    """Integration tests for the ``nbgrader db`` command-line app.

    Each test shells out to the CLI via ``run_nbgrader`` and then inspects
    the database through the ``Gradebook`` API (or the captured stdout) to
    verify the effect of the command.
    """
    def test_help(self):
        """Does the help display without error?"""
        run_nbgrader(["db", "--help-all"])
        run_nbgrader(["db", "student", "--help-all"])
        run_nbgrader(["db", "student", "list", "--help-all"])
        run_nbgrader(["db", "student", "remove", "--help-all"])
        run_nbgrader(["db", "student", "add", "--help-all"])
        run_nbgrader(["db", "student", "import", "--help-all"])
        run_nbgrader(["db", "assignment", "--help-all"])
        run_nbgrader(["db", "assignment", "list", "--help-all"])
        run_nbgrader(["db", "assignment", "remove", "--help-all"])
        run_nbgrader(["db", "assignment", "add", "--help-all"])
        run_nbgrader(["db", "assignment", "import", "--help-all"])
    def test_no_args(self):
        """Is there an error if no arguments are given?"""
        # Bare group commands succeed; subcommands needing a target fail.
        run_nbgrader(["db"], retcode=0)
        run_nbgrader(["db", "student"], retcode=0)
        run_nbgrader(["db", "student", "remove"], retcode=1)
        run_nbgrader(["db", "student", "add"], retcode=1)
        run_nbgrader(["db", "student", "import"], retcode=1)
        run_nbgrader(["db", "assignment"], retcode=0)
        run_nbgrader(["db", "assignment", "remove"], retcode=1)
        run_nbgrader(["db", "assignment", "add"], retcode=1)
        run_nbgrader(["db", "assignment", "import"], retcode=1)
    def test_student_add(self, db):
        # Adding the same id again updates the record in place; fields not
        # given on the command line are reset to None.
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
        run_nbgrader(["db", "student", "add", "foo", "--last-name=FooBar", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "FooBar"
            assert student.first_name is None
            assert student.email is None
        run_nbgrader(["db", "student", "add", "foo", "--first-name=FooBar", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name == "FooBar"
            assert student.email is None
        run_nbgrader(["db", "student", "add", "foo", "--email=foo@bar.com", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email == "foo@bar.com"
    def test_student_remove(self, db):
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
        run_nbgrader(["db", "student", "remove", "foo", "--db", db])
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")
        # running it again should give an error
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
    def test_student_remove_with_submissions(self, db, course_dir):
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
        # make sure we can still find the student
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # now force it to complete
        run_nbgrader(["db", "student", "remove", "foo", "--force", "--db", db])
        # student should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")
    def test_student_remove_with_submissions_f(self, db, course_dir):
        # Same as above, but exercising the short ``-f`` flag.
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
        # make sure we can still find the student
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # now force it to complete
        run_nbgrader(["db", "student", "remove", "foo", "-f", "--db", db])
        # student should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")
    def test_student_list(self, db):
        run_nbgrader(["db", "student", "add", "foo", "--first-name=abc", "--last-name=xyz", "--email=foo@bar.com", "--db", db])
        run_nbgrader(["db", "student", "add", "bar", "--db", db])
        out = run_nbgrader(["db", "student", "list", "--db", db], stdout=True)
        assert out == dedent(
            """
            There are 2 students in the database:
            bar (None, None) -- None, None
            foo (xyz, abc) -- foo@bar.com, None
            """
        ).strip() + "\n"
    def test_student_import(self, db, temp_cwd):
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name,email
                foo,abc,xyz,foo@bar.com
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyz"
            assert student.first_name == "abc"
            assert student.email == "foo@bar.com"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
        # check that it fails when no id column is given
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                first_name,last_name,email
                abc,xyz,foo@bar.com
                ,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db], retcode=1)
        # check that it works ok with extra and missing columns
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name,foo
                foo,abc,xyzzzz,blah
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyzzzz"
            assert student.first_name == "abc"
            assert student.email == "foo@bar.com"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
    def test_student_import_csv_spaces(self, db, temp_cwd):
        # Header columns may contain stray spaces (" email"); the importer
        # should still match them.
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name, email
                foo,abc,xyz,foo@bar.com
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyz"
            assert student.first_name == "abc"
            assert student.email == "foo@bar.com"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
    def test_assignment_add(self, db):
        run_nbgrader(["db", "assignment", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate is None
        run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
    def test_assignment_remove(self, db):
        run_nbgrader(["db", "assignment", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate is None
        run_nbgrader(["db", "assignment", "remove", "foo", "--db", db])
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("foo")
        # running it again should give an error
        run_nbgrader(["db", "assignment", "remove", "foo", "--db", db], retcode=1)
    def test_assignment_remove_with_submissions(self, db, course_dir):
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
        # make sure we can still find the assignment
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # now force it to complete
        run_nbgrader(["db", "assignment", "remove", "ps1", "--force", "--db", db])
        # assignment should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("ps1")
    def test_assignment_remove_with_submissions_f(self, db, course_dir):
        # Same as above, but exercising the short ``-f`` flag.
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
        # make sure we can still find the assignment
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # now force it to complete
        run_nbgrader(["db", "assignment", "remove", "ps1", "-f", "--db", db])
        # assignment should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("ps1")
    def test_assignment_list(self, db):
        run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
        run_nbgrader(["db", "assignment", "add", "bar", "--db", db])
        out = run_nbgrader(["db", "assignment", "list", "--db", db], stdout=True)
        assert out == dedent(
            """
            There are 2 assignments in the database:
            bar (due: None)
            foo (due: 2017-01-08 16:31:22)
            """
        ).strip() + "\n"
    def test_assignment_import(self, db, temp_cwd):
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name,duedate
                foo,Sun Jan 8 2017 4:31:22 PM
                bar,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None
    def test_assignment_import_csv_spaces(self, db, temp_cwd):
        # Header columns may contain stray spaces (" duedate"); the importer
        # should still match them.
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name, duedate
                foo,Sun Jan 8 2017 4:31:22 PM
                bar,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None
        # check that it fails when no id column is given
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                duedate
                Sun Jan 8 2017 4:31:22 PM
                ,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db], retcode=1)
        # check that it works ok with extra and missing columns
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name
                foo
                bar
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None
    def test_upgrade_nodb(self, temp_cwd):
        # test upgrading without a database
        run_nbgrader(["db", "upgrade"])
    def test_upgrade_current_db(self, course_dir):
        # add assignment files
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
        # check that nbgrader generate_assignment passes
        run_nbgrader(["generate_assignment", "ps1"])
        # test upgrading with a current database
        run_nbgrader(["db", "upgrade"])
    def test_upgrade_old_db_no_assign(self, course_dir):
        # add assignment files
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
        # replace the gradebook with an old version
        self._copy_file(join("files", "gradebook.db"), join(course_dir, "gradebook.db"))
        # upgrade the database
        run_nbgrader(["db", "upgrade"])
        # check that nbgrader assign passes
        run_nbgrader(["assign", "ps1"])
    def test_upgrade_old_db(self, course_dir):
        # add assignment files
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
        # replace the gradebook with an old version
        self._copy_file(join("files", "gradebook.db"), join(course_dir, "gradebook.db"))
        # check that nbgrader generate_assignment fails
        run_nbgrader(["generate_assignment", "ps1"], retcode=1)
        # upgrade the database
        run_nbgrader(["db", "upgrade"])
        # check that nbgrader generate_assignment passes
        run_nbgrader(["generate_assignment", "ps1"])
| bsd-3-clause | a26dcdb6e9b65a92e527e1488e48581a | 39.737589 | 127 | 0.539055 | 3.681265 | false | true | false | false |
dask/distributed | distributed/http/tests/test_routing.py | 1 | 1192 | from __future__ import annotations
import pytest
from tornado import web
from tornado.httpclient import AsyncHTTPClient, HTTPClientError
from distributed.http.routing import RoutingApplication
from distributed.utils_test import gen_test
class OneHandler(web.RequestHandler):
    """Test handler that answers GET with the literal body ``one``."""
    def get(self):
        self.write("one")
class TwoHandler(web.RequestHandler):
    """Test handler that answers GET with the literal body ``two``."""
    def get(self):
        self.write("two")
@gen_test()
async def test_basic():
    """Requests are dispatched only to applications currently registered
    with the RoutingApplication, and registration can happen at runtime."""
    application = RoutingApplication([(r"/one", OneHandler)])
    two = web.Application([(r"/two", TwoHandler)])
    # NOTE(review): port 1234 is hard-coded and could collide with another
    # process on the test host; binding to port 0 and reading the assigned
    # port back would be safer -- confirm before changing.
    server = application.listen(1234)
    client = AsyncHTTPClient("http://localhost:1234")
    response = await client.fetch("http://localhost:1234/one")
    assert response.body.decode() == "one"
    # /two is not registered yet, so the fetch must raise HTTPClientError.
    with pytest.raises(HTTPClientError):
        response = await client.fetch("http://localhost:1234/two")
    # Appending the second application makes /two reachable.
    application.applications.append(two)
    response = await client.fetch("http://localhost:1234/two")
    assert response.body.decode() == "two"
    # Handlers can also be added directly to the routing application.
    application.add_handlers(".*", [(r"/three", OneHandler, {})])
    response = await client.fetch("http://localhost:1234/three")
    assert response.body.decode() == "one"
| bsd-3-clause | 3860dd4d31e13331690b2cb7c7d2f822 | 28.073171 | 66 | 0.699664 | 4.013468 | false | true | false | false |
dask/distributed | distributed/shuffle/_limiter.py | 1 | 2359 | from __future__ import annotations
import asyncio
from distributed.metrics import time
class ResourceLimiter:
    """Track usage of an abstract, bounded resource.

    Callers account for usage with :meth:`increase` and release it with
    :meth:`decrease`.  Once the tracked total reaches ``maxvalue``,
    :meth:`wait_for_available` blocks until enough has been released.

    Example::

        limiter = ResourceLimiter(2)
        limiter.increase(1)
        limiter.increase(2)
        limiter.decrease(1)

        # This will block since we're still not below maxvalue
        await limiter.wait_for_available()
    """

    def __init__(self, maxvalue: int) -> None:
        self._maxvalue = maxvalue
        self._acquired = 0
        self._waiters = 0
        self._condition = asyncio.Condition()
        self.time_blocked_total = 0.0
        self.time_blocked_avg = 0.0

    def __repr__(self) -> str:
        return "<ResourceLimiter maxvalue: {} available: {}>".format(
            self._maxvalue, self.available()
        )

    def available(self) -> int:
        """Headroom left before further increases would block waiters."""
        headroom = self._maxvalue - self._acquired
        return headroom if headroom > 0 else 0

    def free(self) -> bool:
        """True when nothing is acquired, i.e. the limiter is neutral."""
        return not self._acquired

    async def wait_for_available(self) -> None:
        """Block until the acquired total drops below ``maxvalue``.

        Time spent blocked is accumulated into ``time_blocked_total`` and
        folded into the exponentially weighted ``time_blocked_avg``; calls
        that do not block contribute a duration of zero to the average.
        """
        began = time()
        blocked = 0
        try:
            if self.available():
                return
            async with self._condition:
                self._waiters += 1
                await self._condition.wait_for(self.available)
                self._waiters -= 1
            blocked = time() - began
        finally:
            self.time_blocked_total += blocked
            self.time_blocked_avg = self.time_blocked_avg * 0.9 + blocked * 0.1

    def increase(self, value: int) -> None:
        """Account for ``value`` additional units of usage."""
        self._acquired += value

    async def decrease(self, value: int) -> None:
        """Release ``value`` units and wake any blocked waiters."""
        if value > self._acquired:
            raise RuntimeError(
                f"Cannot release more than what was acquired! release: {value} acquired: {self._acquired}"
            )
        self._acquired -= value
        async with self._condition:
            self._condition.notify_all()
| bsd-3-clause | 638883f129bd5a107b887c3a5a913059 | 31.763889 | 106 | 0.593048 | 4.235189 | false | false | false | false |
jschneier/django-storages | storages/backends/gcloud.py | 1 | 12276 | import mimetypes
import warnings
from datetime import timedelta
from tempfile import SpooledTemporaryFile
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import SuspiciousOperation
from django.core.files.base import File
from django.utils import timezone
from django.utils.deconstruct import deconstructible
from storages.base import BaseStorage
from storages.compress import CompressedFileMixin
from storages.compress import CompressStorageMixin
from storages.utils import check_location
from storages.utils import clean_name
from storages.utils import get_available_overwrite_name
from storages.utils import is_seekable
from storages.utils import safe_join
from storages.utils import setting
from storages.utils import to_bytes
try:
from google.cloud.exceptions import NotFound
from google.cloud.storage import Blob
from google.cloud.storage import Client
from google.cloud.storage.blob import _quote
from google.cloud.storage.retry import DEFAULT_RETRY
except ImportError:
raise ImproperlyConfigured("Could not load Google Cloud Storage bindings.\n"
"See https://github.com/GoogleCloudPlatform/gcloud-python")
CONTENT_ENCODING = 'content_encoding'
CONTENT_TYPE = 'content_type'
class GoogleCloudFile(CompressedFileMixin, File):
    """Lazy Django ``File`` wrapper around a Google Cloud Storage blob.

    The remote object is only downloaded on first access to ``file`` and
    only re-uploaded on ``close()`` when the file was written to.
    """
    def __init__(self, name, mode, storage):
        self.name = name
        self.mime_type = mimetypes.guess_type(name)[0]
        self._mode = mode
        self._storage = storage
        # Fetch the blob (and its metadata) if it exists; in write mode,
        # fall back to a fresh Blob object for a not-yet-existing key.
        self.blob = storage.bucket.get_blob(
            name, chunk_size=storage.blob_chunk_size)
        if not self.blob and 'w' in mode:
            self.blob = Blob(
                self.name, storage.bucket,
                chunk_size=storage.blob_chunk_size)
        self._file = None
        self._is_dirty = False
    @property
    def size(self):
        # Size in bytes as reported by the blob's metadata.
        return self.blob.size
    def _get_file(self):
        # Lazily download into a spooled temporary file on first access;
        # small files stay in memory (up to max_memory_size), larger ones
        # roll over to disk.
        if self._file is None:
            self._file = SpooledTemporaryFile(
                max_size=self._storage.max_memory_size,
                suffix=".GSStorageFile",
                dir=setting("FILE_UPLOAD_TEMP_DIR")
            )
            if 'r' in self._mode:
                self._is_dirty = False
                self.blob.download_to_file(self._file)
                self._file.seek(0)
            # Transparently decompress objects this backend stored
            # gzip-encoded.
            if self._storage.gzip and self.blob.content_encoding == 'gzip':
                self._file = self._decompress_file(mode=self._mode, file=self._file)
        return self._file
    def _set_file(self, value):
        self._file = value
    file = property(_get_file, _set_file)
    def read(self, num_bytes=None):
        """Read from the (lazily downloaded) file; read mode required."""
        if 'r' not in self._mode:
            raise AttributeError("File was not opened in read mode.")
        if num_bytes is None:
            num_bytes = -1
        return super().read(num_bytes)
    def write(self, content):
        """Buffer ``content`` locally; uploaded to GCS on close()."""
        if 'w' not in self._mode:
            raise AttributeError("File was not opened in write mode.")
        self._is_dirty = True
        return super().write(to_bytes(content))
    def close(self):
        # Upload buffered writes (if any) before closing the local file.
        if self._file is not None:
            if self._is_dirty:
                blob_params = self._storage.get_object_parameters(self.name)
                self.blob.upload_from_file(
                    self.file, rewind=True, content_type=self.mime_type,
                    retry=DEFAULT_RETRY,
                    predefined_acl=blob_params.get('acl', self._storage.default_acl))
            self._file.close()
            self._file = None
@deconstructible
class GoogleCloudStorage(CompressStorageMixin, BaseStorage):
    """Django storage backend for Google Cloud Storage."""
    def __init__(self, **settings):
        super().__init__(**settings)
        check_location(self)
        # Client and bucket are created lazily via the properties below.
        self._bucket = None
        self._client = None
    def get_default_settings(self):
        """Map Django GS_* settings to the backend's configuration keys."""
        return {
            "project_id": setting('GS_PROJECT_ID'),
            "credentials": setting('GS_CREDENTIALS'),
            "bucket_name": setting('GS_BUCKET_NAME'),
            "custom_endpoint": setting('GS_CUSTOM_ENDPOINT', None),
            "location": setting('GS_LOCATION', ''),
            "default_acl": setting('GS_DEFAULT_ACL'),
            "querystring_auth": setting('GS_QUERYSTRING_AUTH', True),
            "expiration": setting('GS_EXPIRATION', timedelta(seconds=86400)),
            "gzip": setting('GS_IS_GZIPPED', False),
            "gzip_content_types": setting('GZIP_CONTENT_TYPES', (
                'text/css',
                'text/javascript',
                'application/javascript',
                'application/x-javascript',
                'image/svg+xml',
            )),
            "file_overwrite": setting('GS_FILE_OVERWRITE', True),
            "cache_control": setting('GS_CACHE_CONTROL'),
            "object_parameters": setting('GS_OBJECT_PARAMETERS', {}),
            # The max amount of memory a returned file can take up before being
            # rolled over into a temporary file on disk. Default is 0: Do not
            # roll over.
            "max_memory_size": setting('GS_MAX_MEMORY_SIZE', 0),
            "blob_chunk_size": setting('GS_BLOB_CHUNK_SIZE'),
        }
    @property
    def client(self):
        # Lazily construct the google-cloud-storage client.
        if self._client is None:
            self._client = Client(
                project=self.project_id,
                credentials=self.credentials
            )
        return self._client
    @property
    def bucket(self):
        # Lazily construct the bucket handle (no API call is made here).
        if self._bucket is None:
            self._bucket = self.client.bucket(self.bucket_name)
        return self._bucket
    def _normalize_name(self, name):
        """
        Normalizes the name so that paths like /path/to/ignored/../something.txt
        and ./file.txt work. Note that clean_name adds ./ to some paths so
        they need to be fixed here. We check to make sure that the path pointed
        to is not outside the directory specified by the LOCATION setting.
        """
        try:
            return safe_join(self.location, name)
        except ValueError:
            raise SuspiciousOperation("Attempted access to '%s' denied." %
                                      name)
    def _open(self, name, mode='rb'):
        """Return a GoogleCloudFile for ``name``; raise if it doesn't exist."""
        name = self._normalize_name(clean_name(name))
        file_object = GoogleCloudFile(name, mode, self)
        if not file_object.blob:
            raise FileNotFoundError('File does not exist: %s' % name)
        return file_object
    def _save(self, name, content):
        """Upload ``content`` under ``name``, gzip-compressing when enabled."""
        cleaned_name = clean_name(name)
        name = self._normalize_name(cleaned_name)
        content.name = cleaned_name
        file_object = GoogleCloudFile(name, 'rw', self)
        upload_params = {}
        blob_params = self.get_object_parameters(name)
        # 'acl' and 'content_type' are upload arguments, not blob
        # properties, so pop them out of the parameter dict.
        upload_params['predefined_acl'] = blob_params.pop('acl', self.default_acl)
        upload_params[CONTENT_TYPE] = blob_params.pop(CONTENT_TYPE, file_object.mime_type)
        # Compress eligible content types unless the caller already set an
        # explicit content encoding.
        if (self.gzip and
                upload_params[CONTENT_TYPE] in self.gzip_content_types and
                CONTENT_ENCODING not in blob_params):
            content = self._compress_content(content)
            blob_params[CONTENT_ENCODING] = 'gzip'
        for prop, val in blob_params.items():
            setattr(file_object.blob, prop, val)
        rewind = is_seekable(content)
        file_object.blob.upload_from_file(
            content,
            rewind=rewind,
            retry=DEFAULT_RETRY,
            size=getattr(content, 'size', None),
            **upload_params
        )
        return cleaned_name
    def get_object_parameters(self, name):
        """Override this to return a dictionary of overwritable blob-property to value.

        Returns GS_OBJECT_PARAMETERS by default. See the docs for all possible options.
        """
        object_parameters = self.object_parameters.copy()
        if 'cache_control' not in object_parameters and self.cache_control:
            warnings.warn(
                'The GS_CACHE_CONTROL setting is deprecated. Use GS_OBJECT_PARAMETERS to set any '
                'writable blob property or override GoogleCloudStorage.get_object_parameters to '
                'vary the parameters per object.', DeprecationWarning
            )
            object_parameters['cache_control'] = self.cache_control
        return object_parameters
    def delete(self, name):
        """Delete ``name`` from the bucket; missing objects are ignored."""
        name = self._normalize_name(clean_name(name))
        try:
            self.bucket.delete_blob(name, retry=DEFAULT_RETRY)
        except NotFound:
            pass
    def exists(self, name):
        """Return True if ``name`` (or, for '', the bucket itself) exists."""
        if not name:  # root element aka the bucket
            try:
                self.client.get_bucket(self.bucket)
                return True
            except NotFound:
                return False
        name = self._normalize_name(clean_name(name))
        return bool(self.bucket.get_blob(name))
    def listdir(self, name):
        """Return ([subdirectories], [files]) directly under ``name``."""
        name = self._normalize_name(clean_name(name))
        # For bucket.list_blobs and logic below name needs to end in /
        # but for the root path "" we leave it as an empty string
        if name and not name.endswith('/'):
            name += '/'
        iterator = self.bucket.list_blobs(prefix=name, delimiter='/')
        blobs = list(iterator)
        prefixes = iterator.prefixes
        files = []
        dirs = []
        for blob in blobs:
            parts = blob.name.split("/")
            files.append(parts[-1])
        for folder_path in prefixes:
            parts = folder_path.split("/")
            dirs.append(parts[-2])
        return list(dirs), files
    def _get_blob(self, name):
        # Wrap google.cloud.storage's blob to raise if the file doesn't exist
        blob = self.bucket.get_blob(name)
        if blob is None:
            raise NotFound('File does not exist: {}'.format(name))
        return blob
    def size(self, name):
        """Return the size of ``name`` in bytes."""
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(name)
        return blob.size
    def modified_time(self, name):
        """Return the last-modified time of ``name`` as a naive datetime."""
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(name)
        return timezone.make_naive(blob.updated)
    def get_modified_time(self, name):
        """Return the last-modified time; timezone-aware when USE_TZ=True."""
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(name)
        updated = blob.updated
        return updated if setting('USE_TZ') else timezone.make_naive(updated)
    def get_created_time(self, name):
        """
        Return the creation time (as a datetime) of the file specified by name.
        The datetime will be timezone-aware if USE_TZ=True.
        """
        name = self._normalize_name(clean_name(name))
        blob = self._get_blob(name)
        created = blob.time_created
        return created if setting('USE_TZ') else timezone.make_naive(created)
    def url(self, name, parameters=None):
        """
        Return public url or a signed url for the Blob.
        This DOES NOT check for existence of the Blob - that makes the code
        too slow for many use cases.
        """
        name = self._normalize_name(clean_name(name))
        blob = self.bucket.blob(name)
        blob_params = self.get_object_parameters(name)
        # Public (unsigned) URLs are used for publicRead objects or when
        # querystring auth is disabled.
        no_signed_url = (
            blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth)
        if not self.custom_endpoint and no_signed_url:
            return blob.public_url
        elif no_signed_url:
            return '{storage_base_url}/{quoted_name}'.format(
                storage_base_url=self.custom_endpoint,
                quoted_name=_quote(name, safe=b"/~"),
            )
        else:
            # Caller-supplied parameters take precedence over defaults.
            default_params = {
                "bucket_bound_hostname": self.custom_endpoint,
                "expiration": self.expiration,
                "version": "v4",
            }
            params = parameters or {}
            for key, value in default_params.items():
                if value and key not in params:
                    params[key] = value
            return blob.generate_signed_url(**params)
    def get_available_name(self, name, max_length=None):
        """Return ``name`` unchanged when overwriting; otherwise deduplicate."""
        name = clean_name(name)
        if self.file_overwrite:
            return get_available_overwrite_name(name, max_length)
        return super().get_available_name(name, max_length)
| bsd-3-clause | 4b03dba852b86f7f82c4b9ce53f84223 | 35.319527 | 98 | 0.59653 | 4.191192 | false | false | false | false |
cherrypy/cherrypy | cherrypy/_cpreqbody.py | 8 | 36382 | """Request body processing for CherryPy.
.. versionadded:: 3.2
Application authors have complete control over the parsing of HTTP request
entities. In short,
:attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>`
is now always set to an instance of
:class:`RequestBody<cherrypy._cpreqbody.RequestBody>`,
and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the
:attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict.
If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the
'image' types altogether. If neither the full type nor the major type has a
matching processor, then a default processor is used
(:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the
:attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and
:attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the
request.body or the Part.
.. _custombodyprocessors:
Custom Processors
=================
You can add your own processors for any specific or major MIME type. Simply add
it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a
hook/tool that runs at ``on_start_resource`` or ``before_request_body``.
Here's the built-in JSON tool for an example::
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
'''Read application/json data into request.json.'''
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors['application/json'] = json_processor
We begin by defining a new ``json_processor`` function to stick in the
``processors`` dictionary. All processor functions take a single argument,
the ``Entity`` instance they are to process. It will be called whenever a
request is received (for those URI's where the tool is turned on) which
has a ``Content-Type`` of "application/json".
First, it checks for a valid ``Content-Length`` (raising 411 if not valid),
then reads the remaining bytes on the socket. The ``fp`` object knows its
own length, so it won't hang waiting for data that never arrives. It will
return when all data has been read. Then, we decode those bytes using
Python's built-in ``json`` module, and stick the decoded result onto
``request.json`` . If it cannot be decoded, we raise 400.
If the "force" argument is True (the default), the ``Tool`` clears the
``processors`` dict so that request entities of other ``Content-Types``
aren't parsed at all. Since there's no entry for those invalid MIME
types, the ``default_proc`` method of ``cherrypy.request.body`` is
called. But this does nothing by default (usually to provide the page
handler an opportunity to handle it.)
But in our case, we want to raise 415, so we replace
``request.body.default_proc``
with the error (``HTTPError`` instances, when called, raise themselves).
If we were defining a custom processor, we can do so without making a ``Tool``.
Just add the config entry::
request.body.processors = {'application/json': json_processor}
Note that you can only replace the ``processors`` dict wholesale this way,
not update the existing one.
"""
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = 8192
import re
import sys
import tempfile
from urllib.parse import unquote
import cheroot.server
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.lib import httputil
def unquote_plus(bs):
    """Bytes version of urllib.parse.unquote_plus.

    Replace b'+' with b' ' and decode percent-escapes
    (e.g. b'%20' -> b' '). A malformed escape (fewer than two hex
    digits, or non-hex characters such as b'%zz' or b'%+1') is left in
    the output verbatim, '%' included, matching the behavior of
    urllib.parse.unquote_to_bytes.
    """
    bs = bs.replace(b'+', b' ')
    atoms = bs.split(b'%')
    for i in range(1, len(atoms)):
        item = atoms[i]
        # Every atom after the first lost its leading '%' in the split;
        # its first two bytes should be the hex digits of the escape.
        hexpair, rest = item[:2], item[2:]
        try:
            if len(hexpair) != 2:
                raise ValueError('truncated percent-escape')
            # bytes.fromhex rejects signs and stray characters that
            # int(..., 16) would tolerate (e.g. b'+1').
            atoms[i] = bytes.fromhex(hexpair.decode('ascii')) + rest
        except (ValueError, UnicodeDecodeError):
            # Malformed escape: restore the '%' that split() removed
            # instead of silently dropping it from the output.
            atoms[i] = b'%' + item
    return b''.join(atoms)
# ------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
    """Read application/x-www-form-urlencoded data into entity.params.

    The raw payload is decoded with each charset in
    ``entity.attempt_charsets`` in turn; the first charset that decodes
    every key and value without error wins and is recorded on
    ``entity.charset``. Raises HTTP 400 if none succeed.
    """
    qs = entity.fp.read()
    for charset in entity.attempt_charsets:
        try:
            params = {}
            # Pairs may be separated by '&' or ';' (HTML 4.01 B.2.2).
            for aparam in qs.split(b'&'):
                for pair in aparam.split(b';'):
                    if not pair:
                        continue
                    atoms = pair.split(b'=', 1)
                    if len(atoms) == 1:
                        # A bare key with no '=' gets an empty value.
                        atoms.append(b'')
                    key = unquote_plus(atoms[0]).decode(charset)
                    value = unquote_plus(atoms[1]).decode(charset)
                    if key in params:
                        # Repeated keys accumulate into a list.
                        if not isinstance(params[key], list):
                            params[key] = [params[key]]
                        params[key].append(value)
                    else:
                        params[key] = value
        except UnicodeDecodeError:
            # This charset cannot decode the payload; try the next one.
            pass
        else:
            entity.charset = charset
            break
    else:
        # for/else: every candidate charset failed to decode the payload.
        raise cherrypy.HTTPError(
            400, 'The request entity could not be decoded. The following '
            'charsets were attempted: %s' % repr(entity.attempt_charsets))
    # Now that all values have been successfully parsed and decoded,
    # apply them to the entity.params dict.
    for key, value in params.items():
        if key in entity.params:
            if not isinstance(entity.params[key], list):
                entity.params[key] = [entity.params[key]]
            entity.params[key].append(value)
        else:
            entity.params[key] = value
def process_multipart(entity):
    """Read all multipart parts into entity.parts.

    Extracts and validates the boundary from the Content-Type
    parameters, skips ahead to the first boundary marker, then
    repeatedly parses Part objects from the stream until the closing
    marker is consumed.
    """
    ib = ''
    if 'boundary' in entity.content_type.params:
        # http://tools.ietf.org/html/rfc2046#section-5.1.1
        # "The grammar for parameters on the Content-type field is such that it
        # is often necessary to enclose the boundary parameter values in quotes
        # on the Content-type line"
        ib = entity.content_type.params['boundary'].strip('"')
    # Enforce printable ASCII, at most 201 chars, and a non-space final
    # character (a loose superset of the RFC 2046 boundary grammar).
    if not re.match('^[ -~]{0,200}[!-~]$', ib):
        raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
    # On the wire each boundary line starts with two leading hyphens.
    ib = ('--' + ib).encode('ascii')
    # Find the first marker
    while True:
        b = entity.readline()
        if not b:
            # Stream ended before any boundary was seen: nothing to parse.
            return
        b = b.strip()
        if b == ib:
            break
    # Read all parts
    while True:
        part = entity.part_class.from_fp(entity.fp, ib)
        entity.parts.append(part)
        part.process()
        if part.fp.done:
            # The part consumed the closing "--boundary--" marker, so the
            # whole multipart body has been read.
            break
def process_multipart_form_data(entity):
    """Read all multipart/form-data parts into entity.parts or entity.params.

    Parts that carry a Content-Disposition name are moved into
    ``entity.params`` (plain fields as decoded strings, file uploads as
    the Part object itself, so its .file and .filename stay reachable);
    nameless parts remain in ``entity.parts``.
    """
    process_multipart(entity)
    anonymous_parts = []
    for part in entity.parts:
        if part.name is None:
            # No Content-Disposition name: leave it in entity.parts.
            anonymous_parts.append(part)
            continue
        # Plain fields become strings; uploads keep the whole Part.
        new_value = part.fullvalue() if part.filename is None else part
        if part.name not in entity.params:
            entity.params[part.name] = new_value
        else:
            current = entity.params[part.name]
            if isinstance(current, list):
                current.append(new_value)
            else:
                # Second occurrence of this name: promote to a list.
                entity.params[part.name] = [current, new_value]
    entity.parts = anonymous_parts
def _old_process_multipart(entity):
    """The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
    process_multipart(entity)
    params = entity.params
    for part in entity.parts:
        # Nameless parts are collected under the generic 'parts' key.
        key = ntou('parts') if part.name is None else part.name
        # Plain fields become strings; uploads keep the whole Part so
        # consumer code can reach its .file and .filename attributes.
        value = part.fullvalue() if part.filename is None else part
        if key not in params:
            params[key] = value
            continue
        current = params[key]
        if isinstance(current, list):
            current.append(value)
        else:
            # Second occurrence of this key: promote to a list.
            params[key] = [current, value]
# -------------------------------- Entities --------------------------------- #
class Entity(object):
    """An HTTP request body, or MIME multipart body.
    This class collects information about the HTTP request entity. When a
    given entity is of MIME type "multipart", each part is parsed into its own
    Entity instance, and the set of parts stored in
    :attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`.
    Between the ``before_request_body`` and ``before_handler`` tools, CherryPy
    tries to process the request body (if any) by calling
    :func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`.
    This uses the ``content_type`` of the Entity to look up a suitable
    processor in
    :attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`,
    a dict.
    If a matching processor cannot be found for the complete Content-Type,
    it tries again using the major type. For example, if a request with an
    entity of type "image/jpeg" arrives, but no processor can be found for
    that complete type, then one is sought for the major type "image". If a
    processor is still not found, then the
    :func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method
    of the Entity is called (which does nothing by default; you can
    override this too).
    CherryPy includes processors for the "application/x-www-form-urlencoded"
    type, the "multipart/form-data" type, and the "multipart" major type.
    CherryPy 3.2 processes these types almost exactly as older versions.
    Parts are passed as arguments to the page handler using their
    ``Content-Disposition.name`` if given, otherwise in a generic "parts"
    argument. Each such part is either a string, or the
    :class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this
    case it will have ``file`` and ``filename`` attributes, or possibly a
    ``value`` attribute). Each Part is itself a subclass of
    Entity, and has its own ``process`` method and ``processors`` dict.
    There is a separate processor for the "multipart" major type which is more
    flexible, and simply stores all multipart parts in
    :attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can
    enable it with::
        cherrypy.request.body.processors['multipart'] = \
            _cpreqbody.process_multipart
    in an ``on_start_resource`` tool.
    """
    # http://tools.ietf.org/html/rfc2046#section-4.1.2:
    # "The default character set, which must be assumed in the
    # absence of a charset parameter, is US-ASCII."
    # However, many browsers send data in utf-8 with no charset.
    attempt_charsets = ['utf-8']
    r"""A list of strings, each of which should be a known encoding.
    When the Content-Type of the request body warrants it, each of the given
    encodings will be tried in order. The first one to successfully decode the
    entity without raising an error is stored as
    :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
    to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
    `HTTP/1.1
    <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
    but ``['us-ascii', 'utf-8']`` for multipart parts.
    """
    charset = None
    """The successful decoding; see "attempt_charsets" above."""
    content_type = None
    """The value of the Content-Type request header.
    If the Entity is part of a multipart payload, this will be the Content-Type
    given in the MIME headers for this part.
    """
    default_content_type = 'application/x-www-form-urlencoded'
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however, the MIME spec
    declares that a part with no Content-Type defaults to "text/plain"
    (see :class:`Part<cherrypy._cpreqbody.Part>`).
    """
    filename = None
    """The ``Content-Disposition.filename`` header, if available."""
    fp = None
    """The readable socket file object."""
    headers = None
    """A dict of request/multipart header names and values.
    This is a copy of the ``request.headers`` for the ``request.body``;
    for multipart parts, it is the set of headers for that part.
    """
    length = None
    """The value of the ``Content-Length`` header, if provided."""
    name = None
    """The "name" parameter of the ``Content-Disposition`` header, if any."""
    params = None
    """
    If the request Content-Type is 'application/x-www-form-urlencoded' or
    multipart, this will be a dict of the params pulled from the entity
    body; that is, it will be the portion of request.params that come
    from the message body (sometimes called "POST params", although they
    can be sent with various HTTP method verbs). This value is set between
    the 'before_request_body' and 'before_handler' hooks (assuming that
    process_request_body is True)."""
    processors = {'application/x-www-form-urlencoded': process_urlencoded,
                  'multipart/form-data': process_multipart_form_data,
                  'multipart': process_multipart,
                  }
    """A dict of Content-Type names to processor methods."""
    parts = None
    """A list of Part instances if ``Content-Type`` is of major type
    "multipart"."""
    part_class = None
    """The class used for multipart parts.
    You can replace this with custom subclasses to alter the processing of
    multipart parts.
    """
    def __init__(self, fp, headers, params=None, parts=None):
        # Make an instance-specific copy of the class processors
        # so Tools, etc. can replace them per-request.
        self.processors = self.processors.copy()
        self.fp = fp
        self.headers = headers
        if params is None:
            params = {}
        self.params = params
        if parts is None:
            parts = []
        self.parts = parts
        # Content-Type
        self.content_type = headers.elements('Content-Type')
        if self.content_type:
            # elements() returns a priority-ordered list; take the first.
            self.content_type = self.content_type[0]
        else:
            self.content_type = httputil.HeaderElement.from_str(
                self.default_content_type)
        # Copy the class 'attempt_charsets', prepending any Content-Type
        # charset
        dec = self.content_type.params.get('charset', None)
        if dec:
            self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
                                             if c != dec]
        else:
            # Copy so per-instance mutation can't touch the class default.
            self.attempt_charsets = self.attempt_charsets[:]
        # Length
        self.length = None
        clen = headers.get('Content-Length', None)
        # If Transfer-Encoding is 'chunked', ignore any Content-Length.
        if (
                clen is not None and
                'chunked' not in headers.get('Transfer-Encoding', '')
        ):
            try:
                self.length = int(clen)
            except ValueError:
                # A non-numeric Content-Length is treated as absent.
                pass
        # Content-Disposition
        self.name = None
        self.filename = None
        disp = headers.elements('Content-Disposition')
        if disp:
            disp = disp[0]
            if 'name' in disp.params:
                self.name = disp.params['name']
                # Strip a single pair of surrounding double quotes.
                if self.name.startswith('"') and self.name.endswith('"'):
                    self.name = self.name[1:-1]
            if 'filename' in disp.params:
                self.filename = disp.params['filename']
                if (
                        self.filename.startswith('"') and
                        self.filename.endswith('"')
                ):
                    self.filename = self.filename[1:-1]
            if 'filename*' in disp.params:
                # @see https://tools.ietf.org/html/rfc5987
                # Extended parameter of the form charset'lang'percent-text;
                # it takes precedence over a plain 'filename' param.
                encoding, lang, filename = disp.params['filename*'].split("'")
                self.filename = unquote(str(filename), encoding)
    # The read* methods simply delegate to the wrapped fp (a SizedReader
    # for request bodies).
    def read(self, size=None, fp_out=None):
        return self.fp.read(size, fp_out)
    def readline(self, size=None):
        return self.fp.readline(size)
    def readlines(self, sizehint=None):
        return self.fp.readlines(sizehint)
    def __iter__(self):
        return self
    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line
    def next(self):
        # Python 2 iterator-protocol alias; kept for compatibility.
        return self.__next__()
    def read_into_file(self, fp_out=None):
        """Read the request body into fp_out (or make_file() if None).
        Return fp_out.
        """
        if fp_out is None:
            fp_out = self.make_file()
        self.read(fp_out=fp_out)
        return fp_out
    def make_file(self):
        """Return a file-like object into which the request body will be read.
        By default, this will return a TemporaryFile. Override as needed.
        See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`."""
        return tempfile.TemporaryFile()
    def fullvalue(self):
        """Return this entity as a string, whether stored in a file or not."""
        # NOTE(review): .file/.value are set by Part.__init__ and the
        # processors, not by Entity.__init__ — this appears to assume the
        # entity has already been processed; confirm callers.
        if self.file:
            # It was stored in a tempfile. Read it.
            self.file.seek(0)
            value = self.file.read()
            self.file.seek(0)
        else:
            value = self.value
        value = self.decode_entity(value)
        return value
    def decode_entity(self, value):
        """Return a given byte encoded value as a string"""
        # Try each candidate charset in order; the first that decodes
        # cleanly wins and is recorded on self.charset.
        for charset in self.attempt_charsets:
            try:
                value = value.decode(charset)
            except UnicodeDecodeError:
                pass
            else:
                self.charset = charset
                return value
        else:
            # for/else: no candidate charset could decode the value.
            raise cherrypy.HTTPError(
                400,
                'The request entity could not be decoded. The following '
                'charsets were attempted: %s' % repr(self.attempt_charsets)
            )
    def process(self):
        """Execute the best-match processor for the given media type."""
        proc = None
        ct = self.content_type.value
        try:
            proc = self.processors[ct]
        except KeyError:
            # Fall back from the full type (e.g. 'image/jpeg') to the
            # major type (e.g. 'image').
            toptype = ct.split('/', 1)[0]
            try:
                proc = self.processors[toptype]
            except KeyError:
                pass
        if proc is None:
            self.default_proc()
        else:
            proc(self)
    def default_proc(self):
        """Called if a more-specific processor is not found for the
        ``Content-Type``.
        """
        # Leave the fp alone for someone else to read. This works fine
        # for request.body, but the Part subclasses need to override this
        # so they can move on to the next part.
        pass
class Part(Entity):
    """A MIME part entity, part of a multipart entity."""
    # "The default character set, which must be assumed in the absence of a
    # charset parameter, is US-ASCII."
    attempt_charsets = ['us-ascii', 'utf-8']
    r"""A list of strings, each of which should be a known encoding.
    When the Content-Type of the request body warrants it, each of the given
    encodings will be tried in order. The first one to successfully decode the
    entity without raising an error is stored as
    :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
    to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
    `HTTP/1.1
    <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
    but ``['us-ascii', 'utf-8']`` for multipart parts.
    """
    boundary = None
    """The MIME multipart boundary."""
    default_content_type = 'text/plain'
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however (this class),
    the MIME spec declares that a part with no Content-Type defaults to
    "text/plain".
    """
    # This is the default in stdlib cgi. We may want to increase it.
    maxrambytes = 1000
    """The threshold of bytes after which point the ``Part`` will store
    its data in a file (generated by
    :func:`make_file<cherrypy._cprequest.Entity.make_file>`)
    instead of a string. Defaults to 1000, just like the :mod:`cgi`
    module in Python's standard library.
    """
    def __init__(self, fp, headers, boundary):
        Entity.__init__(self, fp, headers)
        self.boundary = boundary
        # Exactly one of .file / .value is populated when this part is
        # processed (file for spilled/large data, value for small bytes).
        self.file = None
        self.value = None
    @classmethod
    def from_fp(cls, fp, boundary):
        # Alternate constructor: parse the part's MIME headers off the
        # stream, then build the Part positioned at its body.
        headers = cls.read_headers(fp)
        return cls(fp, headers, boundary)
    @classmethod
    def read_headers(cls, fp):
        headers = httputil.HeaderMap()
        while True:
            line = fp.readline()
            if not line:
                # No more data--illegal end of headers
                raise EOFError('Illegal end of headers.')
            if line == b'\r\n':
                # Normal end of headers
                break
            if not line.endswith(b'\r\n'):
                raise ValueError('MIME requires CRLF terminators: %r' % line)
            if line[0] in b' \t':
                # It's a continuation line.
                # NOTE(review): 'k' here is the header name from the
                # previous iteration; a continuation as the very first
                # line would raise NameError — assumed not to occur in
                # well-formed MIME.
                v = line.strip().decode('ISO-8859-1')
            else:
                k, v = line.split(b':', 1)
                k = k.strip().decode('ISO-8859-1')
                v = v.strip().decode('ISO-8859-1')
            existing = headers.get(k)
            if existing:
                # Duplicate header: join values with a comma, per RFC 2616.
                v = ', '.join((existing, v))
            headers[k] = v
        return headers
    def read_lines_to_boundary(self, fp_out=None):
        """Read bytes from self.fp and return or write them to a file.
        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.
        If the 'fp_out' argument is not None, it must be a file-like
        object that supports the 'write' method; all bytes read will be
        written to the fp, and that fp is returned.
        """
        endmarker = self.boundary + b'--'
        # 'delim' holds the previous line's terminator; it is withheld
        # until the next line proves not to be a boundary, so the final
        # CRLF before the boundary is not treated as part of the payload.
        delim = b''
        prev_lf = True
        lines = []
        seen = 0
        while True:
            line = self.fp.readline(1 << 16)
            if not line:
                raise EOFError('Illegal end of multipart body.')
            # A boundary only counts if it starts a line (prev_lf).
            if line.startswith(b'--') and prev_lf:
                strippedline = line.strip()
                if strippedline == self.boundary:
                    break
                if strippedline == endmarker:
                    # Closing "--boundary--": the whole body is done.
                    self.fp.finish()
                    break
            line = delim + line
            if line.endswith(b'\r\n'):
                delim = b'\r\n'
                line = line[:-2]
                prev_lf = True
            elif line.endswith(b'\n'):
                delim = b'\n'
                line = line[:-1]
                prev_lf = True
            else:
                # Partial line (readline hit its 64KB cap mid-line).
                delim = b''
                prev_lf = False
            if fp_out is None:
                lines.append(line)
                seen += len(line)
                if seen > self.maxrambytes:
                    # Too large for RAM: spill everything read so far
                    # into a file and stream the rest there.
                    fp_out = self.make_file()
                    for line in lines:
                        fp_out.write(line)
            else:
                fp_out.write(line)
        if fp_out is None:
            result = b''.join(lines)
            return result
        else:
            fp_out.seek(0)
            return fp_out
    def default_proc(self):
        """Called if a more-specific processor is not found for the
        ``Content-Type``.
        """
        if self.filename:
            # Always read into a file if a .filename was given.
            self.file = self.read_into_file()
        else:
            result = self.read_lines_to_boundary()
            if isinstance(result, bytes):
                self.value = result
            else:
                # read_lines_to_boundary spilled to a file object.
                self.file = result
    def read_into_file(self, fp_out=None):
        """Read the request body into fp_out (or make_file() if None).
        Return fp_out.
        """
        if fp_out is None:
            fp_out = self.make_file()
        self.read_lines_to_boundary(fp_out=fp_out)
        return fp_out
# Bind after both classes exist: Part subclasses Entity, so the default
# multipart part class can only be attached here.
Entity.part_class = Part
# Sentinel meaning "no byte limit" inside SizedReader.read.
inf = float('inf')
class SizedReader:
    """A file-like reader that enforces Content-Length and size limits.

    Wraps the raw socket file, tracking how many bytes have been
    consumed (``bytes_read``), raising HTTP 413 when ``maxbytes`` is
    exceeded, and — once the body is exhausted — reading any HTTP
    trailer headers when ``has_trailers`` is set.
    """
    def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE,
                 has_trailers=False):
        # self.buffer provides the push-back storage that readline()
        # needs for bytes read past a newline; fp itself is unwrapped.
        self.fp = fp
        self.length = length
        self.maxbytes = maxbytes
        self.buffer = b''
        self.bufsize = bufsize
        self.bytes_read = 0
        self.done = False
        self.has_trailers = has_trailers
    def read(self, size=None, fp_out=None):
        """Read bytes from the request body and return or write them to a file.
        A number of bytes less than or equal to the 'size' argument are read
        off the socket. The actual number of bytes read are tracked in
        self.bytes_read. The number may be smaller than 'size' when 1) the
        client sends fewer bytes, 2) the 'Content-Length' request header
        specifies fewer bytes than requested, or 3) the number of bytes read
        exceeds self.maxbytes (in which case, 413 is raised).
        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.
        If the 'fp_out' argument is not None, it must be a file-like
        object that supports the 'write' method; all bytes read will be
        written to the fp, and None is returned.
        """
        if self.length is None:
            # No Content-Length: bounded only by the caller's 'size'.
            if size is None:
                remaining = inf
            else:
                remaining = size
        else:
            remaining = self.length - self.bytes_read
            # NOTE(review): size=0 is falsy here, so read(0) returns the
            # whole remaining body rather than nothing — confirm intended.
            if size and size < remaining:
                remaining = size
        if remaining == 0:
            self.finish()
            if fp_out is None:
                return b''
            else:
                return None
        chunks = []
        # Read bytes from the buffer.
        if self.buffer:
            if remaining is inf:
                data = self.buffer
                self.buffer = b''
            else:
                data = self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            datalen = len(data)
            remaining -= datalen
            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)
            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)
        # Read bytes from the socket.
        while remaining > 0:
            chunksize = min(remaining, self.bufsize)
            try:
                data = self.fp.read(chunksize)
            except Exception:
                e = sys.exc_info()[1]
                # Matched by class name so no import of the server's
                # exception type is needed here.
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, 'Maximum request length: %r' % e.args[1])
                else:
                    raise
            if not data:
                # Short read: the client sent fewer bytes than declared.
                self.finish()
                break
            datalen = len(data)
            remaining -= datalen
            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)
            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)
        if fp_out is None:
            return b''.join(chunks)
    def readline(self, size=None):
        """Read a line from the request body and return it."""
        chunks = []
        while size is None or size > 0:
            chunksize = self.bufsize
            if size is not None and size < self.bufsize:
                chunksize = size
            data = self.read(chunksize)
            if not data:
                break
            pos = data.find(b'\n') + 1
            if pos:
                chunks.append(data[:pos])
                # Push the bytes after the newline back into self.buffer
                # and un-count them so the next read() sees them again.
                remainder = data[pos:]
                self.buffer += remainder
                self.bytes_read -= len(remainder)
                break
            else:
                chunks.append(data)
        return b''.join(chunks)
    def readlines(self, sizehint=None):
        """Read lines from the request body and return them."""
        if self.length is not None:
            # Never hint past the declared remaining body length.
            if sizehint is None:
                sizehint = self.length - self.bytes_read
            else:
                sizehint = min(sizehint, self.length - self.bytes_read)
        lines = []
        seen = 0
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
            seen += len(line)
            if seen >= sizehint:
                break
        return lines
    def finish(self):
        # Mark the body exhausted and, for chunked requests that
        # advertised trailers, parse the trailer headers.
        self.done = True
        if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
            self.trailers = {}
            try:
                for line in self.fp.read_trailer_lines():
                    if line[0] in b' \t':
                        # It's a continuation line.
                        v = line.strip()
                    else:
                        try:
                            k, v = line.split(b':', 1)
                        except ValueError:
                            raise ValueError('Illegal header line.')
                        k = k.strip().title()
                        v = v.strip()
                    if k in cheroot.server.comma_separated_headers:
                        existing = self.trailers.get(k)
                        if existing:
                            v = b', '.join((existing, v))
                    self.trailers[k] = v
            except Exception:
                e = sys.exc_info()[1]
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, 'Maximum request length: %r' % e.args[1])
                else:
                    raise
class RequestBody(Entity):
    """The entity of the HTTP request."""
    bufsize = 8 * 1024
    """The buffer size used when reading the socket."""
    # Don't parse the request body at all if the client didn't provide
    # a Content-Type header. See
    # https://github.com/cherrypy/cherrypy/issues/790
    default_content_type = ''
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however, the MIME spec
    declares that a part with no Content-Type defaults to "text/plain"
    (see :class:`Part<cherrypy._cpreqbody.Part>`).
    """
    maxbytes = None
    """Raise ``MaxSizeExceeded`` if more bytes than this are read from
    the socket.
    """
    def __init__(self, fp, headers, params=None, request_params=None):
        Entity.__init__(self, fp, headers, params)
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
        # When no explicit charset parameter is provided by the
        # sender, media subtypes of the "text" type are defined
        # to have a default charset value of "ISO-8859-1" when
        # received via HTTP.
        if self.content_type.value.startswith('text/'):
            # Append ISO-8859-1 only if no spelling of it is present yet.
            for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
                if c in self.attempt_charsets:
                    break
            else:
                self.attempt_charsets.append('ISO-8859-1')
        # Temporary fix while deprecating passing .parts as .params.
        self.processors['multipart'] = _old_process_multipart
        if request_params is None:
            request_params = {}
        self.request_params = request_params
    def process(self):
        """Process the request entity based on its Content-Type."""
        # "The presence of a message-body in a request is signaled by the
        # inclusion of a Content-Length or Transfer-Encoding header field in
        # the request's message-headers."
        # It is possible to send a POST request with no body, for example;
        # however, app developers are responsible in that case to set
        # cherrypy.request.process_body to False so this method isn't called.
        h = cherrypy.serving.request.headers
        if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
            raise cherrypy.HTTPError(411)
        # Replace the raw fp with a SizedReader so downstream reads are
        # bounded by Content-Length / maxbytes and trailers are handled.
        self.fp = SizedReader(self.fp, self.length,
                              self.maxbytes, bufsize=self.bufsize,
                              has_trailers='Trailer' in h)
        super(RequestBody, self).process()
        # Body params should also be a part of the request_params
        # add them in here.
        request_params = self.request_params
        for key, value in self.params.items():
            if key in request_params:
                # Repeated keys accumulate into a list.
                if not isinstance(request_params[key], list):
                    request_params[key] = [request_params[key]]
                request_params[key].append(value)
            else:
                request_params[key] = value
| bsd-3-clause | 1382b61681917e958d824b0c8ca6ac20 | 35.638469 | 79 | 0.586032 | 4.27119 | false | false | false | false |
cherrypy/cherrypy | cherrypy/lib/httputil.py | 1 | 18007 | """HTTP library functions.
This module contains functions for building an HTTP application
framework: any one, not just one whose name starts with "Ch". ;) If you
reference any modules from some popular framework inside *this* module,
FuManChu will personally hang you up by your thumbs and submit you
to a public caning.
"""
import functools
import email.utils
import re
import builtins
from binascii import b2a_base64
from cgi import parse_header
from email.header import decode_header
from http.server import BaseHTTPRequestHandler
from urllib.parse import unquote_plus
import jaraco.collections
import cherrypy
from cherrypy._cpcompat import ntob, ntou
# Map of int status code -> (reason-phrase, long description), seeded
# from the stdlib's table.
response_codes = BaseHTTPRequestHandler.responses.copy()
# From https://github.com/cherrypy/cherrypy/issues/361
# Override the stdlib wording for 500/503 with fuller descriptions.
response_codes[500] = ('Internal Server Error',
                       'The server encountered an unexpected condition '
                       'which prevented it from fulfilling the request.')
response_codes[503] = ('Service Unavailable',
                       'The server is currently unable to handle the '
                       'request due to a temporary overloading or '
                       'maintenance of the server.')
# Render a timestamp as an RFC 1123 HTTP-date (always GMT).
HTTPDate = functools.partial(email.utils.formatdate, usegmt=True)
def urljoin(*atoms):
    r"""Return the given path \*atoms, joined into a single URL.

    This will correctly join a SCRIPT_NAME and PATH_INFO into the
    original URL, even if either atom is blank.
    """
    # Drop blank atoms, then collapse any doubled slashes produced by
    # joining atoms that already begin or end with '/'.
    joined = '/'.join(filter(None, atoms))
    while '//' in joined:
        joined = joined.replace('//', '/')
    # An all-blank input yields "", which callers expect to see as "/".
    return joined or '/'
def urljoin_bytes(*atoms):
    """Return the given path `*atoms`, joined into a single URL.

    This will correctly join a SCRIPT_NAME and PATH_INFO into the
    original URL, even if either atom is blank.
    """
    # Drop blank atoms, then collapse doubled slashes from the join.
    joined = b'/'.join(filter(None, atoms))
    while b'//' in joined:
        joined = joined.replace(b'//', b'/')
    # An all-blank input yields b"", which callers expect as b"/".
    return joined or b'/'
def protocol_from_http(protocol_str):
    """Return a (major, minor) protocol tuple from an 'HTTP/x.y' string.

    Parsing by splitting on '/' and '.' (instead of fixed character
    positions 5 and 7) also handles multi-digit version components such
    as 'HTTP/10.2', while remaining backward compatible for the usual
    'HTTP/1.0', 'HTTP/1.1' and 'HTTP/2.0' inputs.

    :param protocol_str: a protocol string such as 'HTTP/1.1'.
    :raises ValueError: if the string is not of the form
        '<name>/<major>.<minor>' with integer components.
    :raises IndexError: if no '/' separator is present.
    """
    version = protocol_str.split('/', 1)[1]
    major, minor = version.split('.', 1)
    return int(major), int(minor)
def get_ranges(headervalue, content_length):
    """Return a list of (start, stop) indices from a Range header, or None.

    Each (start, stop) tuple will be composed of two ints, which are suitable
    for use in a slicing operation. That is, the header "Range: bytes=3-6",
    if applied against a Python string, is requesting resource[3:7]. This
    function will return the list [(3, 7)].

    If this function returns an empty list, you should return HTTP 416.
    """
    if not headervalue:
        return None
    ranges = []
    unit, specs = headervalue.split('=', 1)
    for spec in specs.split(','):
        first, last = (piece.strip() for piece in spec.split('-', 1))
        if not first:
            # suffix-byte-range-spec: the last N bytes of the entity.
            if not last:
                # Neither side given -- treat the whole header as absent
                # (see the RFC 2616 sec 14.16 note below).
                return None
            # RFC 2616 Section 14.35.1:
            # If the entity is shorter than the specified suffix-length,
            # the entire entity-body is used.
            suffix = int(last)
            if suffix > content_length:
                ranges.append((0, content_length))
            else:
                ranges.append((content_length - suffix, content_length))
            continue
        begin = int(first)
        end = int(last) if last else content_length - 1
        if begin >= content_length:
            # From rfc 2616 sec 14.16:
            # "If the server receives a request (other than one
            # including an If-Range request-header field) with an
            # unsatisfiable Range request-header field ... it SHOULD
            # return a response code of 416 (Requested range not
            # satisfiable)." -- skip this spec; an all-skipped header
            # yields the empty list the caller maps to 416.
            continue
        if end < begin:
            # From rfc 2616 sec 14.16:
            # "If the server ignores a byte-range-spec because it
            # is syntactically invalid, the server SHOULD treat
            # the request as if the invalid Range header field
            # did not exist. (Normally, this means return a 200
            # response containing the full entity)."
            return None
        ranges.append((begin, end + 1))
    return ranges
class HeaderElement(object):
    """An element (with parameters) from an HTTP header's element list."""

    def __init__(self, value, params=None):
        """Store the element ``value`` and an optional ``params`` dict."""
        self.value = value
        if params is None:
            params = {}
        self.params = params

    def __cmp__(self, other):
        # Python 2 legacy hook. ``builtins.cmp`` no longer exists on
        # Python 3, so calling it raised AttributeError; emulate its
        # -1/0/1 contract with the comparison-difference idiom instead.
        return (self.value > other.value) - (self.value < other.value)

    def __lt__(self, other):
        return self.value < other.value

    def __str__(self):
        # Render as 'value;k1=v1;k2=v2' -- the wire format of a single
        # header element.
        p = [';%s=%s' % (k, v) for k, v in self.params.items()]
        return str('%s%s' % (self.value, ''.join(p)))

    def __bytes__(self):
        return ntob(self.__str__())

    def __unicode__(self):
        return ntou(self.__str__())

    @staticmethod
    def parse(elementstr):
        """Transform 'token;key=val' to ('token', {'key': 'val'})."""
        initial_value, params = parse_header(elementstr)
        return initial_value, params

    @classmethod
    def from_str(cls, elementstr):
        """Construct an instance from a string of the form 'token;key=val'."""
        ival, params = cls.parse(elementstr)
        return cls(ival, params)
# Matches the '; q=' separator (optional spaces around 'q' and '=')
# between a media-range and its quality value in Accept* headers.
q_separator = re.compile(r'; *q *=')
class AcceptElement(HeaderElement):
    """An element (with parameters) from an Accept* header's element list.

    AcceptElement objects are comparable; the more-preferred object will be
    "less than" the less-preferred object. They are also therefore sortable;
    if you sort a list of AcceptElement objects, they will be listed in
    priority order; the most preferred value will be first. Yes, it should
    have been the other way around, but it's too late to fix now.
    """

    @classmethod
    def from_str(cls, elementstr):
        """Construct an instance from an Accept*-style element string."""
        qvalue = None
        # The first "q" parameter (if any) separates the initial
        # media-range parameter(s) (if any) from the accept-params.
        atoms = q_separator.split(elementstr, 1)
        media_range = atoms.pop(0).strip()
        if atoms:
            # The qvalue for an Accept header can have extensions. The other
            # headers cannot, but it's easier to parse them as if they did.
            qvalue = HeaderElement.from_str(atoms[0].strip())
        media_type, params = cls.parse(media_range)
        if qvalue is not None:
            params['q'] = qvalue
        return cls(media_type, params)

    @property
    def qvalue(self):
        'The qvalue, or priority, of this value.'
        val = self.params.get('q', '1')
        if isinstance(val, HeaderElement):
            # 'q' was parsed as a full element (it may carry extensions);
            # the numeric priority is its bare value.
            val = val.value
        try:
            return float(val)
        except ValueError as val_err:
            """Fail client requests with invalid quality value.
            Ref: https://github.com/cherrypy/cherrypy/issues/1370
            """
            raise cherrypy.HTTPError(
                400,
                'Malformed HTTP header: `{}`'.
                format(str(self)),
            ) from val_err

    def __cmp__(self, other):
        # Python 2 legacy hook. ``builtins.cmp`` no longer exists on
        # Python 3, so calling it raised AttributeError; emulate its
        # -1/0/1 contract with the comparison-difference idiom instead.
        diff = (self.qvalue > other.qvalue) - (self.qvalue < other.qvalue)
        if diff == 0:
            first, second = str(self), str(other)
            diff = (first > second) - (first < second)
        return diff

    def __lt__(self, other):
        if self.qvalue == other.qvalue:
            return str(self) < str(other)
        else:
            return self.qvalue < other.qvalue
# Split a header element list on commas that are not inside double quotes.
RE_HEADER_SPLIT = re.compile(',(?=(?:[^"]*"[^"]*")*[^"]*$)')
def header_elements(fieldname, fieldvalue):
    """Return a sorted HeaderElement list from a comma-separated header string.
    """
    if not fieldvalue:
        return []
    # Accept* and TE headers carry qvalues; other headers are plain elements.
    if fieldname.startswith('Accept') or fieldname == 'TE':
        element_class = AcceptElement
    else:
        element_class = HeaderElement
    result = [
        element_class.from_str(element)
        for element in RE_HEADER_SPLIT.split(fieldvalue)
    ]
    # Most-preferred first; keep reversed(sorted(...)) so tie order matches.
    return list(reversed(sorted(result)))
def decode_TEXT(value):
    r"""
    Decode :rfc:`2047` TEXT
    >>> decode_TEXT("=?utf-8?q?f=C3=BCr?=") == b'f\xfcr'.decode('latin-1')
    True
    """
    # Encoded words come back from decode_header as (bytes, charset) pairs;
    # decode each one, then join the parts back into a single string.
    parts = [
        atom if charset is None else atom.decode(charset)
        for atom, charset in decode_header(value)
    ]
    return ''.join(parts)
def decode_TEXT_maybe(value):
    """
    Decode the text but only if '=?' appears in it.
    """
    if '=?' not in value:
        return value
    return decode_TEXT(value)
def valid_status(status):
    """Return legal HTTP status Code, Reason-phrase and Message.

    The status arg must be an int, a str that begins with an int
    or the constant from ``http.client`` stdlib module.
    If no reason-phrase is supplied, a default reason-phrase will
    be provided.

    >>> import http.client
    >>> from http.server import BaseHTTPRequestHandler
    >>> valid_status(http.client.ACCEPTED) == (
    ...     int(http.client.ACCEPTED),
    ... ) + BaseHTTPRequestHandler.responses[http.client.ACCEPTED]
    True
    """
    status = status or 200
    code, reason = status, None
    if isinstance(status, str):
        # 'NNN Reason phrase' -> split off the optional phrase.
        code, _, tail = status.partition(' ')
        reason = tail.strip() or None
    try:
        code = int(code)
    except (TypeError, ValueError):
        raise ValueError('Illegal response status from server '
                         '(%s is non-numeric).' % repr(code))
    if not 100 <= code <= 599:
        raise ValueError('Illegal response status from server '
                         '(%s is out of range).' % repr(code))
    # Unknown-but-legal codes get empty reason/message defaults.
    default_reason, message = response_codes.get(code, ('', ''))
    if reason is None:
        reason = default_reason
    return code, reason, message
# NOTE: the parse_qs functions that follow are modified version of those
# in the python3.0 source - we need to pass through an encoding to the unquote
# method, but the default parse_qs function doesn't allow us to. These do.
def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
"""Parse a query given as a string argument.
Arguments:
qs: URL-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a dict, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
d = {}
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote_plus(nv[0], encoding, errors='strict')
value = unquote_plus(nv[1], encoding, errors='strict')
if name in d:
if not isinstance(d[name], list):
d[name] = [d[name]]
d[name].append(value)
else:
d[name] = value
return d
# Server-side image map coordinates: '<x>,<y>' at the start of a query string.
image_map_pattern = re.compile(r'[0-9]+,[0-9]+')
def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
    """Build a params dictionary from a query_string.

    Duplicate key/value pairs in the provided query_string will be
    returned as {'key': [val1, val2, ...]}. Single key/values will
    be returned as strings: {'key': 'value'}.
    """
    if not image_map_pattern.match(query_string):
        return _parse_qs(query_string, keep_blank_values, encoding=encoding)
    # Server-side image map. Map the coords to 'x' and 'y'
    # (like CGI::Request does).
    coords = query_string.split(',')
    return {'x': int(coords[0]), 'y': int(coords[1])}
class CaseInsensitiveDict(jaraco.collections.KeyTransformingDict):
    """A case-insensitive dict subclass.

    Each key is changed on entry to title case.
    """

    @staticmethod
    def transform_key(key):
        # TODO(#1830): why is None special-cased to the string 'None'?
        return 'None' if key is None else key.title()
# TEXT = <any OCTET except CTLs, but including LWS>
#
# A CRLF is allowed in the definition of TEXT only as part of a header
# field continuation. It is expected that the folding LWS will be
# replaced with a single SP before interpretation of the TEXT value."
#
# Build translate() arguments that strip CTL characters (0-31 and 127)
# from header values; used by HeaderMap.encode_header_item.
if str == bytes:
    # Python 2: str.translate takes a 256-char table plus deletechars.
    header_translate_table = ''.join([chr(i) for i in range(256)])
    header_translate_deletechars = ''.join(
        [chr(i) for i in range(32)]) + chr(127)
else:
    # Python 3: bytes.translate accepts None plus a bytes of deletechars.
    header_translate_table = None
    header_translate_deletechars = bytes(range(32)) + bytes([127])
class HeaderMap(CaseInsensitiveDict):
    """A dict subclass for HTTP request and response headers.
    Each key is changed on entry to str(key).title(). This allows headers
    to be case-insensitive and avoid duplicates.
    Values are header values (decoded according to :rfc:`2047` if necessary).
    """
    # HTTP protocol version; RFC 2047 fallback encoding is only attempted
    # for (1, 1) -- see encode() below.
    protocol = (1, 1)
    # Charsets tried, in order, when encoding header values for the wire.
    encodings = ['ISO-8859-1']
    # Someday, when http-bis is done, this will probably get dropped
    # since few servers, clients, or intermediaries do it. But until then,
    # we're going to obey the spec as is.
    # "Words of *TEXT MAY contain characters from character sets other than
    # ISO-8859-1 only when encoded according to the rules of RFC 2047."
    use_rfc_2047 = True
    def elements(self, key):
        """Return a sorted list of HeaderElements for the given header."""
        return header_elements(self.transform_key(key), self.get(key))
    def values(self, key):
        """Return a sorted list of HeaderElement.value for the given header."""
        # NOTE(review): this shadows dict.values with a different signature.
        return [e.value for e in self.elements(key)]
    def output(self):
        """Transform self into a list of (name, value) tuples."""
        return list(self.encode_header_items(self.items()))
    @classmethod
    def encode_header_items(cls, header_items):
        """
        Prepare the sequence of name, value tuples into a form suitable for
        transmitting on the wire for HTTP.
        """
        for k, v in header_items:
            if not isinstance(v, str) and not isinstance(v, bytes):
                # Coerce non-string values (e.g. ints) before encoding.
                v = str(v)
            yield tuple(map(cls.encode_header_item, (k, v)))
    @classmethod
    def encode_header_item(cls, item):
        if isinstance(item, str):
            item = cls.encode(item)
        # See header_translate_* constants above.
        # Replace only if you really know what you're doing.
        return item.translate(
            header_translate_table, header_translate_deletechars)
    @classmethod
    def encode(cls, v):
        """Return the given header name or value, encoded for HTTP output."""
        for enc in cls.encodings:
            try:
                return v.encode(enc)
            except UnicodeEncodeError:
                continue
        if cls.protocol == (1, 1) and cls.use_rfc_2047:
            # Encode RFC-2047 TEXT
            # (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
            # We do our own here instead of using the email module
            # because we never want to fold lines--folding has
            # been deprecated by the HTTP working group.
            v = b2a_base64(v.encode('utf-8'))
            return (b'=?utf-8?b?' + v.strip(b'\n') + b'?=')
        raise ValueError('Could not encode header part %r using '
                         'any of the encodings %r.' %
                         (v, cls.encodings))
class Host(object):
    """An internet address.

    name
        Should be the client's host name. If not available (because no DNS
        lookup is performed), the IP address should be used instead.
    """

    # Class-level defaults, overridden per instance in __init__.
    ip = '0.0.0.0'
    port = 80
    name = 'unknown.tld'

    def __init__(self, ip, port, name=None):
        self.ip = ip
        self.port = port
        # Fall back to the IP when no host name is supplied.
        self.name = ip if name is None else name

    def __repr__(self):
        return 'httputil.Host(%r, %r, %r)' % (self.ip, self.port, self.name)
class SanitizedHost(str):
    r"""
    Wraps a raw host header received from the network in
    a sanitized version that elides dangerous characters.
    >>> SanitizedHost('foo\nbar')
    'foobar'
    >>> SanitizedHost('foo\nbar').raw
    'foo\nbar'
    A SanitizedInstance is only returned if sanitization was performed.
    >>> isinstance(SanitizedHost('foobar'), SanitizedHost)
    False
    """

    # Characters elided from the host header (CR and LF).
    dangerous = re.compile(r'[\n\r]')

    def __new__(cls, raw):
        clean = cls._sanitize(raw)
        if clean == raw:
            # Nothing removed: hand back the original plain str.
            return raw
        self = super().__new__(cls, clean)
        self.raw = raw
        return self

    @classmethod
    def _sanitize(cls, raw):
        return cls.dangerous.sub('', raw)
| bsd-3-clause | c6748796536cbfc6b65083ae4f26fca8 | 31.859489 | 79 | 0.603099 | 4.060203 | false | false | false | false |
cherrypy/cherrypy | cherrypy/test/test_config_server.py | 12 | 4037 | """Tests for the CherryPy configuration system."""
import os
import cherrypy
from cherrypy.test import helper
# Absolute directory of this test module (used to locate fixtures).
localDir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Client-side code #
class ServerConfigTests(helper.CPWebCase):
    # Exercises server-level config: multiple server instances, request
    # body/header size limits, and per-handler maxbytes overrides.
    @staticmethod
    def setup_server():
        class Root:
            @cherrypy.expose
            def index(self):
                # Echo the port so tests can verify which server answered.
                return cherrypy.request.wsgi_environ['SERVER_PORT']
            @cherrypy.expose
            def upload(self, file):
                return 'Size: %s' % len(file.file.read())
            @cherrypy.expose
            @cherrypy.config(**{'request.body.maxbytes': 100})
            def tinyupload(self):
                return cherrypy.request.body.read()
        cherrypy.tree.mount(Root())
        cherrypy.config.update({
            'server.socket_host': '0.0.0.0',
            'server.socket_port': 9876,
            'server.max_request_body_size': 200,
            'server.max_request_header_size': 500,
            'server.socket_timeout': 0.5,
            # Test explicit server.instance
            'server.2.instance': 'cherrypy._cpwsgi_server.CPWSGIServer',
            'server.2.socket_port': 9877,
            # Test non-numeric <servername>
            # Also test default server.instance = builtin server
            'server.yetanother.socket_port': 9878,
        })
    # Default port matches 'server.socket_port' above.
    PORT = 9876
    def testBasicConfig(self):
        self.getPage('/')
        self.assertBody(str(self.PORT))
    def testAdditionalServers(self):
        # Each extra server configured above must answer on its own port.
        if self.scheme == 'https':
            return self.skip('not available under ssl')
        self.PORT = 9877
        self.getPage('/')
        self.assertBody(str(self.PORT))
        self.PORT = 9878
        self.getPage('/')
        self.assertBody(str(self.PORT))
    def testMaxRequestSizePerHandler(self):
        # /tinyupload allows exactly 100 body bytes; 101 must yield 413.
        if getattr(cherrypy.server, 'using_apache', False):
            return self.skip('skipped due to known Apache differences... ')
        self.getPage('/tinyupload', method='POST',
                     headers=[('Content-Type', 'text/plain'),
                              ('Content-Length', '100')],
                     body='x' * 100)
        self.assertStatus(200)
        self.assertBody('x' * 100)
        self.getPage('/tinyupload', method='POST',
                     headers=[('Content-Type', 'text/plain'),
                              ('Content-Length', '101')],
                     body='x' * 101)
        self.assertStatus(413)
    def testMaxRequestSize(self):
        # Header and body limits from the global config (500 / 200 bytes).
        if getattr(cherrypy.server, 'using_apache', False):
            return self.skip('skipped due to known Apache differences... ')
        for size in (500, 5000, 50000):
            self.getPage('/', headers=[('From', 'x' * 500)])
            self.assertStatus(413)
        # Test for https://github.com/cherrypy/cherrypy/issues/421
        # (Incorrect border condition in readline of SizeCheckWrapper).
        # This hangs in rev 891 and earlier.
        lines256 = 'x' * 248
        self.getPage('/',
                     headers=[('Host', '%s:%s' % (self.HOST, self.PORT)),
                              ('From', lines256)])
        # Test upload
        cd = (
            'Content-Disposition: form-data; '
            'name="file"; '
            'filename="hello.txt"'
        )
        body = '\r\n'.join([
            '--x',
            cd,
            'Content-Type: text/plain',
            '',
            '%s',
            '--x--'])
        # Pad the upload so the whole request body is exactly 200 bytes.
        partlen = 200 - len(body)
        b = body % ('x' * partlen)
        h = [('Content-type', 'multipart/form-data; boundary=x'),
             ('Content-Length', '%s' % len(b))]
        self.getPage('/upload', h, 'POST', b)
        self.assertBody('Size: %d' % partlen)
        b = body % ('x' * 200)
        h = [('Content-type', 'multipart/form-data; boundary=x'),
             ('Content-Length', '%s' % len(b))]
        self.getPage('/upload', h, 'POST', b)
        self.assertStatus(413)
| bsd-3-clause | bf0c20c773ed31ef37b498fdd0269118 | 31.039683 | 76 | 0.513996 | 4.140513 | false | true | false | false |
cherrypy/cherrypy | cherrypy/test/test_routes.py | 12 | 2583 | """Test Routes dispatcher."""
import os
import importlib
import pytest
import cherrypy
from cherrypy.test import helper
# Absolute directory of this test module.
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
class RoutesDispatchTest(helper.CPWebCase):
    """Routes dispatcher test suite."""
    @staticmethod
    def setup_server():
        """Set up cherrypy test instance."""
        try:
            importlib.import_module('routes')
        except ImportError:
            pytest.skip('Install routes to test RoutesDispatcher code')
        class Dummy:
            def index(self):
                return 'I said good day!'
        class City:
            def __init__(self, name):
                self.name = name
                self.population = 10000
            @cherrypy.config(**{
                'tools.response_headers.on': True,
                'tools.response_headers.headers': [
                    ('Content-Language', 'en-GB'),
                ],
            })
            def index(self, **kwargs):
                return 'Welcome to %s, pop. %s' % (self.name, self.population)
            def update(self, **kwargs):
                self.population = kwargs['pop']
                return 'OK'
        d = cherrypy.dispatch.RoutesDispatcher()
        # GET-only and method-conditional routes for the same path.
        d.connect(action='index', name='hounslow', route='/hounslow',
                  controller=City('Hounslow'))
        d.connect(
            name='surbiton', route='/surbiton', controller=City('Surbiton'),
            action='index', conditions=dict(method=['GET']))
        d.mapper.connect('/surbiton', controller='surbiton',
                         action='update', conditions=dict(method=['POST']))
        d.connect('main', ':action', controller=Dummy())
        conf = {'/': {'request.dispatch': d}}
        cherrypy.tree.mount(root=None, config=conf)
    def test_Routes_Dispatch(self):
        """Check that routes package based URI dispatching works correctly."""
        self.getPage('/hounslow')
        self.assertStatus('200 OK')
        self.assertBody('Welcome to Hounslow, pop. 10000')
        self.getPage('/foo')
        self.assertStatus('404 Not Found')
        self.getPage('/surbiton')
        self.assertStatus('200 OK')
        self.assertBody('Welcome to Surbiton, pop. 10000')
        # POST hits the 'update' action; the controller state must persist.
        self.getPage('/surbiton', method='POST', body='pop=1327')
        self.assertStatus('200 OK')
        self.assertBody('OK')
        self.getPage('/surbiton')
        self.assertStatus('200 OK')
        self.assertHeader('Content-Language', 'en-GB')
        self.assertBody('Welcome to Surbiton, pop. 1327')
| bsd-3-clause | 300f2a778fc83115718aec9449b67e4a | 31.2875 | 78 | 0.565234 | 4.248355 | false | true | false | false |
cherrypy/cherrypy | cherrypy/test/test_conn.py | 3 | 30744 | """Tests for TCP connection handling, including proper and timely close."""
import errno
import socket
import sys
import time
import urllib.parse
from http.client import BadStatusLine, HTTPConnection, NotConnected
from cheroot.test import webtest
import cherrypy
from cherrypy._cpcompat import HTTPSConnection, ntob, tonative
from cherrypy.test import helper
# Server socket timeout (seconds) used throughout these connection tests.
timeout = 1
# Distinctive response body used to detect truncated or corrupted reads.
pov = 'pPeErRsSiIsStTeEnNcCeE oOfF vViIsSiIoOnN'
def setup_server():
    # Mount a Root app with handlers exercising streaming, uploads,
    # custom status codes, and error hooks; applied by each test class.
    def raise500():
        raise cherrypy.HTTPError(500)
    class Root:
        @cherrypy.expose
        def index(self):
            return pov
        # Aliases so tests can make several distinct requests to one handler.
        page1 = index
        page2 = index
        page3 = index
        @cherrypy.expose
        def hello(self):
            return 'Hello, world!'
        @cherrypy.expose
        def timeout(self, t):
            return str(cherrypy.server.httpserver.timeout)
        @cherrypy.expose
        @cherrypy.config(**{'response.stream': True})
        def stream(self, set_cl=False):
            if set_cl:
                cherrypy.response.headers['Content-Length'] = 10
            def content():
                for x in range(10):
                    yield str(x)
            return content()
        @cherrypy.expose
        def error(self, code=500):
            raise cherrypy.HTTPError(code)
        @cherrypy.expose
        def upload(self):
            if not cherrypy.request.method == 'POST':
                raise AssertionError("'POST' != request.method %r" %
                                     cherrypy.request.method)
            return "thanks for '%s'" % cherrypy.request.body.read()
        @cherrypy.expose
        def custom(self, response_code):
            cherrypy.response.status = response_code
            return 'Code = %s' % response_code
        @cherrypy.expose
        @cherrypy.config(**{'hooks.on_start_resource': raise500})
        def err_before_read(self):
            return 'ok'
        @cherrypy.expose
        def one_megabyte_of_a(self):
            return ['a' * 1024] * 1024
        @cherrypy.expose
        # Turn off the encoding tool so it doesn't collapse
        # our response body and recalculate the Content-Length.
        @cherrypy.config(**{'tools.encode.on': False})
        def custom_cl(self, body, cl):
            # Deliberately set a possibly-wrong Content-Length for testing.
            cherrypy.response.headers['Content-Length'] = cl
            if not isinstance(body, list):
                body = [body]
            newbody = []
            for chunk in body:
                if isinstance(chunk, str):
                    chunk = chunk.encode('ISO-8859-1')
                newbody.append(chunk)
            return newbody
    cherrypy.tree.mount(Root())
    cherrypy.config.update({
        'server.max_request_body_size': 1001,
        'server.socket_timeout': timeout,
    })
class ConnectionCloseTests(helper.CPWebCase):
    # Verifies persistent-connection semantics: keep-alive, client close,
    # and streaming with/without Content-Length under HTTP/1.0 and 1.1.
    setup_server = staticmethod(setup_server)
    def test_HTTP11(self):
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        self.persistent = True
        # Make the first request and assert there's no "Connection: close".
        self.getPage('/')
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertNoHeader('Connection')
        # Make another request on the same connection.
        self.getPage('/page1')
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertNoHeader('Connection')
        # Test client-side close.
        self.getPage('/page2', headers=[('Connection', 'close')])
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertHeader('Connection', 'close')
        # Make another request on the same connection, which should error.
        self.assertRaises(NotConnected, self.getPage, '/')
    def test_Streaming_no_len(self):
        try:
            self._streaming(set_cl=False)
        finally:
            # Best-effort close; the conn may be a class, not an instance.
            try:
                self.HTTP_CONN.close()
            except (TypeError, AttributeError):
                pass
    def test_Streaming_with_len(self):
        try:
            self._streaming(set_cl=True)
        finally:
            # Best-effort close; the conn may be a class, not an instance.
            try:
                self.HTTP_CONN.close()
            except (TypeError, AttributeError):
                pass
    def _streaming(self, set_cl):
        # Shared body for both streaming tests; set_cl toggles whether the
        # handler declares a Content-Length up front.
        if cherrypy.server.protocol_version == 'HTTP/1.1':
            self.PROTOCOL = 'HTTP/1.1'
            self.persistent = True
            # Make the first request and assert there's no "Connection: close".
            self.getPage('/')
            self.assertStatus('200 OK')
            self.assertBody(pov)
            self.assertNoHeader('Connection')
            # Make another, streamed request on the same connection.
            if set_cl:
                # When a Content-Length is provided, the content should stream
                # without closing the connection.
                self.getPage('/stream?set_cl=Yes')
                self.assertHeader('Content-Length')
                self.assertNoHeader('Connection', 'close')
                self.assertNoHeader('Transfer-Encoding')
                self.assertStatus('200 OK')
                self.assertBody('0123456789')
            else:
                # When no Content-Length response header is provided,
                # streamed output will either close the connection, or use
                # chunked encoding, to determine transfer-length.
                self.getPage('/stream')
                self.assertNoHeader('Content-Length')
                self.assertStatus('200 OK')
                self.assertBody('0123456789')
                chunked_response = False
                for k, v in self.headers:
                    if k.lower() == 'transfer-encoding':
                        if str(v) == 'chunked':
                            chunked_response = True
                if chunked_response:
                    self.assertNoHeader('Connection', 'close')
                else:
                    self.assertHeader('Connection', 'close')
                    # Make another request on the same connection, which should
                    # error.
                    self.assertRaises(NotConnected, self.getPage, '/')
                # Try HEAD. See
                # https://github.com/cherrypy/cherrypy/issues/864.
                self.getPage('/stream', method='HEAD')
                self.assertStatus('200 OK')
                self.assertBody('')
                self.assertNoHeader('Transfer-Encoding')
        else:
            self.PROTOCOL = 'HTTP/1.0'
            self.persistent = True
            # Make the first request and assert Keep-Alive.
            self.getPage('/', headers=[('Connection', 'Keep-Alive')])
            self.assertStatus('200 OK')
            self.assertBody(pov)
            self.assertHeader('Connection', 'Keep-Alive')
            # Make another, streamed request on the same connection.
            if set_cl:
                # When a Content-Length is provided, the content should
                # stream without closing the connection.
                self.getPage('/stream?set_cl=Yes',
                             headers=[('Connection', 'Keep-Alive')])
                self.assertHeader('Content-Length')
                self.assertHeader('Connection', 'Keep-Alive')
                self.assertNoHeader('Transfer-Encoding')
                self.assertStatus('200 OK')
                self.assertBody('0123456789')
            else:
                # When a Content-Length is not provided,
                # the server should close the connection.
                self.getPage('/stream', headers=[('Connection', 'Keep-Alive')])
                self.assertStatus('200 OK')
                self.assertBody('0123456789')
                self.assertNoHeader('Content-Length')
                self.assertNoHeader('Connection', 'Keep-Alive')
                self.assertNoHeader('Transfer-Encoding')
                # Make another request on the same connection, which should
                # error.
                self.assertRaises(NotConnected, self.getPage, '/')
    def test_HTTP10_KeepAlive(self):
        self.PROTOCOL = 'HTTP/1.0'
        if self.scheme == 'https':
            self.HTTP_CONN = HTTPSConnection
        else:
            self.HTTP_CONN = HTTPConnection
        # Test a normal HTTP/1.0 request.
        self.getPage('/page2')
        self.assertStatus('200 OK')
        self.assertBody(pov)
        # Apache, for example, may emit a Connection header even for HTTP/1.0
        # self.assertNoHeader("Connection")
        # Test a keep-alive HTTP/1.0 request.
        self.persistent = True
        self.getPage('/page3', headers=[('Connection', 'Keep-Alive')])
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertHeader('Connection', 'Keep-Alive')
        # Remove the keep-alive header again.
        self.getPage('/page3')
        self.assertStatus('200 OK')
        self.assertBody(pov)
        # Apache, for example, may emit a Connection header even for HTTP/1.0
        # self.assertNoHeader("Connection")
class PipelineTests(helper.CPWebCase):
    # Exercises raw-socket HTTP/1.1 behavior: idle timeouts (408),
    # request pipelining, and 100 Continue handling.
    setup_server = staticmethod(setup_server)
    def test_HTTP11_Timeout(self):
        # If we timeout without sending any data,
        # the server will close the conn with a 408.
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        # Connect but send nothing.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.auto_open = False
        conn.connect()
        # Wait for our socket timeout
        time.sleep(timeout * 2)
        # The request should have returned 408 already.
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.assertEqual(response.status, 408)
        conn.close()
        # Connect but send half the headers only.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.auto_open = False
        conn.connect()
        conn.send(b'GET /hello HTTP/1.1')
        conn.send(('Host: %s' % self.HOST).encode('ascii'))
        # Wait for our socket timeout
        time.sleep(timeout * 2)
        # The conn should have already sent 408.
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.assertEqual(response.status, 408)
        conn.close()
    def test_HTTP11_Timeout_after_request(self):
        # If we timeout after at least one request has succeeded,
        # the server will close the conn without 408.
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        # Make an initial request
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest('GET', '/timeout?t=%s' % timeout, skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.endheaders()
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.assertEqual(response.status, 200)
        self.body = response.read()
        self.assertBody(str(timeout))
        # Make a second request on the same socket
        conn._output(b'GET /hello HTTP/1.1')
        conn._output(ntob('Host: %s' % self.HOST, 'ascii'))
        conn._send_output()
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.assertEqual(response.status, 200)
        self.body = response.read()
        self.assertBody('Hello, world!')
        # Wait for our socket timeout
        time.sleep(timeout * 2)
        # Make another request on the same socket, which should error
        conn._output(b'GET /hello HTTP/1.1')
        conn._output(ntob('Host: %s' % self.HOST, 'ascii'))
        conn._send_output()
        response = conn.response_class(conn.sock, method='GET')
        msg = (
            "Writing to timed out socket didn't fail as it should have: %s")
        try:
            response.begin()
        except Exception:
            # Either a socket error or a bad status line is acceptable here.
            if not isinstance(sys.exc_info()[1],
                              (socket.error, BadStatusLine)):
                self.fail(msg % sys.exc_info()[1])
        else:
            if response.status != 408:
                self.fail(msg % response.read())
        conn.close()
        # Make another request on a new socket, which should work
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest('GET', '/', skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.endheaders()
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.assertEqual(response.status, 200)
        self.body = response.read()
        self.assertBody(pov)
        # Make another request on the same socket,
        # but timeout on the headers
        conn.send(b'GET /hello HTTP/1.1')
        # Wait for our socket timeout
        time.sleep(timeout * 2)
        response = conn.response_class(conn.sock, method='GET')
        try:
            response.begin()
        except Exception:
            if not isinstance(sys.exc_info()[1],
                              (socket.error, BadStatusLine)):
                self.fail(msg % sys.exc_info()[1])
        else:
            if response.status != 408:
                self.fail(msg % response.read())
        conn.close()
        # Retry the request on a new connection, which should work
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest('GET', '/', skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.endheaders()
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.assertEqual(response.status, 200)
        self.body = response.read()
        self.assertBody(pov)
        conn.close()
    def test_HTTP11_pipelining(self):
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        # Test pipelining. httplib doesn't support this directly.
        self.persistent = True
        conn = self.HTTP_CONN
        # Put request 1
        conn.putrequest('GET', '/hello', skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.endheaders()
        for trial in range(5):
            # Put next request
            conn._output(b'GET /hello HTTP/1.1')
            conn._output(ntob('Host: %s' % self.HOST, 'ascii'))
            conn._send_output()
            # Retrieve previous response
            response = conn.response_class(conn.sock, method='GET')
            # there is a bug in python3 regarding the buffering of
            # ``conn.sock``. Until that bug get's fixed we will
            # monkey patch the ``response`` instance.
            # https://bugs.python.org/issue23377
            response.fp = conn.sock.makefile('rb', 0)
            response.begin()
            body = response.read(13)
            self.assertEqual(response.status, 200)
            self.assertEqual(body, b'Hello, world!')
        # Retrieve final response
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        body = response.read()
        self.assertEqual(response.status, 200)
        self.assertEqual(body, b'Hello, world!')
        conn.close()
    def test_100_Continue(self):
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        self.persistent = True
        conn = self.HTTP_CONN
        # Try a page without an Expect request header first.
        # Note that httplib's response.begin automatically ignores
        # 100 Continue responses, so we must manually check for it.
        try:
            conn.putrequest('POST', '/upload', skip_host=True)
            conn.putheader('Host', self.HOST)
            conn.putheader('Content-Type', 'text/plain')
            conn.putheader('Content-Length', '4')
            conn.endheaders()
            conn.send(ntob("d'oh"))
            response = conn.response_class(conn.sock, method='POST')
            version, status, reason = response._read_status()
            self.assertNotEqual(status, 100)
        finally:
            conn.close()
        # Now try a page with an Expect header...
        try:
            conn.connect()
            conn.putrequest('POST', '/upload', skip_host=True)
            conn.putheader('Host', self.HOST)
            conn.putheader('Content-Type', 'text/plain')
            conn.putheader('Content-Length', '17')
            conn.putheader('Expect', '100-continue')
            conn.endheaders()
            response = conn.response_class(conn.sock, method='POST')
            # ...assert and then skip the 100 response
            version, status, reason = response._read_status()
            self.assertEqual(status, 100)
            while True:
                line = response.fp.readline().strip()
                if line:
                    self.fail(
                        '100 Continue should not output any headers. Got %r' %
                        line)
                else:
                    break
            # ...send the body
            body = b'I am a small file'
            conn.send(body)
            # ...get the final response
            response.begin()
            self.status, self.headers, self.body = webtest.shb(response)
            self.assertStatus(200)
            self.assertBody("thanks for '%s'" % body)
        finally:
            conn.close()
class ConnectionTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
    def test_readall_or_close(self):
        # After a pre-read error (500), the server must drain or close the
        # request body so a follow-up request on the same socket still works.
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        if self.scheme == 'https':
            self.HTTP_CONN = HTTPSConnection
        else:
            self.HTTP_CONN = HTTPConnection
        # Test a max of 0 (the default) and then reset to what it was above.
        old_max = cherrypy.server.max_request_body_size
        for new_max in (0, old_max):
            cherrypy.server.max_request_body_size = new_max
            self.persistent = True
            conn = self.HTTP_CONN
            # Get a POST page with an error
            conn.putrequest('POST', '/err_before_read', skip_host=True)
            conn.putheader('Host', self.HOST)
            conn.putheader('Content-Type', 'text/plain')
            conn.putheader('Content-Length', '1000')
            conn.putheader('Expect', '100-continue')
            conn.endheaders()
            response = conn.response_class(conn.sock, method='POST')
            # ...assert and then skip the 100 response
            version, status, reason = response._read_status()
            self.assertEqual(status, 100)
            while True:
                skip = response.fp.readline().strip()
                if not skip:
                    break
            # ...send the body
            conn.send(ntob('x' * 1000))
            # ...get the final response
            response.begin()
            self.status, self.headers, self.body = webtest.shb(response)
            self.assertStatus(500)
            # Now try a working page with an Expect header...
            conn._output(b'POST /upload HTTP/1.1')
            conn._output(ntob('Host: %s' % self.HOST, 'ascii'))
            conn._output(b'Content-Type: text/plain')
            conn._output(b'Content-Length: 17')
            conn._output(b'Expect: 100-continue')
            conn._send_output()
            response = conn.response_class(conn.sock, method='POST')
            # ...assert and then skip the 100 response
            version, status, reason = response._read_status()
            self.assertEqual(status, 100)
            while True:
                skip = response.fp.readline().strip()
                if not skip:
                    break
            # ...send the body
            body = b'I am a small file'
            conn.send(body)
            # ...get the final response
            response.begin()
            self.status, self.headers, self.body = webtest.shb(response)
            self.assertStatus(200)
            self.assertBody("thanks for '%s'" % body)
            conn.close()
    def test_No_Message_Body(self):
        # 204 and 304 responses must omit both body and Content-Length
        # while keeping the connection alive.
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        # Set our HTTP_CONN to an instance so it persists between requests.
        self.persistent = True
        # Make the first request and assert there's no "Connection: close".
        self.getPage('/')
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertNoHeader('Connection')
        # Make a 204 request on the same connection.
        self.getPage('/custom/204')
        self.assertStatus(204)
        self.assertNoHeader('Content-Length')
        self.assertBody('')
        self.assertNoHeader('Connection')
        # Make a 304 request on the same connection.
        self.getPage('/custom/304')
        self.assertStatus(304)
        self.assertNoHeader('Content-Length')
        self.assertBody('')
        self.assertNoHeader('Connection')
    def test_Chunked_Encoding(self):
        # Chunked request bodies: normal decode (with chunk extensions and
        # trailers), and enforcement of max_request_body_size mid-stream.
        if cherrypy.server.protocol_version != 'HTTP/1.1':
            return self.skip()
        if (hasattr(self, 'harness') and
                'modpython' in self.harness.__class__.__name__.lower()):
            # mod_python forbids chunked encoding
            return self.skip()
        self.PROTOCOL = 'HTTP/1.1'
        # Set our HTTP_CONN to an instance so it persists between requests.
        self.persistent = True
        conn = self.HTTP_CONN
        # Try a normal chunked request (with extensions)
        body = ntob('8;key=value\r\nxx\r\nxxxx\r\n5\r\nyyyyy\r\n0\r\n'
                    'Content-Type: application/json\r\n'
                    '\r\n')
        conn.putrequest('POST', '/upload', skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.putheader('Transfer-Encoding', 'chunked')
        conn.putheader('Trailer', 'Content-Type')
        # Note that this is somewhat malformed:
        # we shouldn't be sending Content-Length.
        # RFC 2616 says the server should ignore it.
        conn.putheader('Content-Length', '3')
        conn.endheaders()
        conn.send(body)
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus('200 OK')
        self.assertBody("thanks for '%s'" % b'xx\r\nxxxxyyyyy')
        # Try a chunked request that exceeds server.max_request_body_size.
        # Note that the delimiters and trailer are included.
        body = ntob('3e3\r\n' + ('x' * 995) + '\r\n0\r\n\r\n')
        conn.putrequest('POST', '/upload', skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.putheader('Transfer-Encoding', 'chunked')
        conn.putheader('Content-Type', 'text/plain')
        # Chunked requests don't need a content-length
        # # conn.putheader("Content-Length", len(body))
        conn.endheaders()
        conn.send(body)
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(413)
        conn.close()
    def test_Content_Length_in(self):
        """An over-limit declared Content-Length is rejected up front."""
        # Try a non-chunked request where Content-Length exceeds
        # server.max_request_body_size. Assert error before body send.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest('POST', '/upload', skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.putheader('Content-Type', 'text/plain')
        conn.putheader('Content-Length', '9999')
        conn.endheaders()
        # No body is sent: the server must reject based on the header alone.
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(413)
        self.assertBody('The entity sent with the request exceeds '
                        'the maximum allowed bytes.')
        conn.close()
    def test_Content_Length_out_preheaders(self):
        """A response body longer than its declared Content-Length is a 500.

        The '/custom_cl' handler (defined elsewhere in this file) lets the
        test force a mismatched Content-Length header.
        """
        # Try a non-chunked response where Content-Length is less than
        # the actual bytes in the response body.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest('GET', '/custom_cl?body=I+have+too+many+bytes&cl=5',
                        skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.endheaders()
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        # Detected before headers are flushed, so a clean 500 is possible.
        self.assertStatus(500)
        self.assertBody(
            'The requested resource returned more bytes than the '
            'declared Content-Length.')
        conn.close()
    def test_Content_Length_out_postheaders(self):
        """Excess body bytes beyond Content-Length are silently truncated.

        Two ``body`` params make the handler stream multiple pieces, so the
        mismatch is only discovered after headers went out -- the server can
        no longer switch to a 500 and instead truncates at cl=5 bytes.
        """
        # Try a non-chunked response where Content-Length is less than
        # the actual bytes in the response body.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest(
            'GET', '/custom_cl?body=I+too&body=+have+too+many&cl=5',
            skip_host=True)
        conn.putheader('Host', self.HOST)
        conn.endheaders()
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(200)
        # Only the first 5 declared bytes arrive.
        self.assertBody('I too')
        conn.close()
    def test_598(self):
        """Regression test (CherryPy issue 598): a client that stalls while
        reading a large response must still receive the complete body."""
        tmpl = '{scheme}://{host}:{port}/one_megabyte_of_a/'
        url = tmpl.format(
            scheme=self.scheme,
            host=self.HOST,
            port=self.PORT,
        )
        remote_data_conn = urllib.request.urlopen(url)
        # Read a small prefix, then pause mid-download.  ``timeout`` is a
        # module-level constant not visible in this chunk -- presumably the
        # server socket timeout; TODO confirm.
        buf = remote_data_conn.read(512)
        time.sleep(timeout * 0.6)
        remaining = (1024 * 1024) - 512
        while remaining:
            data = remote_data_conn.read(remaining)
            if not data:
                break
            else:
                buf += data
                remaining -= len(data)
        # The full megabyte of 'a' must have arrived despite the pause.
        self.assertEqual(len(buf), 1024 * 1024)
        self.assertEqual(buf, ntob('a' * 1024 * 1024))
        self.assertEqual(remaining, 0)
        remote_data_conn.close()
def setup_upload_server():
    """Mount a minimal app whose POST /upload echoes the request body.

    The deliberately tiny limits (1001-byte body cap, queue size 5 with a
    0.1s queue timeout) exist so the 413 and queue-overflow tests below can
    trigger their edge cases quickly.
    """
    class Root:
        @cherrypy.expose
        def upload(self):
            # Only POST is meaningful here; anything else is a test bug.
            if not cherrypy.request.method == 'POST':
                raise AssertionError("'POST' != request.method %r" %
                                     cherrypy.request.method)
            return "thanks for '%s'" % tonative(cherrypy.request.body.read())
    cherrypy.tree.mount(Root())
    cherrypy.config.update({
        'server.max_request_body_size': 1001,
        'server.socket_timeout': 10,
        'server.accepted_queue_size': 5,
        'server.accepted_queue_timeout': 0.1,
    })
# Errno values signalling "connection reset by peer"; availability varies by
# platform (WSAECONNRESET exists only on Windows).
reset_names = 'ECONNRESET', 'WSAECONNRESET'
socket_reset_errors = [
    getattr(errno, name)
    for name in reset_names
    if hasattr(errno, name)
]
# Reset error numbers available on this platform.
# (This used to be a bare no-op string literal after the list; made an
# actual comment so the intent is explicit and linters stop flagging it.)
socket_reset_errors += [
    # Python 3.5 raises an http.client.RemoteDisconnected
    # with this message
    'Remote end closed connection without response',
]
class LimitedRequestQueueTests(helper.CPWebCase):
    """Verify the server's accepted-connection queue limit.

    Uses :func:`setup_upload_server`, which sets
    ``server.accepted_queue_size`` to 5; the worker-thread count comes from
    the server defaults (not visible in this chunk -- presumably 10, making
    15 the saturation point; TODO confirm).
    """
    setup_server = staticmethod(setup_upload_server)
    def test_queue_full(self):
        """A connection beyond queue capacity must be reset immediately."""
        conns = []
        overflow_conn = None
        try:
            # Make 15 initial requests and leave them open, which should use
            # all of wsgiserver's WorkerThreads and fill its Queue.
            # Each sends headers promising a 4-byte body but withholds it,
            # pinning a worker until 'done' is sent in the finally block.
            for i in range(15):
                conn = self.HTTP_CONN(self.HOST, self.PORT)
                conn.putrequest('POST', '/upload', skip_host=True)
                conn.putheader('Host', self.HOST)
                conn.putheader('Content-Type', 'text/plain')
                conn.putheader('Content-Length', '4')
                conn.endheaders()
                conns.append(conn)
            # Now try a 16th conn, which should be closed by the
            # server immediately.
            overflow_conn = self.HTTP_CONN(self.HOST, self.PORT)
            # Manually connect since httplib won't let us set a timeout
            for res in socket.getaddrinfo(self.HOST, self.PORT, 0,
                                          socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                overflow_conn.sock = socket.socket(af, socktype, proto)
                overflow_conn.sock.settimeout(5)
                overflow_conn.sock.connect(sa)
                break
            overflow_conn.putrequest('GET', '/', skip_host=True)
            overflow_conn.putheader('Host', self.HOST)
            overflow_conn.endheaders()
            response = overflow_conn.response_class(
                overflow_conn.sock,
                method='GET',
            )
            try:
                response.begin()
            except socket.error as exc:
                if exc.args[0] in socket_reset_errors:
                    pass  # Expected.
                else:
                    tmpl = (
                        'Overflow conn did not get RST. '
                        'Got {exc.args!r} instead'
                    )
                    raise AssertionError(tmpl.format(**locals()))
            except BadStatusLine:
                # This is a special case in OS X. Linux and Windows will
                # RST correctly.
                assert sys.platform == 'darwin'
            else:
                # Receiving any response at all means the queue limit failed.
                raise AssertionError('Overflow conn did not get RST ')
        finally:
            # Unblock and drain the 15 pinned workers so the server can
            # shut down cleanly, then release the overflow socket.
            for conn in conns:
                conn.send(b'done')
                response = conn.response_class(conn.sock, method='POST')
                response.begin()
                self.body = response.read()
                self.assertBody("thanks for 'done'")
                self.assertEqual(response.status, 200)
                conn.close()
            if overflow_conn:
                overflow_conn.close()
class BadRequestTests(helper.CPWebCase):
    """Malformed-request handling: HTTP lines must end in CRLF."""
    setup_server = staticmethod(setup_server)
    def test_No_CRLF(self):
        """Bare-LF terminators are rejected both on the request line and
        on the header terminator."""
        self.persistent = True
        conn = self.HTTP_CONN
        # Request line terminated with LF only.
        conn.send(b'GET /hello HTTP/1.1\n\n')
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.body = response.read()
        self.assertBody('HTTP requires CRLF terminators')
        conn.close()
        conn.connect()
        # Correct request line, but header block terminated with LF only.
        conn.send(b'GET /hello HTTP/1.1\r\n\n')
        response = conn.response_class(conn.sock, method='GET')
        response.begin()
        self.body = response.read()
        self.assertBody('HTTP requires CRLF terminators')
        conn.close()
| bsd-3-clause | eb9fecf9017805ed567e15f037908458 | 34.583333 | 79 | 0.561443 | 4.265261 | false | false | false | false |
mozilla/mozilla-ignite | apps/projects/migrations/0004_auto__add_field_project_blog.py | 2 | 7459 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the optional ``blog`` URL field to
    ``projects.Project``.

    NOTE: auto-generated by South.  The ``models`` dict below is a frozen
    snapshot of the ORM at generation time -- do not edit it by hand.
    """
    def forwards(self, orm):
        """Apply: add the nullable ``projects_project.blog`` column."""
        # Adding field 'Project.blog'
        db.add_column('projects_project', 'blog', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert: drop the ``projects_project.blog`` column."""
        # Deleting field 'Project.blog'
        db.delete_column('projects_project', 'blog')
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'projects.project': {
            'Meta': {'object_name': 'Project'},
            'blog': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'github': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'long_description': ('django.db.models.fields.TextField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'team_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False'}),
            'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['topics.Topic']", 'symmetrical': 'False'})
        },
        'topics.topic': {
            'Meta': {'object_name': 'Topic'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'users.link': {
            'Meta': {'object_name': 'Link'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'service': ('django.db.models.fields.CharField', [], {'default': "u'other'", 'max_length': '50'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
        },
        'users.profile': {
            'Meta': {'object_name': 'Profile'},
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'confirmation_token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'links': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Link']", 'symmetrical': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
        }
    }
    complete_apps = ['projects']
| bsd-3-clause | 91d97d253d0ded5b0a2a16bc5134c275 | 70.721154 | 182 | 0.548063 | 3.685277 | false | false | false | false |
mozilla/mozilla-ignite | apps/resources/views.py | 1 | 1180 | import jingo
from django.http import Http404
from django.core.exceptions import ObjectDoesNotExist
from jinja2.exceptions import TemplateNotFound
from resources.models import Resource
def object_list(request, template='resources/object_list.html'):
    """Render the resources listing: published labs and published links."""
    published = Resource.objects.filter(status=Resource.PUBLISHED)
    # resource_type=2 rows are "labs" (newest first); resource_type=1 rows
    # are "links" (alphabetical) -- assumed from usage here; confirm against
    # the Resource model's type choices.
    lab_list = published.filter(resource_type=2).order_by('-created')
    link_list = published.filter(resource_type=1).order_by('title')
    return jingo.render(request, template, {
        'labs': lab_list,
        'links': link_list,
    })
def resource_page(request, slug, template='resources/pages/base.html'):
    """Render a single resource page looked up by slug.

    Raises Http404 when the slug is unknown or when the resource names a
    template that does not exist.
    """
    try:
        page = Resource.objects.get(slug=slug)
    except ObjectDoesNotExist:
        raise Http404
    # Each resource names its own template under resources/pages/; this
    # always overrides the default ``template`` argument.
    template = 'resources/pages/%s' % page.template
    try:
        return jingo.render(request, template, {'page_data': page})
    except TemplateNotFound:
        raise Http404
| bsd-3-clause | f50db18afce308a347f1841bd59050b6 | 26.44186 | 71 | 0.65678 | 4.199288 | false | false | false | false |
mozilla/mozilla-ignite | apps/projects/tasks.py | 1 | 2120 | import bleach
from celery.task import Task
from django_push.subscriber.models import Subscription, SubscriptionError
from projects.utils import PushFeedParser, FeedEntryParser
from feeds.models import Entry
# Whitelist used by bleach when sanitising untrusted feed HTML: only these
# tags survive, and only these attributes on them.
TAGS = (
    'h1', 'h2', 'h3', 'h4', 'h5',
    'a', 'b', 'em', 'i', 'strong',
    'ol', 'ul', 'li', 'hr', 'blockquote',
    'p', 'span', 'pre', 'code', 'img',
)
ATTRIBUTES = {
    'a': ['href', 'title'],
    'img': ['src', 'alt'],
}
class PushSubscriber(Task):
    """Celery task: subscribe a ``Link`` to its feed's PubSubHubbub hub.

    NOTE: this file uses Python 2 ``except X, e`` syntax.
    """
    def run(self, link, **kwargs):
        log = self.get_logger(**kwargs)
        # Discover the feed URL and its hub URL from the page at link.url.
        p = PushFeedParser(link.url)
        p.parse()
        try:
            link.subscription = Subscription.objects.subscribe(
                p.feed_url, hub=p.hub_url)
            link.save()
        except SubscriptionError, e:
            # NOTE(review): the message says "Retrying" but no task retry is
            # actually issued here -- the failure is only logged.
            log.warning('SubscriptionError. Retrying (%s)' % (link.url,))
            log.warning('Error: %s' % (str(e),))
class PushUnsubscriber(Task):
    """Celery task: cancel the PubSubHubbub subscription for a link."""
    def run(self, link, **kwargs):
        logger = self.get_logger(**kwargs)
        subscription = link.subscription
        # Nothing to cancel when the link never acquired a subscription.
        if not subscription:
            logger.warning(
                'Attempt to unsubscribe from link with no subscription: %s' % (
                    link.url,))
            return
        Subscription.objects.unsubscribe(link.url, hub=subscription.hub)
class PushNotificationHandler(Task):
    """Celery task: turn an incoming PuSH notification into feed Entries,
    one per Link attached to the notifying subscription."""
    def create_entry(self, entry, link):
        """Persist one parsed feed entry, sanitising its HTML first."""
        # Strip untrusted feed HTML down to the whitelisted tags/attributes.
        content = bleach.clean(
            entry.content, tags=TAGS, attributes=ATTRIBUTES, strip=True)
        entry = Entry.objects.create(
            title=entry.title, url=entry.link, body=content,
            link=link, published=entry.updated)
        return entry
    def run(self, notification, sender, **kwargs):
        log = self.get_logger(**kwargs)
        # django-push may dispatch with other sender types; only act on
        # real Subscription objects.
        if not isinstance(sender, Subscription):
            return
        for entry in notification.entries:
            # NOTE(review): logs entry.url, while create_entry stores
            # entry.link -- presumably both exist on feedparser entries.
            log.debug('Received notification of entry: %s, %s' % (
                entry.title, entry.url))
            parsed = FeedEntryParser(entry)
            for link in sender.link_set.all():
                self.create_entry(parsed, link)
| bsd-3-clause | d6af245dc6b60225c5cc54447efe5ff1 | 30.641791 | 79 | 0.587736 | 3.854545 | false | false | false | false |
mozilla/mozilla-ignite | vendor-local/lib/python/requests/sessions.py | 1 | 10866 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
from .compat import cookielib
from .cookies import cookiejar_from_dict, remove_cookie_by_name
from .defaults import defaults
from .models import Request
from .hooks import dispatch_hook
from .utils import header_expand
from .packages.urllib3.poolmanager import PoolManager
def merge_kwargs(local_kwarg, default_kwarg):
    """Merge a per-request kwarg with the session-level default.

    Strings and non-mapping values (e.g. ``timeout``) pass straight
    through; for mappings, local entries override defaults, and a local
    value of ``None`` removes that key from the merged result.
    """
    if default_kwarg is None:
        return local_kwarg
    if isinstance(local_kwarg, str):
        return local_kwarg
    if local_kwarg is None:
        return default_kwarg
    # Non-mapping values (no .items) bypass merging entirely.
    if not hasattr(default_kwarg, 'items'):
        return local_kwarg
    merged = default_kwarg.copy()
    merged.update(local_kwarg)
    # A local value of None means "unset this default".
    for key in [k for k, v in local_kwarg.items() if v is None]:
        del merged[key]
    return merged
class Session(object):
    """A Requests session.

    Persists headers, cookies, auth, proxies and other settings across
    requests, and owns a urllib3 PoolManager for connection pooling.
    """
    # Attributes merged between session defaults and per-request kwargs
    # (see merge_kwargs); also defines what gets pickled.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
        'params', 'config', 'verify', 'cert', 'prefetch']
    def __init__(self,
        headers=None,
        cookies=None,
        auth=None,
        timeout=None,
        proxies=None,
        hooks=None,
        params=None,
        config=None,
        prefetch=False,
        verify=True,
        cert=None):
        self.headers = headers or {}
        self.auth = auth
        self.timeout = timeout
        self.proxies = proxies or {}
        self.hooks = hooks or {}
        self.params = params or {}
        self.config = config or {}
        self.prefetch = prefetch
        self.verify = verify
        self.cert = cert
        # Fill in any missing config values from the package-level defaults.
        for (k, v) in list(defaults.items()):
            self.config.setdefault(k, v)
        self.init_poolmanager()
        # Set up a CookieJar to be used by default
        if isinstance(cookies, cookielib.CookieJar):
            self.cookies = cookies
        else:
            self.cookies = cookiejar_from_dict(cookies)
    def init_poolmanager(self):
        """Create the urllib3 PoolManager used for connection pooling."""
        self.poolmanager = PoolManager(
            num_pools=self.config.get('pool_connections'),
            maxsize=self.config.get('pool_maxsize')
        )
    def __repr__(self):
        return '<requests-client at 0x%x>' % (id(self))
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # No teardown needed; pooled connections are reclaimed by urllib3.
        pass
    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        return_response=True,
        config=None,
        prefetch=False,
        verify=None,
        cert=None):
        """Constructs and sends a :class:`Request <Request>`.
        Returns :class:`Response <Response>` object.
        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
        :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) Float describing the timeout of the request.
        :param allow_redirects: (optional) Boolean. Set to True by default.
        :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
        :param return_response: (optional) If False, an un-sent Request object will returned.
        :param config: (optional) A configuration dictionary.
        :param prefetch: (optional) if ``True``, the response content will be immediately downloaded.
        :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
        :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
        """
        method = str(method).upper()
        # Default empty dicts for dict params.
        data = {} if data is None else data
        files = {} if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks
        prefetch = self.prefetch or prefetch
        # use session's hooks as defaults
        for key, cb in list(self.hooks.items()):
            hooks.setdefault(key, cb)
        # Expand header values.
        if headers:
            for k, v in list(headers.items()) or {}:
                headers[k] = header_expand(v)
        args = dict(
            method=method,
            url=url,
            data=data,
            params=params,
            headers=headers,
            cookies=cookies,
            files=files,
            auth=auth,
            hooks=hooks,
            timeout=timeout,
            allow_redirects=allow_redirects,
            proxies=proxies,
            config=config,
            prefetch=prefetch,
            verify=verify,
            cert=cert,
            _poolmanager=self.poolmanager
        )
        # merge session cookies into passed-in ones
        dead_cookies = None
        # passed-in cookies must become a CookieJar:
        if not isinstance(cookies, cookielib.CookieJar):
            args['cookies'] = cookiejar_from_dict(cookies)
            # support unsetting cookies that have been passed in with None values
            # this is only meaningful when `cookies` is a dict ---
            # for a real CookieJar, the client should use session.cookies.clear()
            if cookies is not None:
                dead_cookies = [name for name in cookies if cookies[name] is None]
        # merge the session's cookies into the passed-in cookies:
        for cookie in self.cookies:
            args['cookies'].set_cookie(cookie)
        # remove the unset cookies from the jar we'll be using with the current request
        # (but not from the session's own store of cookies):
        if dead_cookies is not None:
            for name in dead_cookies:
                remove_cookie_by_name(args['cookies'], name)
        # Merge local kwargs with session kwargs.
        for attr in self.__attrs__:
            # we already merged cookies:
            if attr == 'cookies':
                continue
            session_val = getattr(self, attr, None)
            local_val = args.get(attr)
            args[attr] = merge_kwargs(local_val, session_val)
        # Arguments manipulation hook.
        args = dispatch_hook('args', args['hooks'], args)
        # Create the (empty) response.
        r = Request(**args)
        # Give the response some context.
        r.session = self
        # Don't send if asked nicely.
        if not return_response:
            return r
        # Send the HTTP Request.
        r.send(prefetch=prefetch)
        # Send any cookies back up the to the session.
        # (in safe mode, cookies may be None if the request didn't succeed)
        if r.response.cookies is not None:
            for cookie in r.response.cookies:
                self.cookies.set_cookie(cookie)
        # Return the response.
        return r.response
    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('get', url, **kwargs)
    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('options', url, **kwargs)
    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        # HEAD does not follow redirects by default.
        kwargs.setdefault('allow_redirects', False)
        return self.request('head', url, **kwargs)
    def post(self, url, data=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('post', url, data=data, **kwargs)
    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('put', url, data=data, **kwargs)
    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('patch', url, data=data, **kwargs)
    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('delete', url, **kwargs)
    def __getstate__(self):
        # Pickle only the declared attributes; the PoolManager holds live
        # sockets and is rebuilt in __setstate__.
        return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager()
def session(**kwargs):
    """Returns a :class:`Session` for context-management.

    :param \*\*kwargs: keyword arguments forwarded verbatim to the
        :class:`Session` constructor (headers, auth, proxies, ...).
    """
    return Session(**kwargs)
| bsd-3-clause | c67578e08987ce969bc7e1f39729cca1 | 32.027356 | 116 | 0.597276 | 4.31876 | false | false | false | false |
mozilla/mozilla-ignite | apps/timeslot/models.py | 1 | 2218 | from datetime import timedelta, datetime
from django.conf import settings
from django.db import models
from django_extensions.db.fields import AutoSlugField
from timeslot.managers import (TimeSlotFreeManager, ReleaseManager,
TimeSlotManager)
from timeslot.utils import shorten_object
class TimeSlot(models.Model):
    """Defines the ``TimeSlot`` available for booking"""
    start_date = models.DateTimeField()
    end_date = models.DateTimeField()
    notes = models.TextField(blank=True)
    # True once the booking has been finalised (see has_expired below).
    is_booked = models.BooleanField(default=False)
    submission = models.ForeignKey('challenges.Submission', blank=True,
                                   null=True)
    # When the provisional hold on this slot was taken; None if never held.
    booking_date = models.DateTimeField(blank=True, null=True)
    webcast_url = models.URLField(verify_exists=False, max_length=500,
                                  blank=True)
    release = models.ForeignKey('timeslot.Release')
    # managers
    objects = TimeSlotManager()
    available = TimeSlotFreeManager()
    class Meta:
        ordering = ['start_date', ]
    def __unicode__(self):
        return u'TimeSlot: %s - %s' % (self.start_date, self.end_date)
    @property
    def short_id(self):
        # Compact identifier derived from the pk (see timeslot.utils).
        return shorten_object(self)
    def has_expired(self):
        """Determines if this booking has expired.

        Returns True when the provisional hold is older than
        ``settings.BOOKING_EXPIRATION`` seconds OR the slot was already
        booked.  NOTE(review): raises TypeError when ``booking_date`` is
        None -- callers presumably only invoke this on held slots; confirm.
        """
        expire_date = self.booking_date + \
            timedelta(seconds=settings.BOOKING_EXPIRATION)
        return any([expire_date < datetime.utcnow(), self.is_booked])
class Release(models.Model):
    """Each ``TimeSlot`` are part of a release"""
    name = models.CharField(max_length=255)
    slug = AutoSlugField(populate_from='name')
    is_current = models.BooleanField(default=True)
    phase = models.ForeignKey('challenges.Phase')
    phase_round = models.ForeignKey('challenges.PhaseRound',
                                    blank=True, null=True)
    # managers
    objects = ReleaseManager()
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        """Makes sure we only have a current release.

        Demotes every stored release before saving this one, so at most
        one row has ``is_current=True``.  NOTE(review): the update + save
        pair is not wrapped in a transaction, so a concurrent save could
        leave two current releases.
        """
        if self.is_current:
            self.__class__.objects.update(is_current=False)
        super(Release, self).save(*args, **kwargs)
| bsd-3-clause | 65319af5e991031513cf097d8736abbc | 33.65625 | 71 | 0.643372 | 4.145794 | false | false | false | false |
mozilla/mozilla-ignite | vendor-local/lib/python/rsa/_version133.py | 5 | 11243 | """RSA module
pri = k[1] //Private part of keys d,p,q
Module for calculating large primes, and RSA encryption, decryption,
signing and verification. Includes generating public and private keys.
WARNING: this code implements the mathematics of RSA. It is not suitable for
real-world secure cryptography purposes. It has not been reviewed by a security
expert. It does not include padding of data. There are many ways in which the
output of this module, when used without any modification, can be sucessfully
attacked.
"""
__author__ = "Sybren Stuvel, Marloes de Boer and Ivo Tamboer"
__date__ = "2010-02-05"
__version__ = '1.3.3'
# NOTE: Python's modulo can return negative numbers. We compensate for
# this behaviour using the abs() function
from cPickle import dumps, loads
import base64
import math
import os
import random
import sys
import types
import zlib
# Display a warning that this insecure version is imported.
import warnings
warnings.warn('Insecure version of the RSA module is imported as %s, be careful'
% __name__)
def gcd(p, q):
    """Returns the greatest common divisor of p and q

    >>> gcd(42, 6)
    6
    """
    # Iterative Euclid's algorithm; equivalent to the classic recursive
    # formulation (swap so p >= q, then repeatedly reduce modulo q).
    if p < q:
        p, q = q, p
    while q != 0:
        p, q = q, abs(p % q)
    return p
def bytes2int(bytes):
    """Converts a list of bytes or a string to an integer

    Big-endian: the first byte is the most significant.

    NOTE: Python 2 only -- relies on ``types.ListType``/``types.StringType``
    (removed in Python 3) and per-character ``ord()``.

    >>> (128 * 256 + 64) * 256 + 15
    8405007
    >>> l = [128, 64, 15]
    >>> bytes2int(l)
    8405007
    """
    if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
        raise TypeError("You must pass a string or a list")
    # Convert byte stream to integer
    integer = 0
    for byte in bytes:
        integer *= 256
        # Elements may be one-char strings or ints; normalise to ints.
        if type(byte) is types.StringType: byte = ord(byte)
        integer += byte
    return integer
def int2bytes(number):
    """Converts a number to a string of bytes

    Big-endian inverse of ``bytes2int``.  Returns the empty string for
    zero (the loop body never runs).

    NOTE: Python 2 only -- relies on ``types.LongType`` and byte strings.

    >>> bytes2int(int2bytes(123456789))
    123456789
    """
    if not (type(number) is types.LongType or type(number) is types.IntType):
        raise TypeError("You must pass a long or an int")
    string = ""
    while number > 0:
        string = "%s%s" % (chr(number & 0xFF), string)
        # Fix: was ``number /= 256``, which depends on Python 2's classic
        # integer division and silently becomes float true-division under
        # ``from __future__ import division`` / Python 3.  Floor division
        # is identical for Python 2 ints and correct everywhere.
        number //= 256
    return string
def fast_exponentiation(a, p, n):
    """Calculates r = a^p mod n

    Square-and-multiply over the binary digits of ``p``.

    Fix: the original loop condition ``while p != 1`` never terminated
    when ``p == 0``; that case now returns ``1 % n`` directly (the
    mathematically correct a^0 mod n).
    """
    if p == 0:
        return 1 % n
    result = a % n
    remainders = []
    # Record p's bits least-significant first, stopping at the top bit.
    while p != 1:
        remainders.append(p & 1)
        p = p >> 1
    # Replay the bits most-significant first: square, and multiply by a
    # whenever the bit is set.
    while remainders:
        rem = remainders.pop()
        result = ((a ** rem) * result ** 2) % n
    return result
def read_random_int(nbits):
    """Reads a random integer of approximately nbits bits rounded up
    to whole bytes"""
    # Round the bit count up to whole bytes; os.urandom supplies
    # cryptographically strong random bytes, folded big-endian by bytes2int.
    nbytes = ceil(nbits/8.)
    randomdata = os.urandom(nbytes)
    return bytes2int(randomdata)
def ceil(x):
    """ceil(x) -> int(math.ceil(x))"""
    # math.ceil returns a float under Python 2; normalise to int.
    rounded_up = math.ceil(x)
    return int(rounded_up)
def randint(minvalue, maxvalue):
    """Returns a random integer x with minvalue <= x <= maxvalue

    Fix: the original drew ``read_random_int(nbits) % (maxvalue - minvalue)``,
    which could never return ``maxvalue`` (and raised in ``math.log`` when
    ``minvalue == maxvalue``), contradicting this docstring.  The modulus
    now spans the full inclusive range.

    NOTE: the modulo reduction still introduces a slight bias toward low
    values; tolerated in this legacy module.
    """
    # Safety - get a lot of random data even if the range is fairly
    # small
    min_nbits = 32
    # The (inclusive) count of values we may return
    range = (maxvalue - minvalue) + 1
    # Which is this number of bytes
    rangebytes = ceil(math.log(range, 2) / 8.)
    # Convert to bits, but make sure it's always at least min_nbits*2
    rangebits = max(rangebytes * 8, min_nbits * 2)
    # Take a random number of bits between min_nbits and rangebits
    nbits = random.randint(min_nbits, rangebits)
    return (read_random_int(nbits) % range) + minvalue
def fermat_little_theorem(p):
    """Returns 1 if p may be prime, and something else if p definitely
    is not prime"""
    # Pick a random base and check Fermat's congruence a^(p-1) == 1 (mod p).
    base = randint(1, p - 1)
    return fast_exponentiation(base, p - 1, p)
def jacobi(a, b):
    """Calculates the value of the Jacobi symbol (a/b)
    """
    # (a/b) is 0 whenever a and b share a factor.
    if a % b == 0:
        return 0
    sign = 1
    while a > 1:
        if a & 1:
            # Quadratic reciprocity: flip the sign when both (a-1)/2 and
            # (b-1)/2 are odd, then reduce via (a/b) -> (b mod a / a).
            if ((a - 1) * (b - 1) >> 2) & 1:
                sign = -sign
            a, b = b % a, a
        else:
            # Supplementary law for (2/b): flip when (b^2 - 1)/8 is odd.
            if ((b * b - 1) >> 3) & 1:
                sign = -sign
            a >>= 1
    return sign
def jacobi_witness(x, n):
    """Returns False if n is an Euler pseudo-prime with base x, and
    True otherwise.

    Euler's criterion: for prime n, x^((n-1)/2) is congruent to the
    Jacobi symbol (x/n) modulo n; a mismatch witnesses compositeness.

    Fix: the exponent now uses floor division ``//`` instead of ``/`` so
    it stays an integer under Python 3 / ``from __future__ import
    division``; identical for Python 2 ints (n is odd for the candidates
    tested here, so the division is exact -- confirm for other callers).
    """
    j = jacobi(x, n) % n
    f = fast_exponentiation(x, (n - 1) // 2, n)
    if j == f: return False
    return True
def randomized_primality_testing(n, k):
    """Calculates whether n is composite (which is always correct) or
    prime (which is incorrect with error probability 2**-k)

    Returns False if the number if composite, and True if it's
    probably prime.
    """
    q = 0.5  # Property of the jacobi_witness function
    # Number of witness rounds needed for error probability <= 2**-k.
    trial_count = ceil(k / math.log(1 / q, 2))
    for _ in range(trial_count + 1):
        base = randint(1, n - 1)
        if jacobi_witness(base, n):
            return False
    return True
def is_prime(number):
    """Returns True if the number is prime, and False otherwise.

    Probabilistic: uses randomized_primality_testing with k=5, so a
    composite can be misreported as prime with probability about 2**-5.

    >>> is_prime(42)
    False
    >>> is_prime(41)
    True
    """
    """
    if not fermat_little_theorem(number) == 1:
        # Not prime, according to Fermat's little theorem
        return False
    """
    # The bare string above is disabled legacy code kept for reference;
    # it is a no-op expression statement.
    if randomized_primality_testing(number, 5):
        # Prime, according to Jacobi
        return True
    # Not prime
    return False
def getprime(nbits):
    """Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
    other words: nbits is rounded up to whole bytes.

    Draws random odd candidates until the probabilistic is_prime test
    accepts one.

    >>> p = getprime(8)
    >>> is_prime(p)
    True
    """
    # NOTE(review): nbytes is computed but never used.
    nbytes = int(math.ceil(nbits/8.))
    while True:
        integer = read_random_int(nbits)
        # Make sure it's odd
        integer |= 1
        # Test for primeness
        if is_prime(integer): break
        # Retry if not prime
    return integer
def are_relatively_prime(a, b):
    """Returns True if a and b are relatively prime, and False if they
    are not.

    >>> are_relatively_prime(2, 3)
    True
    >>> are_relatively_prime(2, 4)
    False
    """
    # Coprime exactly when the greatest common divisor is 1.
    return gcd(a, b) == 1
def find_p_q(nbits):
    """Returns a tuple of two different primes of nbits bits"""
    first = getprime(nbits)
    # Keep drawing until we get a prime distinct from the first.
    while True:
        second = getprime(nbits)
        if second != first:
            break
    return (first, second)
def extended_euclid_gcd(a, b):
    """Returns a tuple (d, i, j) such that d = gcd(a, b) = ia + jb

    Recursive extended Euclid; intended for non-negative integers.

    Fix: the quotient was computed as ``long(a / b)``, which depends on
    Python 2's classic integer division and the ``long`` builtin.  Floor
    division ``a // b`` yields the same value for the non-negative
    integers used here and also works on Python 3.
    """
    if b == 0:
        # Base case: gcd(a, 0) = a = 1*a + 0*b.
        return (a, 1, 0)
    q = abs(a % b)
    r = a // b
    (d, k, l) = extended_euclid_gcd(b, q)
    # Back-substitute the coefficients from the recursive call.
    return (d, l, k - l*r)
# Main function: calculate encryption and decryption keys
def calculate_keys(p, q, nbits):
    """Calculates an encryption and a decryption key for p and q, and
    returns them as a tuple (e, d)

    Fix: ``nbits/2`` is now ``nbits // 2`` so the requested bit count
    stays an integer under Python 3 / ``from __future__ import
    division``; identical for Python 2 ints.
    """
    n = p * q
    phi_n = (p-1) * (q-1)
    while True:
        # Make sure e has enough bits so we ensure "wrapping" through
        # modulo n
        e = getprime(max(8, nbits // 2))
        if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
    # i is e's multiplicative inverse modulo phi_n, i.e. the private
    # exponent d (callers retry when it comes out negative).
    (d, i, j) = extended_euclid_gcd(e, phi_n)
    if not d == 1:
        raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
    if not (e * i) % phi_n == 1:
        raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
    return (e, i)
def gen_keys(nbits):
    """Generate RSA keys of nbits bits. Returns (p, q, e, d).
    Note: this can take a long time, depending on the key size.
    """
    # NOTE(review): the negative d mentioned below comes from
    # extended_euclid_gcd returning a negative Bezout coefficient;
    # `d % phi_n` would normalize it -- confirm before changing.
    while True:
        (p, q) = find_p_q(nbits)
        (e, d) = calculate_keys(p, q, nbits)
        # For some reason, d is sometimes negative. We don't know how
        # to fix it (yet), so we keep trying until everything is shiny
        if d > 0: break
    return (p, q, e, d)
def gen_pubpriv_keys(nbits):
    """Generates public and private keys, and returns them as (pub,
    priv).

    The public key is a dict {'e': ..., 'n': ...}; the private key is
    a dict {'d': ..., 'p': ..., 'q': ...}.
    """
    (p, q, e, d) = gen_keys(nbits)
    public = {'e': e, 'n': p*q}
    private = {'d': d, 'p': p, 'q': q}
    return (public, private)
def encrypt_int(message, ekey, n):
    """Encrypts a message using encryption key 'ekey', working modulo
    n"""
    # Python-2-only type handling: `types.IntType`, `types.LongType`
    # and the `long` builtin do not exist on Python 3.
    if type(message) is types.IntType:
        return encrypt_int(long(message), ekey, n)
    if not type(message) is types.LongType:
        raise TypeError("You must pass a long or an int")
    # Reject messages with more bits than the modulus n can represent.
    if message > 0 and \
            math.floor(math.log(message, 2)) > math.floor(math.log(n, 2)):
        raise OverflowError("The message is too long")
    # Modular exponentiation: message ** ekey mod n.
    return fast_exponentiation(message, ekey, n)
def decrypt_int(cyphertext, dkey, n):
    """Decrypts a cypher text using the decryption key 'dkey', working
    modulo n"""
    # RSA decryption is the same modular exponentiation as encryption,
    # just with the other exponent.
    return encrypt_int(cyphertext, dkey, n)
def sign_int(message, dkey, n):
    """Signs 'message' using key 'dkey', working modulo n"""
    # Signing = "decrypting" the message with the private exponent.
    return decrypt_int(message, dkey, n)
def verify_int(signed, ekey, n):
    """verifies 'signed' using key 'ekey', working modulo n"""
    # Verification = "encrypting" the signature with the public exponent.
    return encrypt_int(signed, ekey, n)
def picklechops(chops):
    """Pickles and base64encodes it's argument chops"""
    # NOTE(review): base64.encodestring was deprecated and removed in
    # Python 3.9 (encodebytes is the replacement); kept as-is for the
    # Python 2 code base this module targets.
    value = zlib.compress(dumps(chops))
    encoded = base64.encodestring(value)
    return encoded.strip()
def unpicklechops(string):
    """base64decodes and unpickes it's argument string into chops"""
    # WARNING: unpickling untrusted data can execute arbitrary code --
    # only feed this strings produced by picklechops.
    return loads(zlib.decompress(base64.decodestring(string)))
def chopstring(message, key, n, funcref):
    """Splits 'message' into chops that are at most as long as n,
    converts these into integers, and calls funcref(integer, key, n)
    for each chop.
    Used by 'encrypt' and 'sign'.
    """
    msglen = len(message)
    mbits = msglen * 8  # total message size in bits (currently unused)
    # Capacity of the modulus: blocks must encode to integers < n.
    nbits = int(math.floor(math.log(n, 2)))
    # Python 2 integer division: bytes per block, then block count,
    # rounded up if the message does not divide evenly.
    nbytes = nbits / 8
    blocks = msglen / nbytes
    if msglen % nbytes > 0:
        blocks += 1
    cypher = []
    for bindex in range(blocks):
        # Slice out block `bindex`, turn it into an integer, transform.
        offset = bindex * nbytes
        block = message[offset:offset+nbytes]
        value = bytes2int(block)
        cypher.append(funcref(value, key, n))
    return picklechops(cypher)
def gluechops(chops, key, n, funcref):
    """Glues chops back together into a string. calls
    funcref(integer, key, n) for each chop.
    Used by 'decrypt' and 'verify'.
    """
    # Transform every chop back to bytes and concatenate the pieces.
    decoded = unpicklechops(chops)
    parts = [int2bytes(funcref(piece, key, n)) for piece in decoded]
    return "".join(parts)
def encrypt(message, key):
    """Encrypts a string 'message' with the public key 'key'"""
    # Public operation: exponent e, modulus n.
    return chopstring(message, key['e'], key['n'], encrypt_int)
def sign(message, key):
    """Signs a string 'message' with the private key 'key'"""
    # Private operation: exponent d, modulus reconstructed as p*q.
    return chopstring(message, key['d'], key['p']*key['q'], decrypt_int)
def decrypt(cypher, key):
    """Decrypts a cypher with the private key 'key'"""
    return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
def verify(cypher, key):
    """Verifies a cypher with the public key 'key'"""
    return gluechops(cypher, key['e'], key['n'], encrypt_int)
# Do doctest if we're not imported
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# Public API of this module.
__all__ = ["gen_pubpriv_keys", "encrypt", "decrypt", "sign", "verify"]
| bsd-3-clause | a29449d20cbd3cedb861302ab6e18a77 | 24.552273 | 97 | 0.597794 | 3.316519 | false | false | false | false |
mozilla/mozilla-ignite | apps/projects/migrations/0006_auto__add_field_link_subscription.py | 2 | 9607 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the nullable ``subscription`` FK to projects.Link."""
        # Adding field 'Link.subscription'
        db.add_column('projects_link', 'subscription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['subscriber.Subscription'], null=True), keep_default=False)
    def backwards(self, orm):
        """Revert: drop the ``subscription_id`` column again."""
        # Deleting field 'Link.subscription'
        db.delete_column('projects_link', 'subscription_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.link': {
'Meta': {'object_name': 'Link'},
'blog': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'subscribe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subscriber.Subscription']", 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Link']", 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'team_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['topics.Topic']", 'symmetrical': 'False'})
},
'subscriber.subscription': {
'Meta': {'object_name': 'Subscription'},
'hub': ('django.db.models.fields.URLField', [], {'max_length': '1023'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lease_expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'topic': ('django.db.models.fields.URLField', [], {'max_length': '1023'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verify_token': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'topics.topic': {
'Meta': {'object_name': 'Topic'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'users.link': {
'Meta': {'object_name': 'Link'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'service': ('django.db.models.fields.CharField', [], {'default': "u'other'", 'max_length': '50'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Link']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['projects']
| bsd-3-clause | c908a9f8a33dadd925b120fdf67c0612 | 71.233083 | 182 | 0.547101 | 3.702119 | false | false | false | false |
mozilla/mozilla-ignite | vendor-local/lib/python/south/db/sqlite3.py | 10 | 9009 | import inspect
import re
from django.db.models import ForeignKey
from south.db import generic
from django.core.management.commands import inspectdb
class DatabaseOperations(generic.DatabaseOperations):
"""
SQLite3 implementation of database operations.
"""
backend_name = "sqlite3"
# SQLite ignores several constraints. I wish I could.
supports_foreign_keys = False
has_check_constraints = False
    def add_column(self, table_name, name, field, *args, **kwds):
        """
        Adds a column to ``table_name`` by rebuilding the whole table.

        SQLite's ALTER TABLE cannot add PRIMARY KEY or UNIQUE columns,
        so the table is recreated with the new column included.
        """
        # If it's not nullable, and has no default, raise an error (SQLite is picky)
        if (not field.null and
                (not field.has_default() or field.get_default() is None) and
                not field.empty_strings_allowed):
            raise ValueError("You cannot add a null=False column without a default value.")
        # Initialise the field.
        field.set_attributes_from_name(name)
        # We add columns by remaking the table; even though SQLite supports
        # adding columns, it doesn't support adding PRIMARY KEY or UNIQUE cols.
        self._remake_table(table_name, added={
            field.column: self._column_sql_for_create(table_name, name, field, False),
        })
    def _remake_table(self, table_name, added={}, renames={}, deleted=[], altered={},
                      primary_key_override=None, uniques_deleted=[]):
        """
        Given a table and three sets of changes (renames, deletes, alters),
        recreates it with the modified schema.

        Strategy: build a "_south_new_" shadow table with the desired
        column definitions, copy the data across, drop the original and
        rename the shadow into place, then recreate multi-column
        unique indexes (they cannot be renamed in SQLite).
        """
        # NOTE(review): the mutable default arguments appear safe only
        # because they are never mutated here -- confirm before refactoring.
        # Dry runs get skipped completely
        if self.dry_run:
            return
        # Temporary table's name
        temp_name = "_south_new_" + table_name
        # Work out the (possibly new) definitions of each column
        definitions = {}
        cursor = self._get_connection().cursor()
        # Get the index descriptions
        indexes = self._get_connection().introspection.get_indexes(cursor, table_name)
        multi_indexes = self._get_multi_indexes(table_name)
        # Work out new column defs.
        for column_info in self._get_connection().introspection.get_table_description(cursor, table_name):
            name = column_info[0]
            if name in deleted:
                continue
            # Get the type, ignoring PRIMARY KEY (we need to be consistent)
            # (note: `type` shadows the builtin for the rest of this loop)
            type = column_info[1].replace("PRIMARY KEY", "")
            # Add on unique or primary key if needed.
            if indexes[name]['unique'] and name not in uniques_deleted:
                type += " UNIQUE"
            if (primary_key_override and primary_key_override == name) or \
               (not primary_key_override and indexes[name]['primary_key']):
                type += " PRIMARY KEY"
            # Deal with a rename
            if name in renames:
                name = renames[name]
            # Add to the defs
            definitions[name] = type
        # Add on altered columns
        definitions.update(altered)
        # Add on the new columns
        definitions.update(added)
        # Alright, Make the table
        self.execute("CREATE TABLE %s (%s)" % (
            self.quote_name(temp_name),
            ", ".join(["%s %s" % (self.quote_name(cname), ctype) for cname, ctype in definitions.items()]),
        ))
        # Copy over the data
        self._copy_data(table_name, temp_name, renames)
        # Delete the old table, move our new one over it
        self.delete_table(table_name)
        self.rename_table(temp_name, table_name)
        # Recreate multi-valued indexes
        # We can't do that before since it's impossible to rename indexes
        # and index name scope is global
        self._make_multi_indexes(table_name, multi_indexes, renames=renames, deleted=deleted, uniques_deleted=uniques_deleted)
    def _copy_data(self, src, dst, field_renames={}):
        "Used to copy data into a new table"
        # Make a list of all the fields to select
        cursor = self._get_connection().cursor()
        src_fields = [column_info[0] for column_info in self._get_connection().introspection.get_table_description(cursor, src)]
        dst_fields = [column_info[0] for column_info in self._get_connection().introspection.get_table_description(cursor, dst)]
        src_fields_new = []
        dst_fields_new = []
        # Pair up source and destination columns, applying renames and
        # silently dropping source columns that no longer exist in dst.
        for field in src_fields:
            if field in field_renames:
                dst_fields_new.append(self.quote_name(field_renames[field]))
            elif field in dst_fields:
                dst_fields_new.append(self.quote_name(field))
            else:
                continue
            src_fields_new.append(self.quote_name(field))
        # Copy over the data
        self.execute("INSERT INTO %s (%s) SELECT %s FROM %s;" % (
            self.quote_name(dst),
            ', '.join(dst_fields_new),
            ', '.join(src_fields_new),
            self.quote_name(src),
        ))
    def _create_unique(self, table_name, columns):
        """Create a UNIQUE index over ``columns`` of ``table_name``.

        The index name is derived from the table and column names.
        """
        self.execute("CREATE UNIQUE INDEX %s ON %s(%s);" % (
            self.quote_name('%s_%s' % (table_name, '__'.join(columns))),
            self.quote_name(table_name),
            ', '.join(self.quote_name(c) for c in columns),
        ))
    def _get_multi_indexes(self, table_name):
        """Return the column lists of all multi-column UNIQUE indexes.

        Single-column indexes are skipped (they are handled via the
        column definitions in _remake_table).
        """
        indexes = []
        cursor = self._get_connection().cursor()
        cursor.execute('PRAGMA index_list(%s)' % self.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            if not unique:
                continue
            cursor.execute('PRAGMA index_info(%s)' % self.quote_name(index))
            info = cursor.fetchall()
            if len(info) == 1:
                continue
            columns = []
            for field in info:
                columns.append(field[2])
            indexes.append(columns)
        return indexes
    def _make_multi_indexes(self, table_name, indexes, deleted=[], renames={}, uniques_deleted=[]):
        """Recreate multi-column UNIQUE indexes after a table rebuild,
        applying column renames and dropping indexes whose columns were
        deleted (or whose exact column list was explicitly un-uniqued).
        """
        for index in indexes:
            columns = []
            for name in index:
                # Handle deletion
                if name in deleted:
                    columns = []
                    break
                # Handle renames
                if name in renames:
                    name = renames[name]
                columns.append(name)
            if columns and columns != uniques_deleted:
                self._create_unique(table_name, columns)
    def _column_sql_for_create(self, table_name, name, field, explicit_name=True):
        "Given a field and its name, returns the full type for the CREATE TABLE."
        field.set_attributes_from_name(name)
        if not explicit_name:
            name = field.db_column
        else:
            field.column = name
        sql = self.column_sql(table_name, name, field, with_name=False, field_prepared=True)
        # PRIMARY KEY / UNIQUE are deliberately not appended here;
        # _remake_table adds them based on the introspected indexes.
        #if field.primary_key:
        #    sql += " PRIMARY KEY"
        #if field.unique:
        #    sql += " UNIQUE"
        return sql
    def alter_column(self, table_name, name, field, explicit_name=True):
        """
        Changes a column's SQL definition.

        SQLite cannot ALTER columns in place, so the table is rebuilt
        with the new definition substituted.
        """
        # Remake the table correctly
        self._remake_table(table_name, altered={
            name: self._column_sql_for_create(table_name, name, field, explicit_name),
        })
    def delete_column(self, table_name, column_name):
        """
        Deletes a column (by rebuilding the table without it).
        """
        self._remake_table(table_name, deleted=[column_name])
    def rename_column(self, table_name, old, new):
        """
        Renames a column from one name to another (table rebuild).
        """
        self._remake_table(table_name, renames={old: new})
    def create_unique(self, table_name, columns):
        """
        Create an unique index on columns.
        """
        self._create_unique(table_name, columns)
    def delete_unique(self, table_name, columns):
        """
        Delete an unique index (by rebuilding without that constraint).
        """
        self._remake_table(table_name, uniques_deleted=columns)
    def create_primary_key(self, table_name, columns):
        """Make ``columns`` (a single column) the table's primary key."""
        if not isinstance(columns, (list, tuple)):
            columns = [columns]
        assert len(columns) == 1, "SQLite backend does not support multi-column primary keys"
        self._remake_table(table_name, primary_key_override=columns[0])
    # Not implemented this yet.
    def delete_primary_key(self, table_name):
        """Drop the table's primary key constraint."""
        # By passing True in, we make sure we wipe all existing PKs.
        self._remake_table(table_name, primary_key_override=True)
    # No cascades on deletes
    def delete_table(self, table_name, cascade=True):
        """Drop a table; ``cascade`` is ignored (SQLite has no cascaded drop)."""
        generic.DatabaseOperations.delete_table(self, table_name, False)
| bsd-3-clause | 2d4fc18a5fd394d6a87ae72c5ee897a4 | 38.95 | 128 | 0.567433 | 4.209813 | false | false | false | false |
mozilla/mozilla-ignite | vendor-local/lib/python/django_extensions/tests/encrypted_fields.py | 22 | 2792 | import unittest
from django.db import connection
from django.conf import settings
from django.core.management import call_command
from django.db.models import loading
# Only perform encrypted fields tests if keyczar is present
# Resolves http://github.com/django-extensions/django-extensions/issues/#issue/17
try:
from keyczar import keyczar
from django_extensions.tests.models import Secret
from django_extensions.db.fields.encrypted import EncryptedTextField, EncryptedCharField
keyczar_active = True
except ImportError:
keyczar_active = False
class EncryptedFieldsTestCase(unittest.TestCase):
    """Round-trip tests for EncryptedCharField / EncryptedTextField.

    Every test silently no-ops when keyczar is not installed
    (``keyczar_active`` is False).
    """
    def __init__(self, *args, **kwargs):
        # Build a Crypter only when keyczar is available, so the test
        # case can still be instantiated without it.
        if keyczar_active:
            self.crypt = keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
        super(EncryptedFieldsTestCase, self).__init__(*args, **kwargs)
    def setUp(self):
        # Temporarily register the tests app and create its tables.
        self.old_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
        settings.INSTALLED_APPS.append('django_extensions.tests')
        loading.cache.loaded = False
        call_command('syncdb', verbosity=0)
    def tearDown(self):
        # Restore the original app registry.
        settings.INSTALLED_APPS = self.old_installed_apps
    def testCharFieldCreate(self):
        """The raw column value is ciphertext that decrypts back to the input."""
        if not keyczar_active:
            return
        test_val = "Test Secret"
        secret = Secret.objects.create(name=test_val)
        cursor = connection.cursor()
        query = "SELECT name FROM %s WHERE id = %d" % (Secret._meta.db_table, secret.id)
        cursor.execute(query)
        db_val, = cursor.fetchone()
        # Strip the field prefix, then decrypt what was actually stored.
        decrypted_val = self.crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
        self.assertEqual(test_val, decrypted_val)
    def testCharFieldRead(self):
        """Reading through the ORM transparently decrypts the value."""
        if not keyczar_active:
            return
        test_val = "Test Secret"
        secret = Secret.objects.create(name=test_val)
        retrieved_secret = Secret.objects.get(id=secret.id)
        self.assertEqual(test_val, retrieved_secret.name)
    def testTextFieldCreate(self):
        """Same as testCharFieldCreate, for the TextField variant."""
        if not keyczar_active:
            return
        test_val = "Test Secret"
        secret = Secret.objects.create(text=test_val)
        cursor = connection.cursor()
        query = "SELECT text FROM %s WHERE id = %d" % (Secret._meta.db_table, secret.id)
        cursor.execute(query)
        db_val, = cursor.fetchone()
        decrypted_val = self.crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
        self.assertEqual(test_val, decrypted_val)
    def testTextFieldRead(self):
        """Same as testCharFieldRead, for the TextField variant."""
        if not keyczar_active:
            return
        test_val = "Test Secret"
        secret = Secret.objects.create(text=test_val)
        retrieved_secret = Secret.objects.get(id=secret.id)
        self.assertEqual(test_val, retrieved_secret.text)
| bsd-3-clause | 4191ba5a41a04ffc38bdf5f73ff2fe9a | 36.226667 | 92 | 0.672278 | 3.835165 | false | true | false | false |
mozilla/mozilla-ignite | apps/challenges/exporter.py | 1 | 2709 | from challenges.models import SubmissionParent
def get_category_data(obj):
    """Serialize a category object to a plain dict (name and slug)."""
    return {'name': obj.name, 'slug': obj.slug}
def get_author(profile):
    """Flatten a profile and its related user into a single dict."""
    user = profile.user
    data = {
        'name': profile.name,
        'title': profile.title,
        'website': profile.website,
        'bio': profile.bio,
    }
    # Merge in the account-level fields from the related User.
    data.update({
        'username': user.username,
        'email': user.email,
        'first_name': user.first_name,
        'last_name': user.last_name,
    })
    return data
def get_phase_data(phase):
    """Serialize a challenge phase to a plain dict."""
    field_names = ('name', 'start_date', 'end_date',
                   'judging_start_date', 'judging_end_date', 'order')
    return dict((attr, getattr(phase, attr)) for attr in field_names)
def get_phase_round_data(obj):
    """Serialize a phase round to a plain dict."""
    field_names = ('name', 'slug', 'start_date', 'end_date',
                   'judging_start_date', 'judging_end_date')
    return dict((attr, getattr(obj, attr)) for attr in field_names)
def get_submission_data(obj):
    """Serialize a submission, nesting its category, author and phase.

    The sketch URL and phase round are included only when present.
    """
    # Nested, related objects first.
    data = {
        'category': get_category_data(obj.category),
        'created_by': get_author(obj.created_by),
        'phase': get_phase_data(obj.phase),
    }
    # Plain attributes copied across verbatim.
    simple_fields = (
        'title', 'brief_description', 'description', 'created_on',
        'updated_on', 'is_winner', 'is_draft', 'collaborators',
        'life_improvements', 'take_advantage', 'interest_making',
        'team_members', 'repository_url', 'blog_url', 'required_effort',
    )
    for field_name in simple_fields:
        data[field_name] = getattr(obj, field_name)
    # Optional pieces.
    if obj.sketh_note:
        data['sketh_note'] = obj.sketh_note.url
    if obj.phase_round:
        data['phase_round'] = get_phase_round_data(obj.phase_round)
    return data
def get_parent_data(parent):
    """Serialize a SubmissionParent, merging in its submission's data."""
    data = {
        'name': parent.name,
        'slug': parent.slug,
        'created': parent.created,
        'modified': parent.modified,
        'is_featured': parent.is_featured,
    }
    # Fold the submission's serialized fields into the same dict.
    if parent.submission:
        data.update(get_submission_data(parent.submission))
    return data
def export_entries():
    """Export all the existing (active) entries as a list of dicts."""
    active_parents = (SubmissionParent.objects
                      .filter(status=SubmissionParent.ACTIVE))
    return [get_parent_data(parent) for parent in active_parents]
| bsd-3-clause | f9db986b6bcd1ce8cca55d9bb408b0ec | 27.21875 | 67 | 0.591362 | 3.424779 | false | false | false | false |
mozilla/mozilla-ignite | vendor-local/lib/python/django_extensions/management/modelviz.py | 8 | 13604 | #!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-a] [-d] [-g] [-n] [-L <language>] [-i <model_names>] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-a, --all_applications
show models from all applications.
-d, --disable_fields
don't show the class member fields.
-g, --group_models
draw an enclosing box around models from the same app.
-i, --include_models=User,Person,Car
only include selected models in graph.
-n, --verbose_names
use verbose_name for field and models.
-L, --language
specify language used for verbose_name localization
-x, --exclude_columns
exclude specific column(s) from the graph.
-X, --exclude_models
exclude specific model(s) from the graph.
-e, --inheritance
show inheritance arrows.
"""
__version__ = "0.9"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Bas van Oostveen <v.oostveen@gmail.com>",
"Joern Hees <gitdev@joernhees.de>"
]
import os
import sys
import getopt
from django.core.management import setup_environ
try:
import settings
except ImportError:
pass
else:
setup_environ(settings)
from django.utils.translation import activate as activate_language
from django.utils.safestring import mark_safe
from django.template import Template, Context, loader
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField, RelatedField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
def parse_file_or_list(arg):
    """Turn *arg* into a list of strings.

    *arg* may be falsy (-> []), the path of an existing file with one
    entry per line (lines are stripped), or a comma-separated string.
    """
    if not arg:
        return []
    if ',' not in arg and os.path.isfile(arg):
        # `with` closes the handle; the original leaked it via a bare open().
        with open(arg) as handle:
            return [entry.strip() for entry in handle.readlines()]
    return arg.split(',')
def generate_dot(app_labels, **kwargs):
disable_fields = kwargs.get('disable_fields', False)
include_models = parse_file_or_list(kwargs.get('include_models', ""))
all_applications = kwargs.get('all_applications', False)
use_subgraph = kwargs.get('group_models', False)
verbose_names = kwargs.get('verbose_names', False)
inheritance = kwargs.get('inheritance', False)
language = kwargs.get('language', None)
if language is not None:
activate_language(language)
exclude_columns = parse_file_or_list(kwargs.get('exclude_columns', ""))
exclude_models = parse_file_or_list(kwargs.get('exclude_models', ""))
def skip_field(field):
if exclude_columns:
if verbose_names and field.verbose_name:
if field.verbose_name in exclude_columns:
return True
if field.name in exclude_columns:
return True
return False
t = loader.get_template('django_extensions/graph_models/head.html')
c = Context({})
dot = t.render(c)
apps = []
if all_applications:
apps = models.get_apps()
for app_label in app_labels:
app = models.get_app(app_label)
if not app in apps:
apps.append(app)
graphs = []
for app in apps:
graph = Context({
'name': '"%s"' % app.__name__,
'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
'disable_fields': disable_fields,
'use_subgraph': use_subgraph,
'models': []
})
appmodels = get_models(app)
abstract_models = []
for appmodel in appmodels:
abstract_models = abstract_models + [abstract_model for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]
abstract_models = list(set(abstract_models)) # remove duplicates
appmodels = abstract_models + appmodels
for appmodel in appmodels:
appmodel_abstracts = [abstract_model.__name__ for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]
# collect all attribs of abstract superclasses
def getBasesAbstractFields(c):
_abstract_fields = []
for e in c.__bases__:
if hasattr(e, '_meta') and e._meta.abstract:
_abstract_fields.extend(e._meta.fields)
_abstract_fields.extend(getBasesAbstractFields(e))
return _abstract_fields
abstract_fields = getBasesAbstractFields(appmodel)
model = {
'app_name': appmodel.__module__.replace(".", "_"),
'name': appmodel.__name__,
'abstracts': appmodel_abstracts,
'fields': [],
'relations': []
}
# consider given model name ?
def consider(model_name):
if exclude_models and model_name in exclude_models:
return False
return not include_models or model_name in include_models
if not consider(appmodel._meta.object_name):
continue
if verbose_names and appmodel._meta.verbose_name:
model['label'] = appmodel._meta.verbose_name
else:
model['label'] = model['name']
# model attributes
def add_attributes(field):
if verbose_names and field.verbose_name:
label = field.verbose_name
else:
label = field.name
t = type(field).__name__
if isinstance(field, (OneToOneField, ForeignKey)):
t += " ({0})".format(field.rel.field_name)
# TODO: ManyToManyField, GenericRelation
model['fields'].append({
'name': field.name,
'label': label,
'type': t,
'blank': field.blank,
'abstract': field in abstract_fields,
})
# Find all the real attributes. Relations are depicted as graph edges instead of attributes
attributes = [field for field in appmodel._meta.local_fields if not isinstance(field, RelatedField)]
# find primary key and print it first, ignoring implicit id if other pk exists
pk = appmodel._meta.pk
if not appmodel._meta.abstract and pk in attributes:
add_attributes(pk)
for field in attributes:
if skip_field(field):
continue
if not field.primary_key:
add_attributes(field)
# FIXME: actually many_to_many fields aren't saved in this model's db table, so why should we add an attribute-line for them in the resulting graph?
#if appmodel._meta.many_to_many:
# for field in appmodel._meta.many_to_many:
# if skip_field(field):
# continue
# add_attributes(field)
# relations
def add_relation(field, extras=""):
    """Record a graph edge for relational *field* on the current ``model`` dict.

    *extras* carries the Graphviz arrow attributes describing the edge
    kind.  NOTE: reads ``verbose_names``, ``model`` and ``consider``
    from the enclosing scope.
    """
    if verbose_names and field.verbose_name:
        label = field.verbose_name
    else:
        label = field.name
    # Append the reverse accessor name so both ends are identifiable.
    if hasattr(field, 'related_query_name'):
        label += ' (%s)' % field.related_query_name()
    target_model = field.rel.to
    relation = {
        'target_app': target_model.__module__.replace('.', '_'),
        'target': target_model.__name__,
        'type': type(field).__name__,
        'name': field.name,
        'label': label,
        'arrows': extras,
        'needs_node': True,
    }
    # De-duplicate edges and honour the include/exclude filters.
    if relation not in model['relations'] and consider(relation['target']):
        model['relations'].append(relation)
for field in appmodel._meta.local_fields:
if field.attname.endswith('_ptr_id'): # excluding field redundant with inheritance relation
continue
if field in abstract_fields: # excluding fields inherited from abstract classes. they too show as local_fields
continue
if skip_field(field):
continue
if isinstance(field, OneToOneField):
add_relation(field, '[arrowhead=none, arrowtail=none]')
elif isinstance(field, ForeignKey):
add_relation(field, '[arrowhead=none, arrowtail=dot]')
for field in appmodel._meta.local_many_to_many:
if skip_field(field):
continue
if isinstance(field, ManyToManyField):
if (getattr(field, 'creates_table', False) or # django 1.1.
(hasattr(field.rel.through, '_meta') and field.rel.through._meta.auto_created)): # django 1.2
add_relation(field, '[arrowhead=dot arrowtail=dot, dir=both]')
elif isinstance(field, GenericRelation):
add_relation(field, mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'))
if inheritance:
# add inheritance arrows
for parent in appmodel.__bases__:
if hasattr(parent, "_meta"): # parent is a model
l = "multi-table"
if parent._meta.abstract:
l = "abstract"
if appmodel._meta.proxy:
l = "proxy"
l += r"\ninheritance"
_rel = {
'target_app': parent.__module__.replace(".", "_"),
'target': parent.__name__,
'type': "inheritance",
'name': "inheritance",
'label': l,
'arrows': '[arrowhead=empty, arrowtail=none]',
'needs_node': True
}
# TODO: seems as if abstract models aren't part of models.getModels, which is why they are printed by this without any attributes.
if _rel not in model['relations'] and consider(_rel['target']):
model['relations'].append(_rel)
graph['models'].append(model)
graphs.append(graph)
nodes = []
for graph in graphs:
nodes.extend([e['name'] for e in graph['models']])
for graph in graphs:
# don't draw duplication nodes because of relations
for model in graph['models']:
for relation in model['relations']:
if relation['target'] in nodes:
relation['needs_node'] = False
# render templates
t = loader.get_template('django_extensions/graph_models/body.html')
dot += '\n' + t.render(graph)
for graph in graphs:
t = loader.get_template('django_extensions/graph_models/rel.html')
dot += '\n' + t.render(graph)
t = loader.get_template('django_extensions/graph_models/tail.html')
c = Context({})
dot += '\n' + t.render(c)
return dot
def main():
    """Command-line entry point.

    Parses the documented options into a ``kwargs`` dict and prints the
    generated DOT source for the requested Django app labels.
    NOTE: Python 2 syntax (``print`` statement, ``except E, v``).
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hadgi:L:x:X:en",
                    ["help", "all_applications", "disable_fields", "group_models", "include_models=", "inheritance", "verbose_names", "language=", "exclude_columns=", "exclude_models="])
    except getopt.GetoptError, error:
        # Unknown flag: show usage, exit with the getopt error as message.
        print __doc__
        sys.exit(error)
    kwargs = {}
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print __doc__
            sys.exit()
        if opt in ("-a", "--all_applications"):
            kwargs['all_applications'] = True
        if opt in ("-d", "--disable_fields"):
            kwargs['disable_fields'] = True
        if opt in ("-g", "--group_models"):
            kwargs['group_models'] = True
        if opt in ("-i", "--include_models"):
            kwargs['include_models'] = arg
        if opt in ("-e", "--inheritance"):
            kwargs['inheritance'] = True
        if opt in ("-n", "--verbose-names"):
            kwargs['verbose_names'] = True
        if opt in ("-L", "--language"):
            kwargs['language'] = arg
        if opt in ("-x", "--exclude_columns"):
            kwargs['exclude_columns'] = arg
        if opt in ("-X", "--exclude_models"):
            kwargs['exclude_models'] = arg
    # Without positional app labels, -a/--all_applications is required.
    if not args and not kwargs.get('all_applications', False):
        print __doc__
        sys.exit()
    print generate_dot(args, **kwargs)
| bsd-3-clause | 9c3de11ce3534348d166fe1ed1b6020c | 36.89415 | 186 | 0.543811 | 4.331105 | false | false | false | false |
mozilla/mozilla-ignite | apps/challenges/migrations/0040_auto__add_field_submission_collaborators.py | 1 | 19083 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the ``collaborators`` TextField to ``challenges_submission``.

        The column is created with an empty-string default so existing
        rows stay valid; ``keep_default=False`` drops the default from
        the schema once the column exists.
        """
        # Adding field 'Submission.collaborators'
        db.add_column('challenges_submission', 'collaborators', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
    def backwards(self, orm):
        """Reverse: drop the ``collaborators`` column from ``challenges_submission``."""
        # Deleting field 'Submission.collaborators'
        db.delete_column('challenges_submission', 'collaborators')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'challenges.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'challenges.challenge': {
'Meta': {'object_name': 'Challenge'},
'allow_voting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'challenges.exclusionflag': {
'Meta': {'object_name': 'ExclusionFlag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.externallink': {
'Meta': {'object_name': 'ExternalLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'challenges.judgeassignment': {
'Meta': {'unique_together': "(('submission', 'judge'),)", 'object_name': 'JudgeAssignment'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.judgement': {
'Meta': {'unique_together': "(('submission', 'judge'),)", 'object_name': 'Judgement'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.judginganswer': {
'Meta': {'unique_together': "(('judgement', 'criterion'),)", 'object_name': 'JudgingAnswer'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.JudgingCriterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judgement': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['challenges.Judgement']"}),
'rating': ('django.db.models.fields.IntegerField', [], {})
},
'challenges.judgingcriterion': {
'Meta': {'ordering': "('id',)", 'object_name': 'JudgingCriterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_value': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'phases': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'judgement_criteria'", 'blank': 'True', 'through': "orm['challenges.PhaseCriterion']", 'to': "orm['challenges.Phase']"}),
'question': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'challenges.phase': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('challenge', 'name'),)", 'object_name': 'Phase'},
'challenge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': "orm['challenges.Challenge']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 25, 17, 59, 29, 279292)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'})
},
'challenges.phasecriterion': {
'Meta': {'unique_together': "(('phase', 'criterion'),)", 'object_name': 'PhaseCriterion'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.JudgingCriterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Phase']"}),
'weight': ('django.db.models.fields.DecimalField', [], {'default': '10', 'max_digits': '4', 'decimal_places': '2'})
},
'challenges.phaseround': {
'Meta': {'object_name': 'PhaseRound'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Phase']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {})
},
'challenges.submission': {
'Meta': {'ordering': "['-id']", 'object_name': 'Submission'},
'brief_description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Category']"}),
'collaborators': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_winner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Phase']"}),
'phase_round': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.PhaseRound']", 'null': 'True', 'blank': 'True'}),
'sketh_note': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'challenges.submissionhelp': {
'Meta': {'ordering': "('-updated',)", 'object_name': 'SubmissionHelp'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'parent': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['challenges.SubmissionParent']", 'unique': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'challenges.submissionparent': {
'Meta': {'ordering': "('-created',)", 'object_name': 'SubmissionParent'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.submissionversion': {
'Meta': {'unique_together': "(('submission', 'parent'),)", 'object_name': 'SubmissionVersion'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.SubmissionParent']"}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'allow_participation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_sub_projects': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects_following'", 'symmetrical': 'False', 'to': "orm['users.Profile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_project_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'sub_project_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'team_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['topics.Topic']", 'symmetrical': 'False'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'topics.topic': {
'Meta': {'object_name': 'Topic'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['challenges']
| bsd-3-clause | 05a4710e3ac8c4902192efd562538b35 | 78.182573 | 241 | 0.554944 | 3.721334 | false | false | false | false |
mozilla/mozilla-ignite | vendor-local/lib/python/south/hacks/django_1_0.py | 22 | 1992 | """
Hacks for the Django 1.0/1.0.2 releases.
"""
from django.conf import settings
from django.db import models
from django.db.models.loading import AppCache, cache
from django.utils.datastructures import SortedDict
class Hacks:
    """Runtime monkeypatching helpers for the Django 1.0/1.0.2 releases.

    South swaps ``INSTALLED_APPS`` in and out and rebuilds Django's
    global model cache so that migration-defined (fake) models can be
    loaded and later discarded.  Methods come in do/undo pairs:
    ``set_installed_apps``/``reset_installed_apps`` and
    ``clear_app_cache``/``unclear_app_cache``.
    NOTE: Python 2 only (``basestring``).
    """

    def set_installed_apps(self, apps):
        """
        Sets Django's INSTALLED_APPS setting to be effectively the list passed in.
        """
        # Make sure it's a list.
        apps = list(apps)
        # Make sure it contains strings
        if apps:
            assert isinstance(apps[0], basestring), "The argument to set_installed_apps must be a list of strings."
        # Monkeypatch in!  The previous value is parked on the settings
        # object so reset_installed_apps() can restore it later.
        settings.INSTALLED_APPS, settings.OLD_INSTALLED_APPS = (
            apps,
            settings.INSTALLED_APPS,
        )
        self._redo_app_cache()

    def reset_installed_apps(self):
        """
        Undoes the effect of set_installed_apps.
        """
        settings.INSTALLED_APPS = settings.OLD_INSTALLED_APPS
        self._redo_app_cache()

    def _redo_app_cache(self):
        """
        Used to repopulate AppCache after fiddling with INSTALLED_APPS.
        """
        # NOTE(review): this relies on AppCache sharing state between
        # instances (a Borg-style singleton), so resetting the attributes
        # of a fresh instance resets the global cache — confirm against
        # the targeted Django version.
        a = AppCache()
        a.loaded = False
        a.handled = {}
        a.postponed = []
        a.app_store = SortedDict()
        a.app_models = SortedDict()
        a.app_errors = {}
        a._populate()

    def clear_app_cache(self):
        """
        Clears the contents of AppCache to a blank state, so new models
        from the ORM can be added.
        """
        # Keep the displaced registry so unclear_app_cache() can restore it.
        self.old_app_models, cache.app_models = cache.app_models, {}

    def unclear_app_cache(self):
        """
        Reversed the effects of clear_app_cache.
        """
        cache.app_models = self.old_app_models
        # Drop the get_models() memoisation so restored models are visible.
        cache._get_models_cache = {}

    def repopulate_app_cache(self):
        """
        Rebuilds AppCache with the real model definitions.
        """
        cache._populate()
| bsd-3-clause | e4ad673c04245bc38d2ba349903ec26a | 25.573333 | 115 | 0.560241 | 4.15 | false | false | false | false |
mozilla/mozilla-ignite | apps/challenges/management/commands/identicon.py | 3 | 7520 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
identicon.py
identicon python implementation.
by Shin Adachi <shn@glucose.jp>
= usage =
== commandline ==
>>> python identicon.py [code]
== python ==
>>> import identicon
>>> identicon.render_identicon(code, size)
Return a PIL Image class instance which have generated identicon image.
```size``` specifies `patch size`. Generated image size is 3 * ```size```.
"""
# g
# PIL Modules
import Image
import ImageDraw
import ImagePath
import ImageColor
__all__ = ['render_identicon', 'IdenticonRendererBase']
class Matrix2D(list):
    """3x3 transform matrix used to rotate/translate/scale patch paths.

    Stored flat, row-major, as nine floats:
    ``[m00, m01, m02, m10, m11, m12, m20, m21, m22]``.
    NOTE: ``range`` is used instead of ``xrange`` — behaviourally
    identical for these loops and valid on both Python 2 and 3.
    """

    def __init__(self, initial=None):
        # Avoid the shared-mutable-default pitfall of ``initial=[0.] * 9``;
        # None builds a fresh zero matrix on every call.
        if initial is None:
            initial = [0.] * 9
        assert isinstance(initial, list) and len(initial) == 9
        list.__init__(self, initial)

    def clear(self):
        """Zero all nine cells in place."""
        for i in range(9):
            self[i] = 0.

    def set_identity(self):
        """Reset to the identity matrix in place.

        BUG FIX: the original set cells 0, 1 and 2 (the first *row*) to
        one, producing a non-identity matrix; the ones belong on the
        diagonal — cells 0, 4 and 8.
        """
        self.clear()
        for i in range(3):
            self[i * 4] = 1.

    def __str__(self):
        return '[%s]' % ', '.join('%3.2f' % v for v in self)

    def __mul__(self, other):
        """Matrix product used to compose transforms (``self * other``)."""
        r = []
        if isinstance(other, Matrix2D):
            for y in range(3):
                for x in range(3):
                    v = 0.0
                    for i in range(3):
                        v += (self[i * 3 + x] * other[y * 3 + i])
                    r.append(v)
        else:
            # Only Matrix2D * Matrix2D is defined.
            raise NotImplementedError
        return Matrix2D(r)

    def for_PIL(self):
        """Return the first six coefficients, as PIL affine transforms expect."""
        return self[0:6]

    @classmethod
    def translate(kls, x, y):
        """Build a translation matrix moving by (x, y)."""
        return kls([1.0, 0.0, float(x),
                    0.0, 1.0, float(y),
                    0.0, 0.0, 1.0])

    @classmethod
    def scale(kls, x, y):
        """Build a scaling matrix with factors (x, y)."""
        return kls([float(x), 0.0, 0.0,
                    0.0, float(y), 0.0,
                    0.0, 0.0, 1.0])

    # An arbitrary-angle ``rotate`` once lived here (kept for reference;
    # it needed ``import math``):
    #
    #     @classmethod
    #     def rotate(kls, theta, pivot=None):
    #         c = math.cos(theta)
    #         s = math.sin(theta)
    #         matR = kls([c, -s, 0., s, c, 0., 0., 0., 1.])
    #         if not pivot:
    #             return matR
    #         return kls.translate(-pivot[0], -pivot[1]) * matR * \
    #                kls.translate(*pivot)

    @classmethod
    def rotateSquare(kls, theta, pivot=None):
        """Build a rotation by *theta* quarter-turns, optionally about *pivot*."""
        theta = theta % 4
        # cos/sin of theta * 90 degrees, looked up exactly.
        c = [1., 0., -1., 0.][theta]
        s = [0., 1., 0., -1.][theta]
        matR = kls([c, -s, 0., s, c, 0., 0., 0., 1.])
        if not pivot:
            return matR
        # Conjugate by translation so the rotation happens about *pivot*.
        return kls.translate(-pivot[0], -pivot[1]) * matR * \
               kls.translate(*pivot)
class IdenticonRendererBase(object):
    """Template-method base class for identicon renderers.

    Subclasses supply ``PATH_SET`` (patch outlines) and ``decode()``;
    ``render()`` assembles a 3x3 grid of patches into a PIL image.
    NOTE: Python 2 era code (``xrange``); depends on PIL.
    """
    # Patch outlines, indexed by patch type; populated by subclasses.
    PATH_SET = []

    def __init__(self, code):
        """
        @param code code for icon (coerced to int if necessary)
        """
        if not isinstance(code, int):
            code = int(code)
        self.code = code

    def render(self, size):
        """
        render identicon to PIL.Image

        @param size identicon patchsize. (image size is 3 * [size])
        @return PIL.Image
        """
        # decode the code
        middle, corner, side, foreColor, backColor = self.decode(self.code)

        # make image
        image = Image.new("RGB", (size * 3, size * 3))
        draw = ImageDraw.Draw(image)

        # fill background
        draw.rectangle((0, 0, image.size[0], image.size[1]), fill=0)

        kwds = {
            'draw': draw,
            'size': size,
            'foreColor': foreColor,
            'backColor': backColor}
        # middle patch
        self.drawPatch((1, 1), middle[2], middle[1], middle[0], **kwds)

        # side patches: top, right, bottom, left cells, each rotated a
        # further quarter-turn (side[2] + 1 + i).
        kwds['type'] = side[0]
        for i in xrange(4):
            pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i]
            self.drawPatch(pos, side[2] + 1 + i, side[1], **kwds)

        # corner patches: NW, NE, SE, SW cells, likewise rotated.
        kwds['type'] = corner[0]
        for i in xrange(4):
            pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i]
            self.drawPatch(pos, corner[2] + 1 + i, corner[1], **kwds)

        return image

    def drawPatch(self, pos, turn, invert, type, draw, size, foreColor,
            backColor):
        """
        Draw one patch into grid cell *pos*.

        @param pos    (col, row) cell of the 3x3 grid
        @param turn   number of quarter-turn rotations to apply
        @param invert swap foreground/background for this patch
        @param type   index into PATH_SET (shadows the builtin ``type``)
        @param size   patch size in pixels
        """
        path = self.PATH_SET[type]
        if not path:
            # blank patch: draw the full unit square with colours swapped
            invert = not invert
            path = [(0., 0.), (1., 0.), (1., 1.), (0., 1.), (0., 0.)]
        patch = ImagePath.Path(path)
        if invert:
            foreColor, backColor = backColor, foreColor
        # Rotate about the patch centre, move to the cell, scale to pixels.
        mat = Matrix2D.rotateSquare(turn, pivot=(0.5, 0.5)) *\
              Matrix2D.translate(*pos) *\
              Matrix2D.scale(size, size)
        patch.transform(mat.for_PIL())
        draw.rectangle((pos[0] * size, pos[1] * size, (pos[0] + 1) * size,
            (pos[1] + 1) * size), fill=backColor)
        draw.polygon(patch, fill=foreColor, outline=foreColor)

    ### virtual functions
    def decode(self, code):
        """Split *code* into patch/colour specs; must be overridden."""
        raise NotImplementedError
class DonRenderer(IdenticonRendererBase):
    """
    Don Park's implementation of identicon
    see : http://www.docuverse.com/blog/donpark/2007/01/19/identicon-updated-and-source-released
    """
    # 15 patch outlines on a 4x4 grid; entry 15 is the blank patch.
    PATH_SET = [
        [(0, 0), (4, 0), (4, 4), (0, 4)],  # 0
        [(0, 0), (4, 0), (0, 4)],
        [(2, 0), (4, 4), (0, 4)],
        [(0, 0), (2, 0), (2, 4), (0, 4)],
        [(2, 0), (4, 2), (2, 4), (0, 2)],  # 4
        [(0, 0), (4, 2), (4, 4), (2, 4)],
        [(2, 0), (4, 4), (2, 4), (3, 2), (1, 2), (2, 4), (0, 4)],
        [(0, 0), (4, 2), (2, 4)],
        [(1, 1), (3, 1), (3, 3), (1, 3)],  # 8
        [(2, 0), (4, 0), (0, 4), (0, 2), (2, 2)],
        [(0, 0), (2, 0), (2, 2), (0, 2)],
        [(0, 2), (4, 2), (2, 4)],
        [(2, 2), (4, 4), (0, 4)],
        [(2, 0), (2, 2), (0, 2)],
        [(0, 0), (2, 0), (0, 2)],
        []]  # 15

    # Patch types usable in the centre cell.
    MIDDLE_PATCH_SET = [0, 4, 8, 15]

    # modify path set: normalise the 4x4 grid coordinates to the unit
    # square and close each polygon by repeating its first vertex.
    # NOTE(review): Python 2 only — under Python 3 ``map`` returns an
    # iterator and ``p + p[:1]`` would raise TypeError.
    for idx in xrange(len(PATH_SET)):
        if PATH_SET[idx]:
            p = map(lambda vec: (vec[0] / 4.0, vec[1] / 4.0), PATH_SET[idx])
            PATH_SET[idx] = p + p[:1]

    def decode(self, code):
        """Split *code* into (middle, corner, side, foreColor, backColor).

        Bit layout (low to high): 0-1 middle type, 2 middle invert,
        3-6 corner type, 7 corner invert, 8-9 corner turn, 10-13 side
        type, 14 side invert, 15-16 side turn, 16-20 blue, 21-25 green,
        27-31 red.  NOTE: side turn and blue overlap at bit 16 and bit
        26 is unused — quirks inherited from the original algorithm.
        """
        # decode the code
        middleType = self.MIDDLE_PATCH_SET[code & 0x03]
        middleInvert= (code >> 2) & 0x01
        cornerType = (code >> 3) & 0x0F
        cornerInvert= (code >> 7) & 0x01
        cornerTurn = (code >> 8) & 0x03
        sideType = (code >> 10) & 0x0F
        sideInvert = (code >> 14) & 0x01
        sideTurn = (code >> 15) & 0x03
        blue = (code >> 16) & 0x1F
        green = (code >> 21) & 0x1F
        red = (code >> 27) & 0x1F

        # 5-bit colour channels widened to 8 bits; background is white.
        foreColor = (red << 3, green << 3, blue << 3)

        # Middle patch never turns (third element fixed at 0).
        return (middleType, middleInvert, 0),\
               (cornerType, cornerInvert, cornerTurn),\
               (sideType, sideInvert, sideTurn),\
               foreColor, ImageColor.getrgb('white')
def render_identicon(code, size, renderer=None):
    """Render *code* as an identicon PIL image of edge ``3 * size`` pixels.

    *renderer* defaults to :class:`DonRenderer` when falsy.
    """
    chosen = renderer if renderer else DonRenderer
    return chosen(code).render(size)
if __name__ == '__main__':
    # Script mode: render one PNG per code given on the command line.
    # NOTE: Python 2 syntax (``print`` statement).
    import sys

    if len(sys.argv) < 2:
        print 'usage: python identicon.py [CODE]....'
        raise SystemExit

    for code in sys.argv[1:]:
        # Accept hex (0x.../0X...), octal (leading 0) or decimal codes.
        if code.startswith('0x') or code.startswith('0X'):
            code = int(code[2:], 16)
        elif code.startswith('0'):
            code = int(code[1:], 8)
        else:
            code = int(code)

        # 24px patches -> 72x72 image, saved as <code in hex>.png
        icon = render_identicon(code, 24)
        icon.save('%08x.png' % code, 'PNG')
| bsd-3-clause | 832ba686ac8d94b3221222c38e1ef3be | 28.147287 | 96 | 0.465824 | 3.275261 | false | false | false | false |
mozilla/mozilla-ignite | apps/events/migrations/0002_auto__add_field_event_featured__add_field_event_featured_image.py | 2 | 6590 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add ``featured`` (boolean, default False) and
        ``featured_image`` (nullable ImageField) columns to ``events_event``.
        """
        # Adding field 'Event.featured'
        db.add_column('events_event', 'featured', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)

        # Adding field 'Event.featured_image'
        db.add_column('events_event', 'featured_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.featured'
db.delete_column('events_event', 'featured')
# Deleting field 'Event.featured_image'
db.delete_column('events_event', 'featured_image')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {})
},
'users.link': {
'Meta': {'object_name': 'Link'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'service': ('django.db.models.fields.CharField', [], {'default': "u'other'", 'max_length': '50'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'confirmation_token': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Link']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['events']
| bsd-3-clause | cf9f81cd72f81fdb500678225d6d98d6 | 66.244898 | 182 | 0.555842 | 3.725269 | false | false | false | false |
mitsuhiko/zine | zine/utils/text.py | 1 | 4714 | # -*- coding: utf-8 -*-
"""
zine.utils.text
~~~~~~~~~~~~~~~
This module provides various text utility functions.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import unicodedata
from datetime import datetime
from itertools import starmap
from urlparse import urlparse
from werkzeug import url_quote
from zine._dynamic.translit_tab import LONG_TABLE, SHORT_TABLE, SINGLE_TABLE
#: characters treated as word separators / stripped by the slug functions
_punctuation_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
#: matches a trailing integer, used by `increment_string`
_string_inc_re = re.compile(r'(\d+)$')
#: matches %placeholder% markers in the configured post URL format
_placeholder_re = re.compile(r'%(\w+)%')
def gen_slug(text, delim=u'-'):
    """Generate a slug for *text*, honoring the blog configuration.

    Dispatches to `gen_ascii_slug` when the ``ascii_slugs`` setting is
    enabled and to `gen_unicode_slug` otherwise.
    """
    from zine.application import get_application
    use_ascii = get_application().cfg['ascii_slugs']
    slug_func = gen_ascii_slug if use_ascii else gen_unicode_slug
    return slug_func(text, delim)
def gen_ascii_slug(text, delim=u'-'):
    """Generate an ASCII-only slug by transliterating every word and
    dropping words that become empty in the process.
    """
    words = (_punctuation_re.sub(u'', transliterate(word))
             for word in _punctuation_re.split(text.lower()))
    return unicode(delim.join(word for word in words if word))
def gen_unicode_slug(text, delim=u'-'):
    """Generate a slug that may contain any unicode word characters."""
    parts = _punctuation_re.split(text.lower())
    return unicode(delim.join(parts))
def gen_timestamped_slug(slug, content_type, pub_date=None):
    """Generate a timestamped slug, suitable for use as final URL path.

    For entries the configured ``post_url_format`` is expanded: every
    ``%placeholder%`` that names a known slug part (year, month, day,
    hour, minute, second, slug) is substituted, unknown placeholders are
    left untouched.  Other content types simply get the blog URL prefix
    prepended.

    :param slug: the plain slug of the post.
    :param content_type: the post's content type, e.g. ``'entry'``.
    :param pub_date: publication date in UTC; defaults to the current time.
    """
    from zine.application import get_application
    from zine.i18n import to_blog_timezone
    cfg = get_application().cfg
    if pub_date is None:
        pub_date = datetime.utcnow()
    # URLs are built in the blog's local timezone, not in UTC
    pub_date = to_blog_timezone(pub_date)
    prefix = cfg['blog_url_prefix'].strip(u'/')
    if prefix:
        prefix += u'/'
    if content_type == 'entry':
        # if set, date parts are zero-padded to a fixed number of digits
        fixed = cfg['fixed_url_date_digits']
        def handle_match(match):
            # resolve a single %placeholder%; keep unknown ones verbatim
            handler = _slug_parts.get(match.group(1))
            if handler is None:
                return match.group(0)
            return handler(pub_date, slug, fixed)
        full_slug = prefix + _placeholder_re.sub(
            handle_match, cfg['post_url_format'])
    else:
        full_slug = u'%s%s' % (prefix, slug)
    return full_slug
def increment_string(string):
    """Return *string* with its trailing number bumped by one.

    A string without a numeric suffix gets ``2`` appended:

    >>> increment_string(u'test')
    u'test2'
    >>> increment_string(u'test2')
    u'test3'
    """
    match = _string_inc_re.search(string)
    if match is not None:
        head = string[:match.start()]
        return head + unicode(int(match.group(1)) + 1)
    return string + u'2'
def transliterate(string, table='long'):
    """Transliterate *string* to 8 bit using the named table.

    Valid table names are ``'long'``, ``'short'`` and ``'single'``.
    """
    tables = {
        'long': LONG_TABLE,
        'short': SHORT_TABLE,
        'single': SINGLE_TABLE,
    }
    mapping = tables[table]
    return unicodedata.normalize('NFKC', unicode(string)).translate(mapping)
def wrap(text, width):
    r"""A word-wrap function that preserves existing line breaks
    and most spaces in the text. Expects that existing line breaks are
    posix newlines (\n).
    """
    # code from http://code.activestate.com/recipes/148061/
    # The reduce folds the text word by word onto the result.  The
    # separator is chosen by indexing the two-character string ' \n':
    # index 0 (a space) while the current line plus the next word still
    # fits into *width*, index 1 (a newline) once it would overflow.
    # len(line) - line.rfind('\n') - 1 is the length of the line after
    # the last newline already present in the accumulated result.
    return reduce(lambda line, word, width=width: '%s%s%s' %
                  (line,
                   ' \n'[len(line) - line.rfind('\n') - 1 +
                         (word and len(word.split('\n', 1)[0]) or 0) >= width], word),
                  text.split(' '))
def build_tag_uri(app, date, resource, identifier):
    """Build a unique tag URI following RFC 4151.

    The authority part is derived from the configured blog URL (port
    stripped), the date provides the tagging date and resource plus
    identifier form the URL-quoted specific part.
    """
    parsed = urlparse(app.cfg['blog_url'])
    host = parsed[1].split(':', 1)[0]
    path = parsed[2].strip('/')
    if path:
        path = ',' + path
    if not isinstance(identifier, basestring):
        identifier = str(identifier)
    return 'tag:%s,%s:%s/%s;%s' % (host, date.strftime('%Y-%m-%d'), path,
                                   url_quote(resource), url_quote(identifier))
def _make_date_slug_part(key, places):
    """Build a ``(key, handler)`` pair for one date-based URL placeholder.

    The returned handler formats the attribute *key* of the passed
    datetime, zero padded to *places* digits when fixed-width URLs are
    requested.
    """
    padded_format = u'%%0%dd' % places
    def handler(datetime, slug, fixed):
        value = getattr(datetime, key)
        return padded_format % value if fixed else unicode(value)
    return key, handler
#: a dict of slug part handlers for gen_timestamped_slug; maps placeholder
#: names (year, month, day, hour, minute, second, slug) to callables with
#: the signature (datetime, slug, fixed)
_slug_parts = dict(starmap(_make_date_slug_part, [
    ('year', 4),
    ('month', 2),
    ('day', 2),
    ('hour', 2),
    ('minute', 2),
    ('second', 2)
]))
_slug_parts['slug'] = lambda d, slug, f: slug
| bsd-3-clause | 0215999cfb507734cc90e1f6ca0a46b2 | 29.412903 | 86 | 0.595672 | 3.544361 | false | false | false | false |
mitsuhiko/zine | zine/utils/redirects.py | 1 | 2312 | # -*- coding: utf-8 -*-
"""
zine.utils.redirects
~~~~~~~~~~~~~~~~~~~~
This module implements the access to the redirect table.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from zine.application import get_application
from zine.database import redirects, db
from zine.utils.http import make_external_url
def _strip_url(url):
    """Reduce *url* to a path relative to the configured blog URL.

    A URL below the blog root loses that prefix; leading slashes are
    removed in any case.
    """
    blog_url = get_application().cfg['blog_url']
    if url.startswith(blog_url):
        url = url[len(blog_url):]
    return url.lstrip('/')
def lookup_redirect(url):
    """Look up a redirect for *url*.

    Returns the absolute target URL, or `None` when no redirect is
    registered for the given URL.
    """
    stripped = _strip_url(url)
    row = db.execute(redirects.select(
        redirects.c.original == stripped
    )).fetchone()
    if row:
        return make_external_url(row.new)
def register_redirect(original, new_url):
    """Register a new redirect. Also an old one that may still exist."""
    original = _strip_url(original)
    # remove a previously registered redirect for the same original URL
    # NOTE(review): passing the filter as a keyword argument here relies
    # on legacy SQLAlchemy behavior -- verify the DELETE is actually
    # constrained to this row and not unfiltered.
    db.execute(redirects.delete(original=original))
    db.execute(redirects.insert(), dict(
        original=original,
        new=_strip_url(new_url)
    ))
def unregister_redirect(url):
    """Remove the redirect entry for *url*.

    Raises `ValueError` when no redirect is registered for it.
    """
    result = db.execute(
        redirects.delete(redirects.c.original == _strip_url(url)))
    if not result.rowcount:
        raise ValueError('no such URL')
def get_redirect_map():
    """Return a dict mapping original paths to external target URLs."""
    mapping = {}
    for row in db.execute(redirects.select()):
        mapping[row.original] = make_external_url(row.new)
    return mapping
def change_url_prefix(old, new):
    """Changes a URL prefix from `old` to `new`. This does not update the
    configuration but renames all slugs there were below the old one and
    puts it to the new and also registers redirects.
    """
    from zine.models import Post
    def _rewrite(s):
        # normalize a prefix to either '' or 'prefix/' form
        s = s.strip('/')
        if s:
            s += '/'
        return s
    old = _rewrite(old)
    new = _rewrite(new)
    cut_off = len(old)
    # find all posts whose slug lives below the old prefix
    # NOTE(review): the '%' -> '%%' replacement is presumably meant to
    # escape LIKE wildcards inside the prefix -- confirm this works with
    # the database backend in use.
    posts = Post.query.filter(
        Post.slug.like(old.replace('%', '%%') + '%')
    ).all()
    for post in posts:
        new_slug = new + post.slug[cut_off:]
        # register the redirect first so the old URL keeps working
        register_redirect(post.slug, new_slug)
        post.slug = new_slug
| bsd-3-clause | b940f30bb2c0b2fd0280f0151b8b7a73 | 27.195122 | 78 | 0.618945 | 3.711075 | false | false | false | false |
mitsuhiko/zine | zine/importers/feed.py | 1 | 19144 | # -*- coding: utf-8 -*-
"""
zine.importers.feed
~~~~~~~~~~~~~~~~~~~
This importer can import web feeds. Currently it is limited to ATOM
plus optional Zine extensions.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from pickle import loads
from lxml import etree
from zine.application import get_application
from zine.i18n import _, lazy_gettext
from zine.importers import Importer, Blog, Tag, Category, Author, Post, Comment
from zine.forms import FeedImportForm
from zine.utils import log
from zine.utils.admin import flash
from zine.utils.dates import parse_iso8601
from zine.utils.xml import Namespace, to_text
from zine.utils.http import redirect_to
from zine.utils.zeml import load_parser_data
from zine.utils.exceptions import UserException
from zine.utils.net import open_url
from zine.zxa import ZINE_NS, ATOM_NS, XML_NS, ZINE_TAG_URI, ZINE_CATEGORY_URI
#: namespace helpers used for element lookups in the parsers below
#: (e.g. ``tree.findall(atom.entry)``)
zine = Namespace(ZINE_NS)
atom = Namespace(ATOM_NS)
xml = Namespace(XML_NS)
class SkipItem(Exception):
    """Control-flow exception used by feed importer extensions.

    Raised by
    * Extension.postprocess_post() if the post should be skipped
    * Extension.lookup_author() if the author should be skipped
    * Extension.tag_or_category() if the category should be skipped
    """
def _get_text_content(elements, fallback=True):
    """Return the text content from the best matching element.

    Elements explicitly typed ``text`` always win.  With *fallback*
    enabled an ``html`` element (or, failing that, the first element) is
    converted to text and an empty element list yields ``u''``; without
    fallback the result is `None` when no text element exists.
    """
    for element in elements:
        if element.attrib.get('type') == 'text':
            return element.text or u''
    if not fallback:
        return None
    if not elements:
        return u''
    for element in elements:
        if element.attrib.get('type') == 'html':
            return to_text(element)
    return to_text(elements[0])
def _get_html_content(elements):
    """Return the raw markup from the best matching element.

    This is totally against the specification but this importer assumes
    that the text representation is unprocessed markup language from the
    blog -- most likely a dialect of HTML or a lightweight markup
    language, in which case only the parser has to be switched later.
    """
    html_elements = [e for e in elements if e.attrib.get('type') == 'html']
    if html_elements:
        return html_elements[0].text
    if elements:
        return elements[0].text
    return u''
def _to_bool(value):
    """Parse a ``yes``/``no`` literal into a boolean.

    Raises `ValueError` for any other input.
    """
    literal = value.strip()
    if literal not in ('yes', 'no'):
        raise ValueError('invalid boolean literal, expected yes/no')
    return literal == 'yes'
def _pickle(value):
    """Unpickle a base64-encoded feed value, or return None when empty.

    SECURITY NOTE(review): pickle.loads executes arbitrary code when fed
    malicious data -- only feeds from trusted sources should be imported.
    """
    if value:
        return loads(value.decode('base64'))
def _parser_data(value):
    """Load base64-encoded parser data from the feed, or None when empty."""
    if value:
        return load_parser_data(value.decode('base64'))
def parse_feed(fd):
    """Parse the feed in the file-like object *fd* into a blog object.

    Dispatches on the root tag to the matching parser class and raises
    `FeedImportError` for unrecognized feed formats.
    """
    root = etree.parse(fd).getroot()
    if root.tag == atom.feed:
        parser_class = AtomParser
    elif root.tag == 'rss':
        parser_class = RSSParser
    else:
        raise FeedImportError(_('Unknown feed uploaded.'))
    parser = parser_class(root)
    parser.parse()
    return parser.blog
class Parser(object):
    """Base class for the feed parsers.

    A parser accumulates tags, categories, authors and posts while
    walking the feed tree and finally exposes the result as ``blog``.
    """
    #: feed type identifier, matched against Extension.feed_types
    feed_type = None
    def __init__(self, tree):
        self.app = get_application()
        self.tree = tree
        self.tags = []
        self.categories = []
        self.authors = []
        self.posts = []
        self.blog = None
        # instantiate only the extensions registered for this feed type
        self.extensions = [extension(self.app, self, tree)
                           for extension in self.app.feed_importer_extensions
                           if self.feed_type in extension.feed_types]
    def find_tag(self, **criterion):
        """Return the first collected tag matching the single criterion."""
        return self._find_criterion(self.tags, criterion)
    def find_category(self, **criterion):
        """Return the first collected category matching the criterion."""
        return self._find_criterion(self.categories, criterion)
    def find_author(self, **criterion):
        """Return the first collected author matching the criterion."""
        return self._find_criterion(self.authors, criterion)
    def find_post(self, **criterion):
        """Return the first collected post matching the criterion."""
        return self._find_criterion(self.posts, criterion)
    def _find_criterion(self, sequence, d):
        # exactly one attribute=value pair is expected; returns None when
        # nothing in *sequence* matches
        if len(d) != 1:
            raise TypeError('one criterion expected')
        key, value = d.iteritems().next()
        for item in sequence:
            if getattr(item, key, None) == value:
                return item
class RSSParser(Parser):
    """Placeholder parser for RSS feeds.

    RSS import is not implemented; instantiating this parser always
    raises a `FeedImportError`.
    """
    feed_type = 'rss'
    def __init__(self, tree):
        raise FeedImportError(_('Importing of RSS feeds is currently '
                                'not possible.'))
class AtomParser(Parser):
    """Parser for ATOM feeds.

    Extensions get a chance to handle authors, tags/categories and
    post-processing first; unhandled items fall back to the generic
    heuristics implemented here.
    """
    feed_type = 'atom'
    def __init__(self, tree):
        Parser.__init__(self, tree)
        self.global_author = None
        # use for the category fallback handling if no extension
        # takes over the handling.
        self._categories_by_term = {}
        # and the same for authors
        self._authors_by_username = {}
        self._authors_by_email = {}
    def parse(self):
        """Walk the feed, collect all posts and assemble the Blog."""
        # atom allows the author to be defined for the whole feed
        # before the entries. Capture it here.
        self.global_author = self.tree.find(atom.author)
        for entry in self.tree.findall(atom.entry):
            post = self.parse_post(entry)
            if post is not None:
                self.posts.append(post)
        self.blog = Blog(
            self.tree.findtext(atom.title),
            self.tree.findtext(atom.link),
            self.tree.findtext(atom.subtitle),
            self.tree.attrib.get(xml.lang, u'en'),
            self.tags,
            self.categories,
            self.posts,
            self.authors
        )
        self.blog.element = self.tree
        for extension in self.extensions:
            extension.handle_root(self.blog)
    def parse_post(self, entry):
        """Parse one <entry> element into a Post.

        Returns `None` when an extension raises `SkipItem` during
        post-processing.
        """
        # parse the dates first.
        updated = parse_iso8601(entry.findtext(atom.updated))
        published = entry.findtext(atom.published)
        if published is not None:
            published = parse_iso8601(published)
        else:
            published = updated
        # figure out tags and categories by invoking the
        # callbacks on the extensions first. If no extension
        # was able to figure out what to do with it, we treat it
        # as category.
        tags, categories = self.parse_categories(entry)
        link = entry.find(atom.link)
        if link is not None:
            link = link.attrib.get('href')
        post = Post(
            None,
            _get_text_content(entry.findall(atom.title)),
            link,
            published,
            self.parse_author(entry),
            # XXX: the Post is prefixing the intro before the actual
            # content. This is the default Zine behavior and makes sense
            # for Zine. However nearly every blog works differently and
            # treats summary completely different from content. We should
            # think about that.
            None,
            _get_html_content(entry.findall(atom.content)),
            tags,
            categories,
            parser='html',
            updated=updated,
            uid=entry.findtext(atom.id)
        )
        post.element = entry
        # now parse the comments for the post
        self.parse_comments(post)
        for extension in self.extensions:
            try:
                extension.postprocess_post(post)
            except SkipItem:
                return None
        return post
    def parse_author(self, entry):
        """Lookup the author for the given entry."""
        def _remember_author(author):
            # index the author by email and username for later reuse
            if author.email is not None and \
               author.email not in self._authors_by_email:
                self._authors_by_email[author.email] = author
            if author.username is not None and \
               author.username not in self._authors_by_username:
                self._authors_by_username[author.username] = author
        author = entry.find(atom.author)
        if author is None:
            author = self.global_author
        email = author.findtext(atom.email)
        username = author.findtext(atom.name)
        uri = author.findtext(atom.uri)
        # extensions get the first shot at resolving the author
        for extension in self.extensions:
            try:
                rv = extension.lookup_author(author, entry, username, email, uri)
            except SkipItem:
                return None
            if rv is not None:
                _remember_author(rv)
                return rv
        # fall back to authors seen earlier, then create a new one
        if email is not None and email in self._authors_by_email:
            return self._authors_by_email[email]
        if username in self._authors_by_username:
            return self._authors_by_username[username]
        author = Author(username, email)
        _remember_author(author)
        self.authors.append(author)
        return author
    def parse_categories(self, entry):
        """Is passed an <entry> element and parses all <category>
        child elements. Returns a tuple with ``(tags, categories)``.
        """
        def _remember_category(category, element):
            term = element.attrib['term']
            if term not in self._categories_by_term:
                self._categories_by_term[term] = category
        tags = []
        categories = []
        for category in entry.findall(atom.category):
            # extensions decide first whether this is a tag or a category
            for extension in self.extensions:
                try:
                    rv = extension.tag_or_category(category)
                except SkipItem:
                    break
                if rv is not None:
                    if isinstance(rv, Tag):
                        tags.append(rv)
                    else:
                        categories.append(rv)
                    _remember_category(rv, category)
                    break
            else:
                # no extension handled it: treat the element as category
                rv = self._categories_by_term.get(category.attrib['term'])
                if rv is None:
                    rv = Category(category.attrib['term'],
                                  category.attrib.get('label'))
                    _remember_category(rv, category)
                    self.categories.append(rv)
                categories.append(rv)
        return tags, categories
    def parse_comments(self, post):
        """Parse the comments for the post."""
        for extension in self.extensions:
            post.comments.extend(extension.parse_comments(post) or ())
class FeedImportError(UserException):
    """Raised if the system was unable to parse or import the feed."""
class FeedImporter(Importer):
    """Importer that accepts ATOM feeds, optionally via download URL."""
    name = 'feed'
    title = lazy_gettext(u'Feed Importer')
    description = lazy_gettext(u'Handles ATOM feeds with optional extensions '
                               u'such as those exported by Zine itself. '
                               u'Plugins can add further extensions to be '
                               u'recognized by this importer.')
    def configure(self, request):
        """Render/handle the admin configuration page for this importer.

        On a valid POST the feed (either uploaded or downloaded from the
        given URL) is parsed and queued; errors are flashed and the form
        is shown again.
        """
        form = FeedImportForm()
        if request.method == 'POST' and form.validate(request.form):
            feed = request.files.get('feed')
            # a download URL, when given, overrides the uploaded file
            if form.data['download_url']:
                try:
                    feed = open_url(form.data['download_url']).stream
                except Exception, e:
                    log.exception(_('Error downloading feed'))
                    flash(_(u'Error downloading from URL: %s') % e, 'error')
            if not feed:
                return redirect_to('import/feed')
            try:
                blog = parse_feed(feed)
            except Exception, e:
                log.exception(_(u'Error parsing uploaded file'))
                flash(_(u'Error parsing feed: %s') % e, 'error')
            else:
                self.enqueue_dump(blog)
                flash(_(u'Added imported items to queue.'))
                return redirect_to('admin/import')
        return self.render_admin_page('admin/import_feed.html',
                                      form=form.as_widget())
class Extension(object):
    """Extensions are instantiated for each parsing process."""
    #: feed type identifiers (e.g. 'atom') this extension applies to;
    #: matched against Parser.feed_type before instantiation
    feed_types = frozenset()
    def __init__(self, app, parser, root):
        self.app = app          # the active zine application
        self.parser = parser    # the Parser driving this import
        self.root = root        # root element of the feed tree
    def handle_root(self, blog):
        """Called after the whole feed was parsed into a blog object."""
    def postprocess_post(self, post):
        """Postprocess the post.
        If this method raises `SkipItem`, the post is thrown away without
        giving it to other Extensions.
        """
    def tag_or_category(self, element):
        """Passed a <category> element for Atom feeds. Has to return a
        category or tag object or `None` if it's not handled by this
        extension.
        Categories and tags have to be stored in `parser.categories` or
        `parser.tags` so that the category/tag is actually unique. The
        extension also has to look there first for matching categories.
        If this method raises `SkipItem`, the category is immediately
        thrown away without giving it to other Extensions or using the
        default heuristic to create a category.
        """
    def lookup_author(self, author, entry, username, email, uri):
        """Lookup the author for an element. `author` is an element
        that points to the author relevant element for the feed.
        `entry` points to the whole entry element.
        Authors have to be stored in `parser.authors` to ensure they
        are unique. Extensions have to look there first for matching
        author objects. If an extension does not know how to handle
        the element `None` must be returned.
        If this method raises `SkipItem`, the author is immediately
        thrown away without giving it to other Extensions or using
        the default heuristic to create an author object.
        """
    def parse_comments(self, post):
        """Parse the comments for the given post. If the extension
        could locate comments for this post it has to return a list
        of those comments, otherwise return `None`.
        """
class ZEAExtension(Extension):
    """Handles Zine Atom extensions. This extension can handle the extra
    namespace used for ZEA feeds as generated by the Zine export. Because
    in a feed with Zine extensions the rules are pretty strict we don't
    look up authors, tags or categories on the parser object like we should
    but have a mapping for those directly on the extension.
    """
    feed_types = frozenset(['atom'])
    def __init__(self, app, parser, root):
        Extension.__init__(self, app, parser, root)
        # local caches keyed by dependency id / term
        self._authors = {}
        self._tags = {}
        self._categories = {}
        self._dependencies = root.find(zine.dependencies)
        # compiled XPath to resolve a <zine:user> by its dependency id
        self._lookup_user = etree.XPath('./zine:user[@dependency=$id]',
                                        namespaces={'zine': ZINE_NS})
    def _parse_config(self, element):
        """Parse a <zine:configuration> element into a plain dict."""
        result = {}
        if element is not None:
            for element in element.findall(zine.item):
                result[element.attrib['key']] = element.text
        return result
    def _get_author(self, dependency):
        """Return (and cache) the Author for the given dependency id."""
        author = self._authors.get(dependency)
        if author is None:
            element = self._lookup_user(self._dependencies,
                                        id=str(dependency))[0]
            author = Author(
                element.findtext(zine.username),
                element.findtext(zine.email),
                element.findtext(zine.real_name),
                element.findtext(zine.description),
                element.findtext(zine.www),
                element.findtext(zine.pw_hash),
                _to_bool(element.findtext(zine.is_author)),
                _pickle(element.findtext(zine.extra))
            )
            # restore the privileges known to this application
            for privilege in element.findall(zine.privilege):
                p = self.app.privileges.get(privilege.text)
                if p is not None:
                    author.privileges.add(p)
            self._authors[dependency] = author
            self.parser.authors.append(author)
        return author
    def _parse_tag(self, element):
        """Return (and cache) the Tag for a <category> element."""
        term = element.attrib['term']
        if term not in self._tags:
            self._tags[term] = Tag(term, element.attrib.get('label'))
        return self._tags[term]
    def _parse_category(self, element):
        """Return (and cache) the Category for a <category> element."""
        term = element.attrib['term']
        if term not in self._categories:
            self._categories[term] = Category(term, element.attrib.get('label'),
                                              element.findtext(zine.description))
        return self._categories[term]
    def handle_root(self, blog):
        """Copy the exported configuration onto the blog object."""
        blog.configuration.update(self._parse_config(
            blog.element.find(zine.configuration)))
    def postprocess_post(self, post):
        """Restore parser data, raw body and content type from the feed."""
        if post.parser_data is not None:
            return
        post.parser_data = _parser_data(post.element.findtext(zine.parser_data))
        content = _get_text_content(post.element.findall(atom.content),
                                    fallback=False)
        if content is not None:
            post.body = content
        content_type = post.element.findtext(zine.content_type)
        if content_type is not None:
            post.content_type = content_type
    def lookup_author(self, author, entry, username, email, uri):
        """Resolve authors referenced by zine dependency attribute."""
        dependency = author.attrib.get(zine.dependency)
        if dependency is not None:
            return self._get_author(dependency)
    def tag_or_category(self, element):
        """Classify a <category> element by its zine scheme URI."""
        scheme = element.attrib.get('scheme')
        if scheme == ZINE_TAG_URI:
            return self._parse_tag(element)
        elif scheme == ZINE_CATEGORY_URI:
            return self._parse_category(element)
    def parse_comments(self, post):
        """Parse all <zine:comment> elements and rebuild the reply tree."""
        comments = {}
        unresolved_parents = {}
        for element in post.element.findall(zine.comment):
            author = element.find(zine.author)
            dependency = author.attrib.get('dependency')
            if dependency is not None:
                author = self._get_author(dependency)
                email = www = None
            else:
                email = author.findtext(zine.email)
                www = author.findtext(zine.uri)
                author = author.findtext(zine.name)
            body = _get_html_content(element.findall(zine.content))
            comment = Comment(author, body, email, www, None,
                              parse_iso8601(element.findtext(zine.published)),
                              element.findtext(zine.submitter_ip), 'html',
                              _to_bool(element.findtext(zine.is_pingback)),
                              int(element.findtext(zine.status)),
                              element.findtext(zine.blocked_msg),
                              _parser_data(element.findtext(zine.parser_data)))
            comments[int(element.attrib['id'])] = comment
            parent = element.findtext(zine.parent)
            if parent:
                unresolved_parents[comment] = int(parent)
        # second pass: parents may appear after their children in the feed
        for comment, parent_id in unresolved_parents.iteritems():
            comment.parent = comments[parent_id]
        return comments.values()
#: feed importer extensions shipped with this module
extensions = [ZEAExtension]
| bsd-3-clause | a58e56b7a6fcb3d73fb0b82629cedd5d | 34.649907 | 81 | 0.58504 | 4.42329 | false | false | false | false |
mitsuhiko/zine | zine/plugins/rst_parser/__init__.py | 1 | 4263 | # -*- coding: utf-8 -*-
"""
zine.plugins.rst_parser
~~~~~~~~~~~~~~~~~~~~~~~
Adds support for reStructuredText in posts.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from zine.i18n import lazy_gettext
from zine.parsers import BaseParser
from zine.utils.zeml import Element, sanitize
from zine.plugins.rst_parser.translator import ZemlTranslator
from docutils import nodes, utils
from docutils.core import publish_string
from docutils.writers import Writer
from docutils.parsers.rst import roles, directives, Directive
class zeml(nodes.Element):
    """Docutils node to insert a raw ZEML tree (carried in the ``zeml``
    attribute)."""
class intro(nodes.Element):
    """Docutils node marking an intro section produced by the ``intro``
    directive."""
def make_extension_directive(app, extension):
    """Create a docutils directive class wrapping a zine markup extension."""
    class ExtDirective(Directive):
        required_arguments = 0
        optional_arguments = 0
        final_argument_whitespace = True
        option_spec = {}
        has_content = True
        def run(self):
            """Run the extension and wrap its result in a zeml node."""
            # the optional directive argument maps to one named attribute
            if self.arguments:
                self.options[extension.argument_attribute] = self.arguments[0]
            content = '\n'.join(self.content)
            reason = self.state.document.settings.parsing_reason
            if not extension.is_isolated:
                # non-isolated extensions get their body parsed as reST
                # and handed over as a ZEML <div> element
                content_tmp = RstParser(app).parse(content, reason)
                content = Element('div')
                content.children = content_tmp.children
                for child in content.children:
                    child.parent = content
            element = extension.process(self.options, content, reason)
            return [zeml(zeml=element)]
    # expose the extension's attributes as directive options
    for attrname in extension.attributes:
        ExtDirective.option_spec[attrname] = directives.unchanged
    if extension.argument_attribute in extension.attributes:
        ExtDirective.optional_arguments = 1
    if extension.is_void:
        ExtDirective.has_content = False
    # give it a nice non-generic name
    ExtDirective.__name__ = '%s_directive' % extension.name
    return ExtDirective
class IntroDirective(Directive):
    """The ``intro`` directive: wraps its parsed body in an intro node."""
    required_arguments = 0
    optional_arguments = 0
    has_content = True
    def run(self):
        node = intro()
        # parse the directive body as regular reST into the intro node
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
def make_extension_role(extension):
    """Create a docutils inline role that renders *extension*."""
    def role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
        unescaped = utils.unescape(text)
        if extension.is_isolated:
            # isolated extensions receive the raw text directly
            content = unescaped
        else:
            span = Element('span')
            span.text = unescaped
            content = span
        element = extension.process({}, content)
        return [zeml(zeml=element)], []
    role.__name__ = '%s_role' % extension.name
    return role
class ZemlWriter(Writer):
    """Writer to convert a docutils nodetree to a ZEML nodetree."""
    supported = ('zeml',)
    #: set by translate(); holds the resulting ZEML root node
    output = None
    def translate(self):
        """Walk the document with a ZemlTranslator and keep its root."""
        visitor = ZemlTranslator(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.root
class RstParser(BaseParser):
    """A parser for reStructuredText."""
    name = lazy_gettext('reStructuredText')
    #: process-wide flag: docutils directives/roles are registered
    #: globally, so the registration below must only happen once
    extensions_registered = False
    def parse(self, input_data, reason):
        """Parse *input_data* and return the resulting ZEML tree.

        Comment input (``reason == 'comment'``) is sanitized after the
        conversion.
        """
        if not RstParser.extensions_registered:
            # need to do this only once...
            directives.register_directive('intro', IntroDirective)
            for extension in self.app.markup_extensions:
                if extension.is_block_level:
                    directives.register_directive(
                        extension.name,
                        make_extension_directive(self.app, extension))
                else:
                    roles.register_local_role(
                        extension.name, make_extension_role(extension))
            RstParser.extensions_registered = True
        settings_overrides = {
            # disable docutils file insertion (include/raw from files)
            'file_insertion_enabled': False,
            'parsing_reason': reason,
        }
        rv = publish_string(source=input_data, writer=ZemlWriter(),
                            settings_overrides=settings_overrides)
        if reason == 'comment':
            rv = sanitize(rv)
        return rv
def setup(app, plugin):
    """Plugin entry point: register the reStructuredText parser."""
    app.add_parser('rst', RstParser)
| bsd-3-clause | 4907538e036d4d1c100adfd718642053 | 32.304688 | 78 | 0.623739 | 4.319149 | false | false | false | false |
mitsuhiko/zine | zine/utils/debug.py | 1 | 2501 | # -*- coding: utf-8 -*-
"""
zine.utils.debug
~~~~~~~~~~~~~~~~
This module provides various debugging helpers.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
from werkzeug import escape
from zine.application import url_for
#: matches the closing </body> or </html> tag (case-insensitive) so the
#: query debug table can be injected right before it
_body_end_re = re.compile(r'</\s*(body|html)(?i)')
def find_calling_context(skip=2):
    """Find and describe the innermost ``zine.`` frame on the call stack.

    Starts *skip* frames above this function and walks towards the stack
    top, returning a ``'filename:lineno (funcname)'`` string for the
    first frame whose module name starts with ``'zine.'``.  For bound
    methods the function name is qualified with the class name and the
    instance id.  Returns ``'<unknown>'`` when no such frame exists or
    when the stack is shallower than *skip* frames.

    :param skip: number of frames to skip before starting the search.
    """
    try:
        frame = sys._getframe(skip)
    except ValueError:
        # stack not deep enough -- treat like "no zine frame found"
        # instead of letting the debug helper itself blow up
        return '<unknown>'
    while frame.f_back is not None:
        name = frame.f_globals.get('__name__')
        if name and name.startswith('zine.'):
            funcname = frame.f_code.co_name
            if 'self' in frame.f_locals:
                funcname = '%s.%s of %s' % (
                    frame.f_locals['self'].__class__.__name__,
                    funcname,
                    hex(id(frame.f_locals['self']))
                )
            return '%s:%s (%s)' % (
                frame.f_code.co_filename,
                frame.f_lineno,
                funcname
            )
        frame = frame.f_back
    return '<unknown>'
def render_query_table(queries):
    """Render an HTML debug table listing all executed queries."""
    stylesheet = url_for('core/shared', filename='debug.css')
    parts = [u'<style type="text/css">@import url(%s)</style>' % stylesheet,
             u'<div class="_database_debug_table"><ul>']
    total = 0
    for statement, parameters, start, end, calling_context in queries:
        duration = end - start
        total += duration
        parts.append(u'<li><pre>%s</pre><div class="detail"><em>%s</em> | '
                     u'<strong>took %.3f ms</strong></div></li>' % (
            statement,
            escape(calling_context),
            duration * 1000
        ))
    parts.append(u'<li><strong>%d queries in %.2f ms</strong></ul></div>' % (
        len(queries),
        total * 1000
    ))
    return u'\n'.join(parts)
def inject_query_info(request, response):
    """Injects the collected queries into the response."""
    # Nothing recorded on this request -> leave the response untouched.
    if not request.queries:
        return
    # Encode with the response charset so we can splice into the raw body.
    debug_info = render_query_table(request.queries).encode(response.charset)
    body = response.data
    # Insert just before </body> (or </html>) when present so the markup
    # stays valid; otherwise fall back to appending at the very end.
    match = _body_end_re.search(body)
    if match is not None:
        pos = match.start()
        response.data = body[:pos] + debug_info + body[pos:]
    else:
        response.data = body + debug_info
    # Keep Content-Length in sync with the mutated body.
    if 'content-length' in response.headers:
        response.headers['content-length'] = len(response.data)
globocom/database-as-a-service | dbaas/logical/tests/test_database.py | 1 | 14479 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import mock
import logging
from django.test import TestCase
from django.db import IntegrityError
from drivers import base
from maintenance.tests import factory as maintenance_factory
from physical.tests import factory as physical_factory
from physical.models import DatabaseInfra
from logical.tests import factory
from notification.tests.factory import TaskHistoryFactory
from notification.models import TaskHistory
from logical.models import Database, DatabaseHistory
LOG = logging.getLogger(__name__)
ERROR_CLONE_WITHOUT_PERSISTENCE = \
"Database does not have persistence cannot be cloned"
ERROR_CLONE_IN_QUARANTINE = "Database in quarantine cannot be cloned"
ERROR_CLONE_NOT_ALIVE = "Database is not alive and cannot be cloned"
ERROR_DELETE_PROTECTED = "Database {} is protected and cannot be deleted"
ERROR_DELETE_DEAD = "Database {} is not alive and cannot be deleted"
ERROR_UPGRADE_MONGO24 = "MongoDB 2.4 cannot be upgraded by this task."
ERROR_UPGRADE_IN_QUARANTINE = "Database in quarantine and cannot be upgraded."
ERROR_UPGRADE_IS_DEAD = "Database is dead and cannot be upgraded."
ERROR_UPGRADE_NO_EQUIVALENT_PLAN = "Source plan do not has equivalent plan to upgrade."
UPGRADE_URL = "/admin/logical/database/{}/upgrade/"
UPGRADE_RETRY_URL = "/admin/logical/database/{}/upgrade_retry/"
class FakeDriver(base.BaseDriver):
    """Minimal stub driver used by the tests below so no real database
    engine connection is ever attempted."""
    def get_connection(self):
        # Any fixed string works; callers only need *a* connection URL.
        return 'connection-url'
class DatabaseTestCase(TestCase):
    """Unit tests for ``logical.models.Database``: creation and name
    slugification, disk-usage bookkeeping, the ``can_be_cloned`` /
    ``can_be_deleted`` / ``can_do_upgrade`` guard methods, upgrade URL
    helpers, deletion history, and task pin/unpin locking."""
    def setUp(self):
        # One infra with a single instance; FakeDriver avoids touching a
        # real engine.
        self.instance = physical_factory.InstanceFactory()
        self.databaseinfra = self.instance.databaseinfra
        self.engine = FakeDriver(databaseinfra=self.databaseinfra)
        self.environment = physical_factory.EnvironmentFactory()
        self.plan_upgrade = physical_factory.PlanFactory()
    def tearDown(self):
        self.engine = None
    def test_create_database(self):
        database = Database(name="blabla", databaseinfra=self.databaseinfra,
                            environment=self.environment)
        database.save()
        self.assertTrue(database.pk)
    def test_create_duplicate_database_error(self):
        # Same (name, infra, environment) twice must violate the unique
        # constraint.
        database = Database(name="bleble", databaseinfra=self.databaseinfra,
                            environment=self.environment)
        database.save()
        self.assertTrue(database.pk)
        self.assertRaises(IntegrityError, Database(name="bleble",
                                                   databaseinfra=self.databaseinfra,
                                                   environment=self.environment).save)
    def test_slugify_database_name_with_spaces(self):
        database = factory.DatabaseFactory.build(name="w h a t",
                                                 databaseinfra=self.databaseinfra,
                                                 environment=self.environment)
        database.full_clean()
        database.save()
        self.assertTrue(database.id)
        self.assertEqual(database.name, 'w_h_a_t')
    def test_slugify_database_name_with_dots(self):
        database = factory.DatabaseFactory.build(name="w.h.e.r.e",
                                                 databaseinfra=self.databaseinfra,
                                                 environment=self.environment)
        database.full_clean()
        database.save()
        self.assertTrue(database.id)
        self.assertEqual(database.name, 'w_h_e_r_e')
    def test_cannot_edit_database_name(self):
        # Renaming a persisted database is forbidden by the model.
        database = factory.DatabaseFactory(name="w h a t",
                                           databaseinfra=self.databaseinfra,
                                           environment=self.environment)
        self.assertTrue(database.id)
        database.name = "super3"
        self.assertRaises(AttributeError, database.save)
    @mock.patch.object(DatabaseInfra, 'get_info')
    def test_new_database_bypass_datainfra_info_cache(self, get_info):
        # get_info() returns a status only when force_refresh=True, so a
        # fresh database must bypass the cached (None) status.
        def side_effect_get_info(force_refresh=False):
            m = mock.Mock()
            if not force_refresh:
                m.get_database_status.return_value = None
                return m
            m.get_database_status.return_value = object()
            return m
        get_info.side_effect = side_effect_get_info
        database = factory.DatabaseFactory(name="db1cache",
                                           databaseinfra=self.databaseinfra,
                                           environment=self.environment)
        self.assertIsNotNone(database.database_status)
        # First the cached call, then the forced refresh.
        self.assertEqual(
            [mock.call(), mock.call(force_refresh=True)], get_info.call_args_list)
    def test_can_update_volume_used_disk_size(self):
        database = factory.DatabaseFactory()
        database.databaseinfra = self.databaseinfra
        volume = physical_factory.VolumeFactory()
        volume.host = self.instance.hostname
        volume.save()
        old_used_size = volume.used_size_kb
        volume = database.update_host_disk_used_size(
            host_address=self.instance.address, used_size_kb=300
        )
        self.assertNotEqual(volume.used_size_kb, old_used_size)
        self.assertEqual(volume.used_size_kb, 300)
        old_used_size = volume.used_size_kb
        volume = database.update_host_disk_used_size(
            host_address=self.instance.address, used_size_kb=500
        )
        self.assertNotEqual(volume.used_size_kb, old_used_size)
        self.assertEqual(volume.used_size_kb, 500)
    def test_cannot_update_volume_used_disk_size_host_not_volume(self):
        # No Volume attached to the host -> update returns None.
        database = factory.DatabaseFactory()
        database.databaseinfra = self.databaseinfra
        volume = database.update_host_disk_used_size(
            host_address=self.instance.address, used_size_kb=300
        )
        self.assertIsNone(volume)
    def test_can_clone(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        can_be_cloned, error = database.can_be_cloned()
        self.assertTrue(can_be_cloned)
        self.assertIsNone(error)
    def test_cannot_clone_no_persistence(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        database.plan.has_persistence = False
        can_be_cloned, error = database.can_be_cloned()
        self.assertFalse(can_be_cloned)
        self.assertEqual(error, ERROR_CLONE_WITHOUT_PERSISTENCE)
    def test_cannot_clone_in_quarantine(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        database.is_in_quarantine = True
        can_be_cloned, error = database.can_be_cloned()
        self.assertFalse(can_be_cloned)
        self.assertEqual(error, ERROR_CLONE_IN_QUARANTINE)
    def test_cannot_clone_dead(self):
        database = factory.DatabaseFactory()
        database.status = database.DEAD
        database.database_status = None
        can_be_cloned, error = database.can_be_cloned()
        self.assertFalse(can_be_cloned)
        self.assertEqual(error, ERROR_CLONE_NOT_ALIVE)
    def test_can_delete(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        can_be_deleted, error = database.can_be_deleted()
        self.assertTrue(can_be_deleted)
        self.assertIsNone(error)
    @mock.patch('logical.models.factory_for')
    @mock.patch('logical.models.Database.automatic_create_first_credential')
    def test_insert_on_database_history_when_delete(self, cred_mock, factory_mock):
        # Deleting must snapshot the database's metadata into
        # DatabaseHistory (mocks keep delete() from touching providers).
        database = factory.DatabaseFactory(
            name='test_fake_name',
            description='__test__ fake desc'
        )
        database_id = database.id
        database.is_in_quarantine = True
        database.is_protected = False
        database.status = database.ALIVE
        database.environment.name = '__test__ fake env'
        database.project.name = '__test__ proj name'
        database.team.name = '__test__ team name'
        database.plan.name = '__test__ plan name'
        database.databaseinfra.name = '__test__ infra name'
        database.databaseinfra.engine.version = 'v1.2.3'
        database.databaseinfra.plan.has_persistence = False
        database.databaseinfra.engine.engine_type.name = '__test__ fake engine type'
        database.databaseinfra.disk_offering.size_kb = 1234
        database.delete()
        deleted_databases = DatabaseHistory.objects.filter(database_id=database_id)
        self.assertEqual(len(deleted_databases), 1)
        deleted_database = deleted_databases[0]
        self.assertEqual(deleted_database.database_id, database_id)
        self.assertEqual(deleted_database.name, 'test_fake_name')
        self.assertEqual(deleted_database.description, '__test__ fake desc')
        self.assertEqual(deleted_database.engine, '__test__ fake engine type v1.2.3')
        self.assertEqual(deleted_database.project, '__test__ proj name')
        self.assertEqual(deleted_database.team, '__test__ team name')
        self.assertEqual(deleted_database.databaseinfra_name, '__test__ infra name')
        self.assertEqual(deleted_database.plan, '__test__ plan name')
        self.assertEqual(deleted_database.disk_size_kb, 1234)
        self.assertFalse(deleted_database.has_persistence)
        self.assertEqual(deleted_database.environment, '__test__ fake env')
    def test_cannot_delete_protected(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        database.is_protected = True
        can_be_deleted, error = database.can_be_deleted()
        self.assertFalse(can_be_deleted)
        self.assertEqual(error, ERROR_DELETE_PROTECTED.format(database.name))
    def test_can_delete_protected_in_quarantine(self):
        # Quarantine overrides protection: deletion becomes possible.
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        database.is_protected = True
        database.is_in_quarantine = True
        can_be_deleted, error = database.can_be_deleted()
        self.assertTrue(can_be_deleted)
        self.assertIsNone(error)
    def test_can_delete_in_quarantine(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        database.is_in_quarantine = True
        can_be_deleted, error = database.can_be_deleted()
        self.assertTrue(can_be_deleted)
        self.assertIsNone(error)
    def test_can_upgrade(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        database.databaseinfra.plan.engine_equivalent_plan = self.plan_upgrade
        can_do_upgrade, error = database.can_do_upgrade()
        self.assertTrue(can_do_upgrade)
        self.assertIsNone(error)
    def test_cannot_upgrade_mongo24(self):
        # MongoDB 2.4.x is explicitly excluded from the upgrade task.
        mongo = physical_factory.EngineTypeFactory()
        mongo.name = 'mongodb'
        mongo24 = physical_factory.EngineFactory()
        mongo24.engine_type = mongo
        mongo24.version = '2.4.xxx'
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        infra = database.databaseinfra
        infra.engine = mongo24
        database.databaseinfra = infra
        can_do_upgrade, error = database.can_do_upgrade()
        self.assertFalse(can_do_upgrade)
        self.assertEqual(error, ERROR_UPGRADE_MONGO24)
    def test_cannot_upgrade_in_quarantine(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        database.is_in_quarantine = True
        can_do_upgrade, error = database.can_do_upgrade()
        self.assertFalse(can_do_upgrade)
        self.assertEqual(error, ERROR_UPGRADE_IN_QUARANTINE)
    def test_cannot_upgrade_dead(self):
        database = factory.DatabaseFactory()
        database.databaseinfra.plan.engine_equivalent_plan = self.plan_upgrade
        database.status = database.DEAD
        can_do_upgrade, error = database.can_do_upgrade()
        self.assertFalse(can_do_upgrade)
        self.assertEqual(error, ERROR_UPGRADE_IS_DEAD)
    def test_cannot_upgrade_no_equivalent_plan(self):
        database = factory.DatabaseFactory()
        database.status = database.ALIVE
        can_do_upgrade, error = database.can_do_upgrade()
        self.assertFalse(can_do_upgrade)
        self.assertEqual(error, ERROR_UPGRADE_NO_EQUIVALENT_PLAN)
    def test_get_upgrade_url(self):
        database = factory.DatabaseFactory()
        expected_url = UPGRADE_URL.format(database.id)
        returned_url = database.get_upgrade_url()
        self.assertEqual(returned_url, expected_url)
    def test_get_upgrade_retry_url(self):
        database = factory.DatabaseFactory()
        expected_url = UPGRADE_RETRY_URL.format(database.id)
        returned_url = database.get_upgrade_retry_url()
        self.assertEqual(returned_url, expected_url)
    def test_last_successful_upgrade(self):
        # Only an upgrade marked successful counts as the "last" one.
        database = factory.DatabaseFactory()
        self.assertIsNone(database.last_successful_upgrade)
        upgrade = maintenance_factory.DatabaseUpgradeFactory()
        upgrade.database = database
        upgrade.save()
        self.assertIsNone(database.last_successful_upgrade)
        upgrade.set_success()
        self.assertEqual(database.last_successful_upgrade, upgrade)
    def test_last_successful_upgrade_with_error(self):
        database = factory.DatabaseFactory()
        upgrade = maintenance_factory.DatabaseUpgradeFactory()
        upgrade.database = database
        upgrade.set_error()
        self.assertIsNone(database.last_successful_upgrade)
    def test_current_task_lock(self):
        # A pinned task blocks other pins until unpinned.
        database = factory.DatabaseFactory()
        task1 = TaskHistoryFactory()
        task2 = TaskHistoryFactory()
        database.pin_task(task1)
        self.assertFalse(database.pin_task(task2))
        database.unpin_task()
        self.assertTrue(database.pin_task(task2))
    def test_lock_retry(self):
        # A retry (same task name, previous run errored) may replace the
        # lock exactly once; unrelated tasks may not.
        database = factory.DatabaseFactory()
        task1 = TaskHistoryFactory()
        task2 = TaskHistoryFactory()
        task3 = TaskHistoryFactory()
        task1.task_status = TaskHistory.STATUS_ERROR
        task1.save()
        task2.task_name = task1.task_name
        task2.save()
        database.pin_task(task1)
        self.assertFalse(database.update_task(task3))
        self.assertTrue(database.update_task(task2))
        self.assertFalse(database.update_task(task2))
        database.unpin_task()
        self.assertTrue(database.pin_task(task3))
| bsd-3-clause | 8ebaa4b72a4b634c10d20ce64b541a2c | 37.507979 | 87 | 0.655225 | 4.099377 | false | true | false | false |
globocom/database-as-a-service | dbaas/account/forms/role.py | 1 | 1394 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.models import User
from ..models import Role
# as in
# http://stackoverflow.com/questions/6097210/assign-user-objects-to-a-group-while-editing-group-object-in-django-admin
class RoleAdminForm(forms.ModelForm):
    """Admin form for ``Role`` that also exposes the role's members, so
    users can be assigned to the role directly from the role edit page
    (technique from the Stack Overflow question linked above)."""
    # Reverse side of the user<->role M2M, rendered with the admin's
    # filtered multi-select widget.
    users = forms.ModelMultipleChoiceField(
        queryset=User.objects.all(),
        required=False,
        widget=FilteredSelectMultiple(
            verbose_name=_('Users'),
            is_stacked=False
        )
    )
    class Meta:
        model = Role
    def __init__(self, *args, **kwargs):
        super(RoleAdminForm, self).__init__(*args, **kwargs)
        # Pre-populate the widget with the role's current members when
        # editing an existing instance.
        if self.instance and self.instance.pk:
            self.fields['users'].initial = self.instance.user_set.all()
    def save(self, commit=True):
        role = super(RoleAdminForm, self).save(commit=commit)
        if commit:
            role.user_set = self.cleaned_data['users']
        else:
            # With commit=False the role may not exist yet, so defer the
            # membership assignment by wrapping save_m2m(); the caller
            # triggers it after saving the instance.
            old_save_m2m = self.save_m2m
            def new_save_m2m():
                old_save_m2m()
                role.user_set = self.cleaned_data['users']
            self.save_m2m = new_save_m2m
        return role
| bsd-3-clause | 86b4b4f36e5632fb284d587b73bb5185 | 29.977778 | 118 | 0.635581 | 3.819178 | false | false | false | false |
globocom/database-as-a-service | dbaas/physical/migrations/0057_auto__add_field_instance_used_size_in_bytes__add_field_instance_total_.py | 1 | 19439 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the two nullable size columns to
        ``physical_instance``.  Auto-generated by South."""
        # Adding field 'Instance.used_size_in_bytes'
        db.add_column(u'physical_instance', 'used_size_in_bytes',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'Instance.total_size_in_bytes'
        db.add_column(u'physical_instance', 'total_size_in_bytes',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the two size columns again.
        Auto-generated by South."""
        # Deleting field 'Instance.used_size_in_bytes'
        db.delete_column(u'physical_instance', 'used_size_in_bytes')
        # Deleting field 'Instance.total_size_in_bytes'
        db.delete_column(u'physical_instance', 'total_size_in_bytes')
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical'] | bsd-3-clause | 4d27110c9a9ccf226b3c3f9b2f36c536 | 89 | 227 | 0.565718 | 3.57072 | false | false | false | false |
dit/dit | dit/utils/latexarray.py | 1 | 8791 | """
Functions to convert pmfs to latex arrays.
"""
import contextlib
import os
import subprocess # noqa: S404
import tempfile
from debtcollector import removals
import numpy as np
import numpy.core.arrayprint as arrayprint
from .context import cd, named_tempfile, tempdir
from .misc import default_opener
__all__ = (
'printoptions',
'to_latex',
'to_pdf',
)
# http://stackoverflow.com/questions/2891790/pretty-printing-of-numpy-array
#
# Includes hack to prevent NumPy from removing trailing zeros.
#
@removals.remove(message="Please use np.core.arrayprint.printoptions",
                 version='1.2.3')
@contextlib.contextmanager
def printoptions(strip_zeros=True, **kwargs):
    """
    Deprecated context manager that temporarily applies NumPy print options.

    Parameters
    ----------
    strip_zeros : bool
        When True, ``'trim': 'k'`` is added to `kwargs` before they are
        passed on.  NOTE(review): ``trim`` is a ``FloatingFormat`` option,
        not a documented ``np.set_printoptions`` keyword -- confirm this
        path still works on current NumPy.
    **kwargs
        Forwarded to ``np.set_printoptions`` for the duration of the block.
    """
    if strip_zeros:
        kwargs['trim'] = 'k'

    # Monkeypatch FloatingFormat.__call__ with a pass-through wrapper; this
    # is the "hack" referenced above to keep trailing zeros intact.
    origcall = arrayprint.FloatingFormat.__call__

    def __call__(self, x):
        return origcall.__call__(self, x)

    arrayprint.FloatingFormat.__call__ = __call__

    # Apply the requested options, yield control, then restore everything.
    original = np.get_printoptions()
    np.set_printoptions(**kwargs)
    yield
    np.set_printoptions(**original)
    arrayprint.FloatingFormat.__call__ = origcall
def to_latex__numerical(a, decimals, tab):
    """
    Render array-like `a` as a LaTeX ``array`` with decimal-aligned columns.

    The elements of each column are aligned on the decimal, if present.
    Spacing is automatically adjusted depending on if `a` contains negative
    numbers or not. For float data, trailing zeros are included so that
    array output is uniform.

    Parameters
    ----------
    a : array-like
        The data to render; promoted to 2D if necessary.
    decimals : int
        Number of decimal places to round to before rendering.
    tab : str
        Indentation used for each row of the array body.
    """
    array = np.atleast_2d(a)
    array = np.around(array, decimals)

    # Determine the number of digits left of the decimal.
    # Grab integral part, convert to string, and get maximum length.
    # This does not handle negative signs appropriately since -0.5 goes to 0.
    # So we make sure it never counts a negative sign by taking the abs().
    integral = np.abs(np.trunc(array.flat).astype(int))
    left_digits = max(map(len, map(str, integral)))

    # Adjust for negative signs.
    if np.any(array < 0):
        left_digits += 1

    # Set decimal digits to 0 if data are not floats.
    try:
        np.finfo(array.dtype)
    except ValueError:
        decimals = 0

    # Align the elements on the decimal, making room for largest element.
    coltype = r"\newcolumntype{{X}}{{D{{.}}{{.}}{{{0},{1}}}}}"
    coltype = coltype.format(left_digits, decimals)

    # Specify that we want all columns to have the same column type.
    nCols = array.shape[1]
    cols = r"*{{{nCols}}}{{X}}".format(nCols=nCols)

    # Build the lines in the array.
    #
    # In general, we could just use the rounded array and map(str, row),
    # but NumPy strips trailing zeros on floats (undesirably). So we make
    # use of the context manager to prevent that.
    options = {
        'precision': decimals,
        'suppress': True,
        'strip_zeros': False,
        'threshold': nCols + 1,
    }
    with printoptions(**options):
        lines = []
        for row in array:
            # Strip [ and ], remove newlines, and split on whitespace
            elements = row.__str__()[1:-1].replace('\n', '').split()
            # Pad elements that lost trailing zeros back out to `decimals`
            # places.  Elements without a '.' raise IndexError on the split;
            # those are integral and need no padding.
            # BUG FIX: this was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit and any real error; only the
            # expected IndexError is caught now.
            try:
                elements = [element + '0' * (decimals - len(element.split('.')[1])) for element in elements]
            except IndexError:
                pass
            line = [tab, ' & '.join(elements), r' \\']
            lines.append(''.join(line))

    # Remove the \\ on the last line.
    lines[-1] = lines[-1][:-3]

    # Create the LaTeX code
    subs = {'coltype': coltype, 'cols': cols, 'lines': '\n'.join(lines)}
    template = r"""{coltype}
\begin{{array}}{{{cols}}}
{lines}
\end{{array}}"""

    return template.format(**subs)
def to_latex__exact(a, tol, tab):
    """
    Render array-like `a` as a LaTeX ``array`` with entries shown as
    exact fractions.

    Parameters
    ----------
    a : array-like
        The data to render; promoted to 2D if necessary.
    tol : float
        Tolerance passed to `approximate_fraction` when converting each
        float into a fraction.
    tab : str
        Indentation used for each row of the array body.
    """
    from dit.math import approximate_fraction

    array = np.atleast_2d(a)

    # Convert every float into an approximating fraction.
    # FIX: this converter used to be a lambda named `to_frac`, shadowed by
    # the formatting helper of the same name below; the two are now distinct.
    def approx(f):
        return approximate_fraction(f, tol)

    fractions = np.array(list(map(approx, array.flat))).reshape(array.shape)

    # Specify that we want all columns to have the same column type.
    nCols = array.shape[1]
    cols = r"*{{{nCols}}}{{c}}".format(nCols=nCols)

    def to_frac(f):
        # Render a fraction as \frac{num}{den}; whole numbers print bare.
        if f.denominator != 1:
            return r'\frac{{{}}}{{{}}}'.format(f.numerator, f.denominator)
        else:
            return str(f.numerator)

    # Build the lines in the array.
    lines = []
    for row in fractions:
        elements = map(to_frac, row)
        line = [tab, ' & '.join(elements), r' \\']
        lines.append(''.join(line))

    # Remove the \\ on the last line.
    lines[-1] = lines[-1][:-3]

    # Create the LaTeX code
    subs = {'cols': cols, 'lines': '\n'.join(lines)}
    template = r"""\begin{{array}}{{{cols}}}
{lines}
\end{{array}}"""

    return template.format(**subs)
def to_latex(a, exact=False, decimals=3, tab=' '):
    r"""
    Convert an array-like object into a LaTeX array.

    Parameters
    ----------
    a : array-like
        A list, tuple, NumPy array, etc. The elements are written into a
        LaTeX array environment.
    exact : bool | float
        If False, columns are aligned on the decimal point, spacing adapts
        to negative entries, and trailing zeros are kept so float output is
        uniform.  If True, each float is rendered as a fraction (tolerance
        1e-6); a small float may be passed instead to set the tolerance.
    decimals : int
        The number of decimal places to round to before writing to LaTeX.
    tab : str
        The tab character to use for indentation within LaTeX.

    Notes
    -----
    The resulting array environment is meant for LaTeX mathmode, with
    ``\usepackage{array}`` and ``\usepackage{dcolumn}`` in the preamble.
    """
    if not exact:
        return to_latex__numerical(a, decimals, tab)

    # `exact is True` selects the default tolerance; otherwise `exact`
    # itself is the tolerance.
    tol = 1e-6 if exact is True else exact
    return to_latex__exact(a, tol, tab)
def to_pdf(a, exact=False,
           decimals=3,
           line="p(x) = \\left[\n{0}\n\\right]",
           show=True):
    """
    Converts a NumPy array to a LaTeX array, compiles and displays it.

    Parameters
    ----------
    a : array-like
        The data to render.
    exact : bool | float
        Passed through to `to_latex`; render entries as fractions.
    decimals : int
        Passed through to `to_latex`; decimal places to round to.
    line : str
        Format template wrapped around the generated LaTeX array.
    show : bool
        If True, open the resulting PDF with the system default viewer.

    Returns
    -------
    str
        Path to the cropped, persistent PDF file.

    Examples
    --------
    >>> a = np.array([[.1, .23, -1.523],[2.1, .23, .523]])
    >>> to_pdf(a) # pragma: no cover
    """
    template = r"""\documentclass{{article}}
\usepackage{{amsmath}}
\usepackage{{array}}
\usepackage{{dcolumn}}
\pagestyle{{empty}}
\begin{{document}}
\begin{{displaymath}}
{0}
\end{{displaymath}}
\end{{document}}"""
    fline = line.format(to_latex(a, exact=exact, decimals=decimals))
    latex = template.format(fline)

    with contextlib.ExitStack() as stack:  # pragma: no cover
        EC = stack.enter_context
        tmpdir = EC(tempdir())
        EC(cd(tmpdir))
        latexfobj = EC(named_tempfile(dir=tmpdir, suffix='.tex'))

        # Write the latex file
        latexfobj.write(latex.encode('utf8'))
        latexfobj.close()

        # Compile to PDF; run twice so layout information settles.
        args = ['pdflatex', '-interaction=batchmode', latexfobj.name]
        with open(os.devnull, 'w') as fp:
            subprocess.call(args, stdout=fp, stderr=fp)  # noqa: S603
            subprocess.call(args, stdout=fp, stderr=fp)  # noqa: S603

        # Create another tempfile which will not be deleted.
        pdffobj = tempfile.NamedTemporaryFile(suffix='_pmf.pdf', delete=False)
        pdffobj.close()

        # Crop the PDF and copy to persistent tempfile.
        pdfpath = latexfobj.name[:-3] + 'pdf'
        # Cannot add &>/dev/null to cmd, as Ghostscript is unable to find the
        # input file. This seems to be some weird interaction between
        # subprocess and pdfcrop. Also, we need to use shell=True since some
        # versions of pdfcrop rely on a hack to determine what perl
        # interpreter to call it with.
        cmd = r'pdfcrop --debug {} {}'.format(pdfpath, pdffobj.name)
        with open(os.devnull, 'w') as fp:
            # BUG FIX: `cmd` is a single shell string, but shell=True was
            # missing, so on POSIX exec() would look for a binary literally
            # named "pdfcrop --debug ..." and fail.
            subprocess.call(cmd, stdout=fp, shell=True)  # noqa: S602

    # Open the PDF
    if show:
        default_opener(pdffobj.name)

    return pdffobj.name
| bsd-3-clause | d0b1f66a31b5cd15c2224f4b20988fa5 | 29.630662 | 108 | 0.602776 | 3.638659 | false | false | false | false |
dit/dit | dit/divergences/hypercontractivity_coefficient.py | 1 | 4583 | """
Compute the hypercontractivity coefficient:
s*(X||Y) = max_{U - X - Y} I[U:Y]/I[U:X]
"""
import numpy as np
from ..algorithms import BaseAuxVarOptimizer
from ..exceptions import ditException
from ..helpers import normalize_rvs
from ..multivariate.entropy import entropy
from ..multivariate.total_correlation import total_correlation
__all__ = (
'HypercontractivityCoefficient',
'hypercontractivity_coefficient',
)
class HypercontractivityCoefficient(BaseAuxVarOptimizer):
    """
    Computes the hypercontractivity coefficient:

    .. math::
        max_{U - X - Y} I[U:Y] / I[U:X]
    """

    # Number of random restarts used by the base class's shotgun optimization.
    _shotgun = 5

    def __init__(self, dist, rv_x=None, rv_y=None, bound=None, rv_mode=None):
        """
        Initialize the optimizer.

        Parameters
        ----------
        dist : Distribution
            The distribution to compute the hypercontractivity coefficient of.
        rv_x : iterable
            The variables to consider `X`.
        rv_y : iterable
            The variables to consider `Y`.
        bound : int, None
            Specifies a bound on the size of the auxiliary random variable. If None,
            then the theoretical bound is used.
        rv_mode : str, None
            Specifies how to interpret `rvs` and `crvs`. Valid options are:
            {'indices', 'names'}. If equal to 'indices', then the elements of
            `crvs` and `rvs` are interpreted as random variable indices. If
            equal to 'names', then the elements are interpreted as random
            variable names. If `None`, then the value of `dist._rv_mode` is
            consulted, which defaults to 'indices'.
        """
        # Index sets locating X, Y, and the auxiliary variable U in the
        # joint distribution built by the base optimizer.
        self._x = {0}
        self._y = {1}
        self._u = {3}

        super().__init__(dist, [rv_x, rv_y], [], rv_mode=rv_mode)

        # Theoretical bound |U| <= |X| + 1 suffices; honor a smaller
        # user-supplied bound if given.
        theoretical_bound = self._full_shape[self._proxy_vars[0]] + 1
        bound = min(bound, theoretical_bound) if bound else theoretical_bound

        # U is constructed as an auxiliary variable over X (variable 0),
        # enforcing the Markov chain U - X - Y.
        self._construct_auxvars([({0}, bound)])

    def _objective(self):
        """
        The hypercontractivity coefficient to minimize.

        Returns
        -------
        obj : func
            The objective function.
        """
        mi_a = self._mutual_information(self._u, self._y)
        mi_b = self._mutual_information(self._u, self._x)

        def objective(self, x):
            """
            Compute :math:`I[U:Y] / I[U:X]`

            Parameters
            ----------
            x : np.ndarray
                An optimization vector.

            Returns
            -------
            obj : float
                The value of the objective.
            """
            pmf = self.construct_joint(x)
            a = mi_a(pmf)
            b = mi_b(pmf)
            # Negated because the optimizer minimizes; guard the ratio
            # against a vanishing denominator I[U:X].
            return -(a / b) if not np.isclose(b, 0.0) else np.inf

        return objective
def hypercontractivity_coefficient(dist, rvs, bound=None, niter=None, rv_mode=None):
    """
    Computes the hypercontractivity coefficient:

    .. math::
        s*(X||Y) = max_{U - X - Y} I[U:Y]/I[U:X]

    Parameters
    ----------
    dist : Distribution
        The distribution of interest.
    rvs : iterable of iterables, len(rvs) == 2
        The variables to compute the hypercontractivity coefficient of.
        Order is important.
    bound : int, None
        An external bound on the size of `U`. If None, :math:`|U| <= |X|+1`.
    niter : int, None
        The number of basin-hopping steps to perform. If None, use the default.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', then the elements are interpreted as random
        variable names. If `None`, then the value of `dist._rv_mode` is
        consulted, which defaults to 'indices'.

    Returns
    -------
    hc : float
        The hypercontractivity coefficient.
    """
    rvs, _, rv_mode = normalize_rvs(dist, rvs, None, rv_mode)

    if len(rvs) != 2:
        msg = 'Hypercontractivity coefficient can only be computed for 2 variables, not {}.'.format(len(rvs))
        raise ditException(msg)

    # Closed-form special cases, checked before running the optimizer.
    if np.isclose(total_correlation(dist, rvs), 0.0):
        # X and Y carry no mutual information.
        return 0.0
    if np.isclose(entropy(dist, rvs[1], rvs[0]), 0.0):
        # Y is a deterministic function of X.
        return 1.0

    optimizer = HypercontractivityCoefficient(dist, rvs[0], rvs[1],
                                              bound=bound, rv_mode=rv_mode)
    optimizer.optimize(niter=niter)
    return -optimizer.objective(optimizer._optima)
| bsd-3-clause | 401bb72b3f907725e05d779ed794b4ef | 31.048951 | 109 | 0.578442 | 3.835146 | false | false | false | false |
dit/dit | dit/algorithms/maxentropy.py | 1 | 22700 | """
Maximum entropy with marginal distribution constraints.
Note: We are actually doing the maximum entropy optimization. So we have not
built in the fact that the solution is an exponential family.
Also, this doesn't seem to work that well in practice. The optimization
simply fails to converge for many distributions. Xor() works great, but And()
fails to converge for 2-way marginals. Random distributions seem to work.
Jittering the distributions sometimes helps.
We might need to assume the exponential form and then fit the params to match
the marginals. Perhaps exact gradient and Hessians might help, or maybe even
some rescaling of the linear constraints.
TODO:
This code for moment-based maximum entropy needs to be updated so that it can
handle any Cartesian product sample space, rather than just homogeneous ones.
"""
from debtcollector import removals
import itertools
import numpy as np
import dit
from dit.abstractdist import AbstractDenseDistribution, get_abstract_dist
from ..helpers import RV_MODES, parse_rvs
from .optutil import as_full_rank, CVXOPT_Template, prepare_dist, Bunch
from ..utils import flatten
# from ..utils import powerset
__all__ = (
# 'MarginalMaximumEntropy',
'MomentMaximumEntropy',
# Use version provided by maxentropyfw.py
# 'marginal_maxent_dists',
'moment_maxent_dists',
)
@removals.remove(message="Please see methods in dit.algorithms.distribution_optimizers.py.",
                 version='1.0.1')
def isolate_zeros_generic(dist, rvs):
    """
    Determines if there are any elements of the optimization vector that must
    be zero.

    If p(marginal) = 0, then every component of the joint that contributes to
    that marginal probability must be exactly zero for all feasible solutions.

    Returns
    -------
    variables : Bunch
        Has attributes `nonzero` (free element indices) and `zero`
        (indices forced to zero).
    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    # Translate `rvs` (possibly names) into sorted index form; each inner
    # list identifies one marginal distribution.
    rvs_, indexes = parse_rvs(dist, set(flatten(rvs)), unique=True, sort=True)
    rvs = [[indexes[rvs_.index(rv)] for rv in subrv] for subrv in rvs]

    d = get_abstract_dist(dist)
    n_elements = d.n_elements

    # Count, per joint element, how many zero-probability marginal events
    # it participates in; any positive count forces that element to zero.
    zero_elements = np.zeros(n_elements, dtype=int)
    cache = {}
    pmf = dist.pmf
    for subrvs in rvs:
        marray = d.parameter_array(subrvs, cache=cache)
        for idx in marray:
            # Convert the sparse nonzero elements to a dense boolean array
            bvec = np.zeros(n_elements, dtype=int)
            bvec[idx] = 1
            p = pmf[idx].sum()
            if np.isclose(p, 0):
                zero_elements += bvec

    zero = np.nonzero(zero_elements)[0]
    zeroset = set(zero)
    nonzero = [i for i in range(n_elements) if i not in zeroset]
    variables = Bunch(nonzero=nonzero, zero=zero)
    return variables
@removals.remove(message="Please see methods in dit.algorithms.distribution_optimizers.py.",
                 version='1.0.1')
def isolate_zeros(dist, k):
    """
    Determines if there are any elements of the optimization vector that must
    be zero.

    If :math:`p(marginal) = 0`, then every component of the joint that
    contributes to that marginal probability must be exactly zero for all
    feasible solutions.

    Returns
    -------
    variables : Bunch
        Has attributes `nonzero` (free element indices) and `zero`
        (indices forced to zero).
    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    d = get_abstract_dist(dist)
    n_variables = d.n_variables
    n_elements = d.n_elements

    rvs = range(n_variables)
    # Count, per joint element, how many zero-probability k-way marginal
    # events it participates in; any positive count forces it to zero.
    zero_elements = np.zeros(n_elements, dtype=int)
    cache = {}
    pmf = dist.pmf
    # k == 0 constrains only normalization, so nothing can be isolated.
    if k > 0:
        for subrvs in itertools.combinations(rvs, k):
            marray = d.parameter_array(subrvs, cache=cache)
            for idx in marray:
                # Convert the sparse nonzero elements to a dense boolean array
                bvec = np.zeros(n_elements, dtype=int)
                bvec[idx] = 1
                p = pmf[idx].sum()
                if np.isclose(p, 0):
                    zero_elements += bvec

    zero = np.nonzero(zero_elements)[0]
    zeroset = set(zero)
    nonzero = [i for i in range(n_elements) if i not in zeroset]
    variables = Bunch(nonzero=nonzero, zero=zero)
    return variables
def marginal_constraints_generic(dist, rvs, rv_mode=None,
                                 with_normalization=True):
    """
    Returns :math:`A` and :math:`b` in :math:`A x = b`, for a system of
    marginal constraints.

    In general, the resulting matrix :math:`A` will not have full rank.

    Parameters
    ----------
    dist : distribution
        The distribution used to calculate the marginal constraints.
    rvs : sequence
        A sequence whose elements are also sequences, each specifying one
        marginal distribution as a set of random variables from `dist`.
        The inner sequences may overlap one another, but a random variable
        can appear at most once within each inner sequence.
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options
        are: {'indices', 'names'}. If `None`, then the value of
        `dist._rv_mode` is consulted.
    with_normalization : bool
        If true, prepend the normalization constraint as the first row.
    """
    assert dist.is_dense()
    assert dist.get_base() == 'linear'

    def parse(rv):
        return parse_rvs(dist, rv, rv_mode=rv_mode, unique=True, sort=True)[1]

    # potential inclusion: include implied (lower-order) constraints
    # rvs = set().union(*[set(r for r in powerset(rv) if r) for rv in rvs])
    index_lists = [parse(rv) for rv in rvs]

    pmf = dist.pmf
    d = get_abstract_dist(dist)

    rows, rhs = [], []

    # The normalization constraint comes first, when requested.
    if with_normalization:
        rows.append(np.ones(d.n_elements))
        rhs.append(1)

    # One constraint row per marginal probability.
    cache = {}
    for rvec in index_lists:
        for idx in d.parameter_array(rvec, cache=cache):
            indicator = np.zeros(d.n_elements)
            indicator[idx] = 1
            rows.append(indicator)
            rhs.append(pmf[idx].sum())

    return np.asarray(rows, dtype=float), np.asarray(rhs, dtype=float)
def marginal_constraints(dist, m, with_normalization=True):
    """
    Returns :math:`A` and :math:`b` in :math:`A x = b`, for a system of
    `m`-way marginal constraints.

    The resulting matrix :math:`A` is not guaranteed to have full rank.

    Parameters
    ----------
    dist : distribution
        The distribution from which the marginal constraints are constructed.
    m : int
        The size of the marginals to constrain. E.g., `m=2` constrains all
        pairwise marginals to equal those of `dist`.
    with_normalization : bool
        If true, include a constraint for normalization.

    Returns
    -------
    A : array-like, shape (p, q)
        Constraint matrix with one row per marginal probability (plus one
        for normalization when requested), and one column per joint outcome.
    b : array-like, (p,)
        The RHS of the linear equality constraints.
    """
    n_variables = dist.outcome_length()

    if m > n_variables:
        msg = ("Cannot constrain {0}-way marginals"
               " with only {1} random variables.").format(m, n_variables)
        raise ValueError(msg)

    # Enumerate every m-subset of the random variables, honoring names
    # when the distribution is name-indexed.
    rv_mode = dist._rv_mode
    if rv_mode in [RV_MODES.NAMES, 'names']:
        pool = dist.get_rv_names()
    else:
        pool = range(n_variables)
    rvs = list(itertools.combinations(pool, m))

    return marginal_constraints_generic(dist, rvs, rv_mode,
                                        with_normalization=with_normalization)
def marginal_constraint_rank(dist, m):
    """
    Returns the rank of the `m`-way marginal constraint matrix for `dist`.
    """
    A, b = marginal_constraints(prepare_dist(dist), m)
    _, _, rank = as_full_rank(A, b)
    return rank
def moment(f, pmf, center=0, n=1):
    """
    Return the nth moment of `f` about `center`, distributed by `pmf`.

    Explicitly: :math:`\\sum_i (f(i) - center)**n p(i)`

    Note, `pmf` is the joint distribution. So n=1 can be used even when
    calculating covariances such as <xx> and <xy>. The first would actually
    be a 2nd moment, while the second would be a mixed 1st moment.

    Parameters
    ----------
    f : array-like
        The numerical values assigned to each outcome of `pmf`.
    pmf : array-like
        The pmf for a distribution, linear-distributed values.
    center : float
        Calculate a centered moment.
    n : int
        The moment to calculate.
    """
    deviations = (f - center) ** n
    return np.sum(deviations * pmf)
def moment_constraints(pmf, n_variables, m, symbol_map, with_replacement=True):
    """
    Returns :math:`A` and :math:`b` in :math:`A x = b`, for an Ising-like
    system.

    If without replacement, we include only m-way first-moment constraints
    where each element is distinct. So <xx> and <yy> would not be included if
    `n_variables = 2` and `m = 2`.

    The function we take means of is: :math:`f(x) = \\prod_i x_i`

    The resulting matrix :math:`A` is not guaranteed to have full rank.

    Parameters
    ----------
    pmf : array-like, shape ( n_symbols ** n_variables, )
        The probability mass function of the distribution. The pmf must have
        a Cartesian product sample space with the same sample space used for
        each random variable.
    n_variables : int
        The number of random variables.
    m : int | list
        The size of the moments to constrain. When `m = 2`, pairwise means
        are constrained to equal the pairwise means in `pmf`. When `m = 3`,
        three-way means are constrained to equal those in `pmf`.
        If m is a list, then include all m-way moments in the list. An empty
        list yields only the normalization constraint.
    symbol_map : array-like
        A mapping from the ith symbol to a real number that is to be used in
        the calculation of moments. For example, `symbol_map=[-1, 1]`
        corresponds to the typical Ising model.
    with_replacement : bool
        If `True`, variables are selected with replacement, constraining the
        entire moment matrix. The standard Ising model selects without
        replacement, so terms like <xx>, <yy> do not appear for m=2.

    Returns
    -------
    A : array-like, shape (p, q)
        The matrix defining the moment equality constraints and also the
        normalization constraint.
    b : array-like, (p,)
        The RHS of the linear equality constraints.
    """
    n_symbols = len(symbol_map)
    d = AbstractDenseDistribution(n_variables, n_symbols)

    if len(pmf) != d.n_elements:
        msg = 'Length of `pmf` != n_symbols ** n_variables. Symbol map: {0!r}'
        raise ValueError(msg.format(symbol_map))

    # Begin with the normalization constraint.
    A = [np.ones(d.n_elements)]
    b = [1]

    # Normalize `m` into a list of moment orders.
    try:
        m[0]
    except TypeError:
        # `m` is a scalar.
        mvals = [m]
    except IndexError:
        # `m` is an empty list: no moment constraints beyond normalization.
        # BUG FIX: `mvals` was previously left unbound here, causing a
        # NameError at the loop below.
        mvals = []
    else:
        mvals = m

    if with_replacement:
        combinations = itertools.combinations_with_replacement
    else:
        combinations = itertools.combinations

    # The outcome table depends only on the symbol map and variable count,
    # so build it once rather than per moment order.
    outcomes = np.asarray(list(itertools.product(symbol_map, repeat=n_variables)))

    # Now add all the moment constraints.
    for m in mvals:
        if m < 1:
            continue
        for rvs in combinations(range(n_variables), m):
            # Make it a list for NumPy indexing
            rvs = list(rvs)
            # f(x) = prod of the selected coordinates, one value per outcome.
            f = np.array([outcome[rvs].prod() for outcome in outcomes])
            mean = moment(f, pmf, n=1)
            A.append(f)
            b.append(mean)

    A = np.asarray(A, dtype=float)
    b = np.asarray(b, dtype=float)

    return A, b
def moment_constraint_rank(dist, m, symbol_map=None,
                           cumulative=True, with_replacement=True):
    """
    Returns the rank of the moment constraint matrix.
    """
    # Either all orders up to m, or just order m.
    mvals = range(m + 1) if cumulative else [m]

    dist = prepare_dist(dist)
    n_variables = dist.outcome_length()
    n_symbols = len(dist.alphabet[0])
    pmf = dist.pmf

    # Default symbol map: 0, 1, ..., n_symbols - 1.
    if symbol_map is None:
        symbol_map = range(n_symbols)

    A, b = moment_constraints(pmf, n_variables, mvals, symbol_map,
                              with_replacement=with_replacement)
    _, _, rank = as_full_rank(A, b)
    return rank
def ising_constraint_rank(dist, m, symbol_map=None, cumulative=True):
    """
    Returns the rank of the Ising constraint matrix.

    Identical to `moment_constraint_rank` with selection without
    replacement, as in the standard Ising model.
    """
    return moment_constraint_rank(dist, m, symbol_map=symbol_map,
                                  cumulative=cumulative,
                                  with_replacement=False)
def negentropy(p):
    """
    Return the negative Shannon entropy (in bits) of the pmf vector `p`.

    Zero-probability entries contribute nothing (0 * log 0 -> NaN is
    skipped by nansum).
    """
    return np.nansum(p * np.log2(p))
@removals.removed_class('MaximumEntropy',
                        replacement="dit.algorithms.scipy_optimizers.MaxEntOptimizer",
                        message="Please see methods in dit.algorithms.distribution_optimizers.py.",
                        version='1.0.1')
class MaximumEntropy(CVXOPT_Template):
    """
    Find maximum entropy distribution.
    """

    def build_function(self):
        # Entropy is maximized by minimizing its negation.
        self.func = negentropy
@removals.removed_class('MarginalMaximumEntropy',
                        replacement="dit.algorithms.scipy_optimizers.MaxEntOptimizer",
                        message="Please see methods in dit.algorithms.distribution_optimizers.py.",
                        version='1.0.1')
class MarginalMaximumEntropy(MaximumEntropy):
    """
    Find maximum entropy distribution subject to `k`-way marginal constraints.

    `k = 0` should reproduce the behavior of MaximumEntropy.
    """

    def __init__(self, dist, k, tol=None, prng=None):
        """
        Initialize optimizer.

        Parameters
        ----------
        dist : distribution
            The distribution used to specify the marginal constraints.
        k : int
            The number of variables in the constrained marginals.
        tol : float | None
            The desired convergence tolerance.
        prng : RandomState
            A pseudorandom number generator.
        """
        self.k = k
        super().__init__(dist, tol=tol, prng=prng)

    def prep(self):
        # We are only removing elements which should be fixed at zero.
        # This means they don't contribute to the entropy, so there is no
        # need to adjust the function. Also, we are using numdifftools.
        self.variables = isolate_zeros(self.dist, self.k)

        # Make self.n reflect only the size of the nonzero elements. This
        # automatically adjusts the size of G for the inequality constraint.
        self.n = len(self.variables.nonzero)  # pylint: disable=no-member

    def build_linear_equality_constraints(self):
        from cvxopt import matrix

        A, b = marginal_constraints(self.dist, self.k)

        # Reduce the size of the constraint matrix
        # Since we are only removing elements which are exactly zero, then
        # the constraint equations are unchanged. E.g. the normalization is
        # still that the nonzero values should add to 1.
        Asmall = A[:, self.variables.nonzero]  # pylint: disable=no-member
        Asmall, b, rank = as_full_rank(Asmall, b)
        if rank > Asmall.shape[1]:
            raise ValueError('More independent constraints than free parameters.')

        Asmall = matrix(Asmall)
        b = matrix(b)  # now a column vector

        self.A = Asmall
        self.b = b

    def initial_dist(self):
        # A feasible starting point satisfying A x = b on the free elements.
        from .maxentropyfw import initial_point
        initial_x, _ = initial_point(self.dist, self.k, A=self.A, b=self.b,
                                     isolated=self.variables,
                                     show_progress=False)
        return initial_x

    def build_gradient_hessian(self):
        ln2 = np.log(2)

        def gradient(xarr):
            # This operates only on nonzero elements.

            # All of the optimization elements should be greater than zero
            # But occasional they might go slightly negative or zero.
            # In those cases, we will just set the gradient to zero and keep the
            # value fixed from that point forward.
            bad_x = xarr <= 0
            grad = np.log2(xarr) + 1 / ln2
            grad[bad_x] = 0
            return grad

        def hessian(xarr):
            # d^2/dx^2 of x*log2(x) is 1/(x ln 2); clamped entries get 0.
            bad_x = xarr <= 0
            diag = 1 / xarr / ln2
            diag[bad_x] = 0
            return np.diag(diag)

        self.gradient = gradient
        self.hessian = hessian
class MomentMaximumEntropy(MaximumEntropy):
    """
    Find maximum entropy distribution subject to `k`-way marginal constraints.

    `k = 0` should reproduce the behavior of MaximumEntropy.
    """

    def __init__(self, dist, k, symbol_map, cumulative=True,
                 with_replacement=True, tol=None, prng=None):
        """
        Initialize optimizer.

        Parameters
        ----------
        dist : distribution
            The distribution used to specify the marginal constraints.
        k : int
            The number of variables in the constrained marginals.
        symbol_map : list
            The mapping from states to real numbers. This is used while taking
            moments.
        cumulative : bool
            If `True`, include all moments less than or equal to `k`.
        with_replacement : bool
            If `True`, then variables are selected for moments with replacement.
            The standard Ising model selects without replacement.
        tol : float | None
            The desired convergence tolerance.
        prng : RandomState
            A pseudorandom number generator.
        """
        self.k = k
        self.symbol_map = symbol_map
        self.cumulative = cumulative
        self.with_replacement = with_replacement
        super().__init__(dist, tol=tol, prng=prng)

    def build_linear_equality_constraints(self):
        from cvxopt import matrix

        # Dimension of optimization variable
        n = self.n

        # Either all moment orders up to k, or only order k.
        if self.cumulative:
            k = range(self.k + 1)
        else:
            k = [self.k]

        args = (self.pmf, self.n_variables, k, self.symbol_map)
        kwargs = {'with_replacement': self.with_replacement}
        A, b = moment_constraints(*args, **kwargs)
        # Drop linearly dependent rows before handing the system to CVXOPT.
        AA, bb, rank = as_full_rank(A, b)
        if rank > n:
            raise ValueError('More independent constraints than parameters.')

        AA = matrix(AA)
        bb = matrix(bb)  # now a column vector

        self.A = AA
        self.b = bb
@removals.remove(message="Please see methods in dit.algorithms.distribution_optimizers.py.",
                 version='1.0.1')
def marginal_maxent_dists(dist, k_max=None, jitter=True, show_progress=True):
    """
    Return the marginal-constrained maximum entropy distributions.

    Parameters
    ----------
    dist : distribution
        The distribution used to constrain the maxent distributions.
    k_max : int
        The maximum order to calculate.
    jitter : bool | float
        When `True` or a float, we perturb the distribution slightly before
        proceeding. This can sometimes help with convergence.
    show_progress : bool
        If `True`, show convergence progress to stdout.

    Returns
    -------
    dists : list of Distribution
        One maxent distribution per order k = 0, 1, ..., k_max.
    """
    dist = prepare_dist(dist)

    if jitter:
        # This is sometimes necessary. If your distribution does not have
        # full support than convergence can be difficult to come by.
        dist.pmf = dit.math.pmfops.jittered(dist.pmf)

    n_variables = dist.outcome_length()

    if k_max is None:
        k_max = n_variables

    outcomes = list(dist.sample_space())

    dists = []
    for k in range(k_max + 1):
        print()
        print("Constraining maxent dist to match {0}-way marginals.".format(k))
        print()
        opt = MarginalMaximumEntropy(dist, k)
        pmf_opt = opt.optimize(show_progress=show_progress)
        pmf_opt = pmf_opt.reshape(pmf_opt.shape[0])
        # Re-embed the optimized nonzero entries into a full-length pmf.
        pmf = np.zeros(len(dist.pmf))
        pmf[opt.variables.nonzero] = pmf_opt  # pylint: disable=no-member
        d = dit.Distribution(outcomes, pmf)
        dists.append(d)

    return dists
def moment_maxent_dists(dist, symbol_map, k_max=None, jitter=True,
                        with_replacement=True, show_progress=True):
    """
    Return the moment-constrained maximum entropy distributions.

    Parameters
    ----------
    dist : distribution
        The distribution used to constrain the maxent distributions.
    symbol_map : iterable
        A list whose elements are the real values that each state is assigned
        while calculating moments. Typical values are [-1, 1] or [0, 1].
    k_max : int
        The maximum order to calculate.
    jitter : bool | float
        When `True` or a float, we perturb the distribution slightly before
        proceeding. This can sometimes help with convergence.
    with_replacement : bool
        If `True`, then variables are selected for moments with replacement.
        The standard Ising model selects without replacement.
    show_progress : bool
        If `True`, show convergence progress to stdout.
    """
    dist = prepare_dist(dist)

    if jitter:
        # Distributions without full support can be hard to converge on;
        # a tiny perturbation sometimes helps.
        dist.pmf = dit.math.pmfops.jittered(dist.pmf)

    n_variables = dist.outcome_length()
    symbols = dist.alphabet[0]

    if k_max is None:
        k_max = n_variables

    outcomes = list(dist._product(symbols, repeat=n_variables))
    text = 'with replacement' if with_replacement else 'without replacement'

    dists = []
    for k in range(k_max + 1):
        print()
        print("Constraining maxent dist to match {0}-way moments, {1}.".format(k, text))
        print()
        optimizer = MomentMaximumEntropy(dist, k, symbol_map,
                                         with_replacement=with_replacement)
        pmf_opt = optimizer.optimize(show_progress=show_progress)
        pmf_opt = pmf_opt.reshape(pmf_opt.shape[0])
        dists.append(dit.Distribution(outcomes, pmf_opt))

    return dists
| bsd-3-clause | dc301621165bb26c4427ff88a108f8ed | 31.898551 | 99 | 0.620617 | 3.995072 | false | false | false | false |
dit/dit | dit/convert.py | 1 | 2470 | """
Helper functions to convert between Distribution and ScalarDistribution.
"""
import dit
__all__ = (
'DtoSD',
'SDtoD'
)
def DtoSD(dist, extract):
    """
    Convert a Distribution to a ScalarDistribution.

    Parameters
    ----------
    dist : Distribution
        The Distribution to convert to a ScalarDistribution.
    extract : bool
        If `True` and the outcome length is 1, then we extract the sole
        element from each outcome and use that value as the scalar outcome.
    """
    if extract and dist.outcome_length() == 1:
        # Unwrap each length-1 outcome into its lone element.
        outcomes = tuple(o[0] for o in dist.outcomes)
        sample_space = dist.alphabet[0]
    else:
        outcomes = dist.outcomes
        sample_space = None

    # If people really want it, we can use _make_distribution.
    # But we have to decide if we want to set the alphabet to the
    # entire sample or just the sample space represented in outcomes.
    return dit.ScalarDistribution(outcomes, dist.pmf,
                                  sample_space=sample_space,
                                  base=dist.get_base(),
                                  prng=dist.prng,
                                  sort=False,
                                  sparse=dist.is_sparse(),
                                  validate=False)
def SDtoD(dist):
    """
    Convert a ScalarDistribution to a Distribution.

    Parameters
    ----------
    dist : ScalarDistribution
        The ScalarDistribution to convert to a Distribution.
    """
    from dit.exceptions import ditException, InvalidDistribution
    import dit.validate as v

    if not len(dist.pmf):
        raise InvalidDistribution("Cannot convert from empty ScalarDistribution.")

    # When every outcome is a sequence of uniform class and length, the
    # outcomes can be used as-is; otherwise each one is wrapped in a
    # 1-tuple before building the joint distribution.
    try:
        # Each outcome is of the same class.
        v.validate_outcome_class(dist.outcomes)
        # Each outcome has the same length.
        v.validate_outcome_length(dist.outcomes)
        # Each outcome is a 'sequence'.
        v.validate_sequence(dist.outcomes[0])
    except ditException:
        # Nested translation.
        outcomes = [(o,) for o in dist.outcomes]
    else:
        outcomes = dist.outcomes

    return dit.Distribution(outcomes, dist.pmf, base=dist.get_base())
| bsd-3-clause | 8835ecbc4bcfc9afd3c7001a5546928e | 28.759036 | 75 | 0.608907 | 4.356261 | false | false | false | false |
dit/dit | tests/inference/test_segmentaxis.py | 1 | 2165 | """
Tests for dit.inference.segmentaxis.
"""
import pytest
import numpy as np
from dit.inference.segmentaxis import segment_axis
def test_sa1():
    """Segmenting a 2d array with overlap 1 yields every adjacent pair."""
    data = np.arange(9).reshape(3, 3)
    result = segment_axis(data, 2, 1)
    expected = np.array([[i, i + 1] for i in range(8)])
    assert (result == expected).all()
def test_sa2():
    """Padding fills the incomplete final frame with the given end value."""
    data = np.arange(5)
    result = segment_axis(data, 2, 0, axis=0, end='pad', endvalue=7)
    expected = np.array([[0, 1], [2, 3], [4, 7]])
    assert (result == expected).all()
def test_sa3():
    """Cutting discards the incomplete final frame."""
    data = np.arange(5)
    result = segment_axis(data, 2, 0, end='cut')
    expected = np.array([[0, 1], [2, 3]])
    assert (result == expected).all()
def test_sa4():
    """Wrapping completes the final frame with values from the start."""
    data = np.arange(5)
    result = segment_axis(data, 2, 0, end='wrap')
    expected = np.array([[0, 1], [2, 3], [4, 0]])
    assert (result == expected).all()
def test_sa5():
    """Too little data for a single frame in 'cut' mode raises ValueError."""
    data = np.arange(5)
    msg = "Not enough data points to segment array in 'cut' mode; try 'pad' or 'wrap'"
    with pytest.raises(ValueError, match=msg):
        segment_axis(data, 10, 0, end='cut')
def test_sa6():
    """An overlap larger than the frame length raises ValueError."""
    data = np.arange(5)
    with pytest.raises(ValueError, match="frames cannot overlap by more than 100%"):
        segment_axis(data, 2, 3, end='wrap')
def test_sa7():
    """A negative overlap raises ValueError."""
    data = np.arange(5)
    msg = "overlap must be nonnegative and length must be positive"
    with pytest.raises(ValueError, match=msg):
        segment_axis(data, 2, -1, end='wrap')
def test_sa8():
    """An unrecognized `end` mode raises ValueError."""
    data = np.arange(5)
    with pytest.raises(ValueError, match="is not recognized"):
        segment_axis(data, 7, 0, end='pants')
| bsd-3-clause | ad8050c646af042364dbc2be310ec78b | 21.319588 | 119 | 0.464665 | 3.382813 | false | true | false | false |
dit/dit | dit/multivariate/secret_key_agreement/reduced_intrinsic_mutual_informations.py | 1 | 3326 | """
The reduced intrinsic mutual information.
Note: this code is nowhere near efficient enough to actually run. Don't try it.
"""
from .base_skar_optimizers import BaseReducedIntrinsicMutualInformation
from .intrinsic_mutual_informations import (intrinsic_total_correlation,
intrinsic_dual_total_correlation,
intrinsic_caekl_mutual_information,
)
__all__ = (
'reduced_intrinsic_total_correlation',
'reduced_intrinsic_dual_total_correlation',
'reduced_intrinsic_CAEKL_mutual_information',
)
class ReducedIntrinsicTotalCorrelation(BaseReducedIntrinsicMutualInformation):
    """
    Compute the reduced intrinsic total correlation.
    """
    # Human-readable name of the underlying measure; used in generated docs.
    name = 'total correlation'
    # The intrinsic measure being reduced; wrapped in staticmethod so it is
    # not bound as an instance method when accessed through the class.
    measure = staticmethod(intrinsic_total_correlation)
# Module-level functional interface built from the optimizer class.
reduced_intrinsic_total_correlation = ReducedIntrinsicTotalCorrelation.functional()
class ReducedIntrinsicDualTotalCorrelation(BaseReducedIntrinsicMutualInformation):
    """
    Compute the reduced intrinsic dual total correlation.
    """
    # Human-readable name of the underlying measure; used in generated docs.
    name = 'dual total correlation'
    # The intrinsic measure being reduced; staticmethod prevents binding.
    measure = staticmethod(intrinsic_dual_total_correlation)
# Module-level functional interface built from the optimizer class.
reduced_intrinsic_dual_total_correlation = ReducedIntrinsicDualTotalCorrelation.functional()
class ReducedIntrinsicCAEKLMutualInformation(BaseReducedIntrinsicMutualInformation):
    """
    Compute the reduced intrinsic CAEKL mutual information.
    """
    # Human-readable name of the underlying measure; used in generated docs.
    name = 'CAEKL mutual information'
    # The intrinsic measure being reduced; staticmethod prevents binding.
    measure = staticmethod(intrinsic_caekl_mutual_information)
# Module-level functional interface built from the optimizer class.
reduced_intrinsic_CAEKL_mutual_information = ReducedIntrinsicCAEKLMutualInformation.functional()
def reduced_intrinsic_mutual_information_constructor(func):  # pragma: no cover
    """
    Given a measure of shared information, construct an optimizer which computes
    its ``reduced intrinsic'' form.

    Parameters
    ----------
    func : function
        A function which computes the information shared by a set of variables.
        It must accept the arguments `rvs' and `crvs'.

    Returns
    -------
    RIMI : BaseReducedIntrinsicMutualInformation
        A reduced intrinsic mutual information optimizer using `func` as the
        measure of multivariate mutual information.

    Notes
    -----
    Due to the casting to a Distribution for processing, optimizers constructed
    using this function will be significantly slower than if the objective were
    written directly using the joint probability ndarray.
    """
    class ReducedIntrinsicMutualInformation(BaseReducedIntrinsicMutualInformation):
        name = func.__name__
        measure = staticmethod(func)

    ReducedIntrinsicMutualInformation.__doc__ = \
        """
        Compute the reduced intrinsic {name}.
        """.format(name=func.__name__)

    docstring = \
        """
        Compute the {name}.
        Parameters
        ----------
        d : Distribution
            The distribution to compute {name} of.
        Returns
        -------
        imi : float
            The {name}.
        """.format(name=func.__name__)

    # Fix: dropped the dead Python 2 compatibility branch.  On Python 3,
    # accessing `objective` through the class yields a plain function, so
    # its `__doc__` can be assigned directly; the old `__func__` path only
    # existed for Python 2 unbound methods.
    ReducedIntrinsicMutualInformation.objective.__doc__ = docstring

    return ReducedIntrinsicMutualInformation
| bsd-3-clause | 3e7b77f4e6e41b194f72b6a669131662 | 29.513761 | 96 | 0.692724 | 4.264103 | false | false | false | false |
dit/dit | dit/math/pmfops.py | 1 | 15395 | """
A catch-all module for miscellaneous pmf-based operations.
Eventually, we will need to reorganize.
"""
import dit
from ..exceptions import ditException
import numpy as np
__all__ = (
'convex_combination',
'downsample',
'jittered',
'perturb_support',
'replace_zeros',
)
def perturb_support(pmf, eps=.1, shape='ball', prng=None):
    """
    Return a new distribution with every nonzero probability perturbed.

    Zero probabilities cannot be perturbed by this method.  Each nonzero
    support is perturbed by (1) mapping it to ilr (isometric log-ratio)
    coordinates, (2) adding a random offset drawn from a region of size
    `eps`, and (3) mapping back to the simplex.  Practically, a large `eps`
    makes it more likely the result lands near the simplex boundary.  Large
    distributions (more than ~60 elements) fail due to underflow in the ilr
    transformation.

    Parameters
    ----------
    pmf : NumPy array, shape (n,) or (k,n)
        The distribution(s) to be perturbed; probabilities are assumed to be
        linearly distributed.
    eps : float
        The scaling factor for the perturbation.  Values around `10`
        correspond to large perturbations of the 1-simplex.
    shape : str
        The neighborhood to draw from, 'square' or 'ball', centered around
        `pmf` in ilr coordinates and scaled by `eps`.
    prng : NumPy RandomState
        A random number generator.  If `None`, `dit.math.prng` is used.

    Returns
    -------
    out : NumPy array
        The perturbed distribution(s).

    Raises
    ------
    ditException
        If `shape` is not 'square' or 'ball'.

    References
    ----------
    .. [1] J. J. Egozcue, V. Pawlowsky-Glahn, G. Mateu-Figueras, C.
       Barceló-Vidal. Isometric Logratio Transformations for Compositional
       Data Analysis, Mathematical Geology 35(3):279-300, 2003.
       http://dx.doi.org/10.1023/A:1023818214614
    """
    if prng is None:
        prng = dit.math.prng

    rows = np.atleast_2d(pmf)
    perturbed = np.zeros_like(rows)
    for row_index, row in enumerate(rows):
        # Rows may have different supports, and so live on different
        # simplices; treat each one independently.
        support = row > 0
        ilr_coords = dit.math.aitchison.ilr(row[support])
        if shape == 'square':
            offset = eps * (prng.rand(*ilr_coords.shape) - .5)
        elif shape == 'ball':
            offset = eps * dit.math.ball(ilr_coords.shape[0], prng=prng)
        else:
            raise ditException("Shape {} not recognized.".format(shape))
        perturbed[row_index, support] = dit.math.aitchison.ilr_inv(ilr_coords + offset)

    if len(pmf.shape) == 1:
        perturbed = perturbed[0]
    return perturbed
def replace_zeros(pmf, delta, rand=True, prng=None):
    """
    Replace zeros in a pmf with values smaller than `delta`.

    The replacement is done multiplicatively [1]_: each zero element receives
    a small replacement value and every nonzero element is scaled by
    ``1 - sum(replacements)``.  This preserves the ratios among the nonzero
    elements, an important feature of distributions; simply adding values to
    the zeros and renormalizing would not.

    Note that in the Aitchison geometry, the boundaries of the simplex are
    infinitely far from the uniform distribution, so although this operation
    is numerically small, it moves to a distribution that is (relatively
    speaking) significantly closer to the uniform distribution.

    Parameters
    ----------
    pmf : NumPy array, shape (n,) or (k, n)
        The distribution or `k` distributions living on the `(n-1)`-simplex.
    delta : float
        A small value for all the zero elements in the pmf.
    rand : bool
        When `True`, the replacement values for zero elements are random
        numbers less than `delta`. When `False`, the replacement values are
        equal to `delta`.
    prng : NumPy RandomState
        A random number generator used to select replacement values when
        `rand` is `True`. If None, then `dit.math.prng` is used.

    Returns
    -------
    d : NumPy array, shape (n,) or (k, n)
        A new array with zeros replaced; `pmf` itself is not modified.

    Examples
    --------
    >>> d = np.array([.25, .75, 0])
    >>> replace_zeros(d, .01, rand=False)
    array([ 0.2475,  0.7425,  0.01  ])

    Notes
    -----
    When the distribution is determined by counts and the total number of
    counts is known, this method can be modified so that the value of `delta`
    is chosen according to a Bayesian inferential procedure. Not implemented.

    References
    ----------
    .. [1] Martín-Fernández JA and Thió-Henestrosa S, 2006. Rounded zeros: some
       practical aspects for compositional data. In Compositional Data Analysis
       in the Geosciences: From Theory to Practice, vol. 264. Geological Society,
       London, pp. 191–201.
    """
    if prng is None:
        prng = dit.math.prng

    # Fix: the original named this mask `nonzero` even though it marks the
    # *zero* entries of the pmf; renamed for clarity.
    zeros = pmf == 0
    num_zeros = int(zeros.sum())

    if rand:
        replacements = delta * prng.rand(num_zeros)
    else:
        replacements = np.full(num_zeros, delta)

    d = pmf.copy()
    d[zeros] += replacements
    # Scale the nonzero entries so the result still sums to one while the
    # ratios among them are preserved.
    d[~zeros] *= 1 - replacements.sum()
    return d
def jittered(pmf, jitter=1e-5, zeros=True, prng=None):
    """
    Jitter the elements of `pmf`.

    Parameters
    ----------
    pmf : NumPy array, shape (n,) or (k, n)
        The pmf or `k` pmfs to jitter.
    jitter : float
        The jitter amount, used both for replacing zeros and for perturbing
        the nonzero elements.
    zeros : bool
        If `True`, zeros in `pmf` are first replaced via :meth:`replace_zeros`
        with `jitter` as the `delta` parameter.  If `False`, only the nonzero
        elements are jittered.
    prng : NumPy RandomState
        A random number generator.  If None, `dit.math.prng` is used.

    Returns
    -------
    d : NumPy array, shape (n,) or (k, n)
        The jittered pmf(s).

    Examples
    --------
    >>> d = np.array([.5, .5, 0, 0])
    >>> jittered(d)
    array([4.99999999e-01, 4.99999999e-01, 6.54894572e-10, 5.49417792e-10])

    See Also
    --------
    replace_zeros, perturb_support
    """
    if prng is None:
        prng = dit.math.prng
    d = replace_zeros(pmf, jitter, prng=prng) if zeros else pmf
    return perturb_support(d, jitter, prng=prng)
def convex_combination(pmfs, weights=None):
    """
    Form the convex combination (mixture) of the given pmfs.

    Assumption: all pmf probabilities and weights are linearly distributed.

    Parameters
    ----------
    pmfs : NumPy array, shape (n,k)
        The `n` distributions, each of length `k`, that will be mixed.
    weights : NumPy array, shape (n,), optional
        The weight applied to each pmf; normalized automatically.  If None,
        a uniform weighting is used.

    Returns
    -------
    mixture : NumPy array, shape (k,)
        The weighted mixture of the pmfs.
    """
    # Possibly could be used to speed up dit.mixture_distribution2().
    pmfs = np.atleast_2d(pmfs)
    n = pmfs.shape[0]
    if weights is None:
        weights = np.ones(n, dtype=float) / n
    else:
        weights = np.asarray(weights, dtype=float)
        weights /= weights.sum()
    # Broadcast the weights down the rows and sum the weighted pmfs.
    return (pmfs * weights[:, None]).sum(axis=0)
def downsample(pmf, subdivisions, method='componentL1'):
    """
    Return the nearest pmf on a triangular grid.

    When multiple grid points are equally close, only one of them is
    returned; which one is arbitrary and determined by the method that
    compares distances.

    Parameters
    ----------
    pmf : NumPy array, shape (n,) or (k, n)
        The pmf on the ``(n-1)``-simplex.
    subdivisions : int
        The number of subdivisions of the interval [0, 1].  Each component
        takes values on the subdivision boundaries; for example,
        ``subdivisions=2`` gives :math:`[[0, 1/2], [1/2, 1]]`, so components
        can take the values 0, 1/2, or 1 and (1/2, 1/2, 0) is one possible
        pmf.
    method : str
        The algorithm that defines `nearest`.  The default, 'componentL1',
        moves each component to its nearest grid value using the L1 norm.

    Returns
    -------
    d : NumPy array, shape (n,)
        The downsampled pmf.

    See Also
    --------
    dit.simplex_grid
    """
    downsampler = _methods.get(method)
    if downsampler is None:
        raise NotImplementedError('Unknown method.')
    return downsampler(pmf, subdivisions)
def _downsample_componentL1(pmf, i, op, locs):
    """
    Low-level function to incrementally project a pmf.

    Component `i` of each pmf is snapped to a grid value, and the remaining
    components (i+1 onward) are rescaled so each pmf still sums to one.

    Parameters
    ----------
    pmf : NumPy array, shape (n, k)
        A 2D NumPy array that is modified in-place. The columns represent
        the various pmfs. The rows represent each component.
    i : int
        The component to be projected.
    op : callable
        This is np.argmin or np.argmax. It determines the projection.
    locs : NumPy array
        The subdivisions for each component.

    Returns
    -------
    locations : NumPy array
        The index into `locs` chosen for component `i` of each pmf.
    """
    clamps = clamped_indexes(pmf[i], locs)
    lower, upper = clamps
    # Actually get the clamped region
    gridvals = locs[clamps]
    # Calculate distance to each point, per component.
    distances = np.abs(gridvals - pmf[i])
    # Determine which index each component was closest to.
    #    desired[i] == 0 means that the lower index was closer
    #    desired[i] == 1 means that the upper index was closer
    # If there are any symmetries in the distribution, it could happen
    # that some of the distances are equal. The op() will select only
    # one of these branches---this prevents an exhaustive listing of
    # all possible neighbors. A small jitter is recommended. This will
    # have a marginal effect on any binning we might do.
    desired = op(distances, axis=0)
    # Pull those indexes from the clamping indexes
    # So when desired[i] == 1, we want to pull the upper index.
    locations = np.where(desired, upper, lower)
    pmf[i] = locs[locations]
    # Now renormalize the other components of the distribution...
    temp = pmf.transpose() # View
    # prev_Z is the mass already fixed in components 0..i; the remaining
    # components are scaled to carry exactly the leftover 1 - prev_Z.
    prev_Z = temp[..., :i + 1].sum(axis=-1)
    zeros = np.isclose(prev_Z, 1)
    Z = (1 - prev_Z) / temp[..., i + 1:].sum(axis=-1)
    temp[..., i + 1:] *= Z[..., np.newaxis]
    # If the fixed components already sum to one, the tail must be exactly
    # zero (the division above would have produced NaN/inf there).
    # This assumes len(shape) == 2.
    temp[zeros, i + 1:] = 0
    return locations
def downsample_componentL1(pmf, subdivisions):
    """
    Project `pmf` onto the grid by clamping each component, one-by-one.

    After each component is snapped to its nearest grid value, the remaining
    components are renormalized, so updated insert indexes are used as the
    projection proceeds.
    """
    grid = np.linspace(0, 1, subdivisions + 1)
    working = np.atleast_2d(pmf).transpose().copy()
    # Snap every component except the last; it is fixed by normalization.
    for component in range(working.shape[0] - 1):
        _downsample_componentL1(working, component, np.argmin, grid)
    result = working.transpose()
    return result[0] if len(pmf.shape) == 1 else result
def clamped_indexes(pmf, locs):
    """
    Return the indexes of the grid values that clamp each pmf component.

    If a component value is equal to a grid point, then its lower and upper
    clamping indexes are equal to one another.

    Parameters
    ----------
    pmf : NumPy array, shape (n,) or (k, n)
        The pmf(s) whose components are to be clamped.
    locs : NumPy array
        The sorted grid values for each component.

    Returns
    -------
    clamps : NumPy array, shape (2,n) or (2,k,n)
        Row 0 holds the lower and row 1 the upper clamping indexes into
        `locs`.

    Examples
    --------
    >>> locs = np.linspace(0, 1, 3)  # [0, 1/2, 1]
    >>> d = np.array([.25, .5, .25])
    >>> clamped_indexes(d, locs)
    array([[0, 1, 0],
           [1, 1, 1]])
    """
    # Fix: the docstring example previously called a nonexistent
    # `clamped_indices`; it now uses this function's actual name.
    # Find insertion indexes.
    left_index = np.searchsorted(locs, pmf, 'left')
    right_index = np.searchsorted(locs, pmf, 'right')
    # If the left index does not equal the right index, then the coordinate
    # is equal to an element of `locs`. This means we want its upper and lower
    # clamped indexes to be equal.
    #
    # If the left and right indexes are equal, then the (left) index specifies
    # the upper clamp. We subtract one for the lower clamp. There is no concern
    # about the lower clamp dropping to -1 since we already know that the
    # coordinate is not equal to an element in `locs`.
    upper = left_index
    lower = upper - 1
    mask = left_index != right_index
    lower[mask] = upper[mask]
    return np.array([lower, upper])
def projections(pmf, subdivisions, ops=None):
    """
    Returns the projections on the way to the nearest grid point.

    The original pmf is included in the final output.

    Parameters
    ----------
    pmf : NumPy array, shape (n,) or (k, n)
        The pmf on the ``(n-1)``-simplex. Optionally, provide `k` pmfs.
    subdivisions : int
        The number of subdivisions for the interval [0, 1]. The grid considered
        is such that each component will take on values at the boundaries of
        the subdivisions. For example, ``subdivisions=2`` corresponds to
        :math:`[[0, 1/2], [1/2, 1]]` and thus, each component can take the
        values 0, 1/2, or 1. So one possible pmf would be (1/2, 1/2, 0).

    Other Parameters
    ----------------
    ops : list
        A list of `n-1` callables, where `n` is the number of components in
        the pmf. Each element in the list is a callable that determines how
        the downsampled pmfs are constructed by specifying which of the lower
        and upper clamped location indexes should be chosen. If `None`, then
        `ops` is a list of `np.argmin` and will select the closest grid point.

    Returns
    -------
    d : NumPy array, shape (n,n) or (n,k,n)
        The projections leading to the downsampled pmf.

    See Also
    --------
    downsample, dit.simplex_grid
    """
    locs = np.linspace(0, 1, subdivisions + 1)
    out = np.atleast_2d(pmf).transpose().copy()
    # The untouched pmf is the first "projection".
    projs = [out.copy()]
    if ops is None:
        # Take closest point in regional cell.
        ops = [np.argmin] * (out.shape[0] - 1)
    # Go through each component and move to closest component.
    for i, op in zip(range(out.shape[0] - 1), ops):
        _downsample_componentL1(out, i, op, locs)
        projs.append(out.copy())
    projs = np.asarray(projs)
    # Restore (projection, pmf-index, component) ordering.
    projs = np.swapaxes(projs, 1, 2)
    if len(pmf.shape) == 1:
        projs = projs[:, 0, :]
    return projs
# Dispatch table consulted by `downsample` to select the algorithm.
_methods = {
    'componentL1': downsample_componentL1
}
| bsd-3-clause | 183995582ee05f50e2f84af8c49cfd73 | 32.023605 | 79 | 0.629736 | 3.668415 | false | false | false | false |
dit/dit | dit/multivariate/common_informations/base_markov_optimizer.py | 1 | 10027 | """
Abstract base classes.
"""
from abc import abstractmethod
import numpy as np
from ...algorithms import BaseAuxVarOptimizer
from ...utils import unitful
from ..dual_total_correlation import dual_total_correlation
from ..entropy import entropy
__all__ = (
'MarkovVarOptimizer',
'MinimizingMarkovVarOptimizer',
)
class MarkovVarOptimizer(BaseAuxVarOptimizer):
    """
    Abstract base class for constructing auxiliary variables which render a set
    of variables conditionally independent.

    Subclasses must supply `name`, `description`, and `compute_bound`.
    """
    # Human-readable name/description of the measure; used when building
    # the docstring of the functional form.
    name = ""
    description = ""

    def __init__(self, dist, rvs=None, crvs=None, bound=None, rv_mode=None):
        """
        Initialize the optimizer.

        Parameters
        ----------
        dist : Distribution
            The distribution to compute the auxiliary Markov variable, W, for.
        rvs : list, None
            A list of lists. Each inner list specifies the indexes of the random
            variables to render conditionally independent. If None, then all
            random variables are used, which is equivalent to passing
            `rvs=dist.rvs`.
        crvs : list, None
            A single list of indexes specifying the random variables to
            condition on. If None, then no variables are conditioned on.
        bound : int
            Place an artificial bound on the size of W.
        rv_mode : str, None
            Specifies how to interpret `rvs` and `crvs`. Valid options are:
            {'indices', 'names'}. If equal to 'indices', then the elements of
            `crvs` and `rvs` are interpreted as random variable indices. If
            equal to 'names', the the elements are interpreted as random
            variable names. If `None`, then the value of `dist._rv_mode` is
            consulted, which defaults to 'indices'.
        """
        super().__init__(dist, rvs=rvs, crvs=crvs, rv_mode=rv_mode)
        theoretical_bound = self.compute_bound()
        # Never exceed the theoretical bound, even if the user asked for more.
        bound = min(bound, theoretical_bound) if bound else theoretical_bound
        # Cardinalities of the rvs other than the first (axis 0 is the first
        # rv; the final axis holds the crvs).
        rv_bounds = self._shape[1:-1]
        self._pmf_to_match = self._pmf.copy()
        # remove the rvs other than the first, they need to be generated by W
        # in order to satisfy the markov criteria:
        self._pmf = self._pmf.sum(axis=tuple(range(1, len(self._shape) - 1)))
        self._shape = self._pmf.shape
        self._all_vars = {0, 1}
        self._full_pmf = self._full_pmf.sum(axis=tuple(range(self._n + 1, len(self._full_shape) - 1)))
        self._full_shape = self._full_pmf.shape
        self._full_vars = tuple(range(self._n + 2))
        # back up where the rvs and crvs are; they need to reflect
        # the above removals for the sake of adding auxvars:
        self.__rvs, self._rvs = self._rvs, {0}
        self.__crvs, self._crvs = self._crvs, {1}
        # W depends on (first rv, crvs); each removed rv is then regenerated
        # from (crvs, W) with its original cardinality.
        self._construct_auxvars([({0, 1}, bound)]
                                + [({1, 2}, s) for s in rv_bounds])
        # put rvs, crvs back:
        self._rvs = self.__rvs
        self._crvs = self.__crvs
        del self.__rvs
        del self.__crvs
        self._W = {1 + len(self._aux_vars)}
        # The constraint that the joint doesn't change.
        self.constraints += [{'type': 'eq',
                              'fun': self.constraint_match_joint,
                              },
                             ]
        self._default_hops = 5
        self._additional_options = {'options': {'maxiter': 1000,
                                                'ftol': 1e-6,
                                                'eps': 1.4901161193847656e-9,
                                                }
                                    }

    @abstractmethod
    def compute_bound(self):
        """
        Return a bound on the cardinality of the auxiliary variable.

        Returns
        -------
        bound : int
            The bound on the size of W.
        """
        pass

    def construct_joint(self, x):
        """
        Construct the joint distribution.

        Parameters
        ----------
        x : np.ndarray
            An optimization vector.

        Returns
        -------
        joint : np.ndarray
            The joint distribution resulting from the distribution passed
            in and the optimization vector.
        """
        joint = super().construct_joint(x)
        # Reorder axes so the layout is (rvs..., crvs, W).
        joint = np.moveaxis(joint, 1, -1)  # move crvs
        joint = np.moveaxis(joint, 1, -1)  # move W
        return joint

    def construct_full_joint(self, x):
        """
        Construct the joint distribution.

        Parameters
        ----------
        x : np.ndarray
            An optimization vector.

        Returns
        -------
        joint : np.ndarray
            The joint distribution resulting from the distribution passed
            in and the optimization vector.
        """
        joint = super().construct_full_joint(x)
        # Same axis reordering as construct_joint, offset by the original rvs.
        joint = np.moveaxis(joint, self._n + 1, -1)  # move crvs
        joint = np.moveaxis(joint, self._n + 1, -1)  # move W
        return joint

    def constraint_match_joint(self, x):
        """
        Ensure that the joint distribution represented by the optimization
        vector matches that of the distribution.

        Parameters
        ----------
        x : np.ndarray
            An optimization vector.

        Returns
        -------
        delta : float
            The scaled squared deviation from the target joint; zero when
            the joint is matched exactly.
        """
        joint = self.construct_joint(x)
        joint = joint.sum(axis=-1)  # marginalize out w
        # Factor of 100 scales the penalty -- presumably to sharpen the
        # equality constraint for the solver; TODO confirm.
        delta = (100 * (joint - self._pmf_to_match)**2).sum()
        return delta

    @classmethod
    def functional(cls):
        """
        Construct a functional form of the optimizer.
        """
        @unitful
        def common_info(dist, rvs=None, crvs=None, niter=None, maxiter=1000, polish=1e-6, bound=None, rv_mode=None):
            dtc = dual_total_correlation(dist, rvs, crvs, rv_mode)
            ent = entropy(dist, rvs, crvs, rv_mode)
            if np.isclose(dtc, ent):
                # Common informations are bound between the dual total correlation and the joint
                # entropy. Therefore, if the two are equal, the common information is equal to them
                # as well.
                return dtc
            ci = cls(dist, rvs, crvs, bound, rv_mode)
            ci.optimize(niter=niter, maxiter=maxiter, polish=polish)
            return ci.objective(ci._optima)

        common_info.__doc__ = \
            """
        Computes the {name} common information, {description}.
        Parameters
        ----------
        dist : Distribution
            The distribution for which the {name} common information will be
            computed.
        rvs : list, None
            A list of lists. Each inner list specifies the indexes of the random
            variables used to calculate the {name} common information. If None,
            then it calculated over all random variables, which is equivalent to
            passing `rvs=dist.rvs`.
        crvs : list, None
            A single list of indexes specifying the random variables to condition
            on. If None, then no variables are conditioned on.
        niter : int > 0
            Number of basin hoppings to perform during the optimization.
        maxiter : int > 0
            The number of iterations of the optimization subroutine to perform.
        polish : False, float
            Whether to polish the result or not. If a float, this will perform a
            second optimization seeded with the result of the first, but with
            smaller tolerances and probabilities below polish set to 0. If
            False, don't polish.
        bound : int
            Bound the size of the Markov variable.
        rv_mode : str, None
            Specifies how to interpret `rvs` and `crvs`. Valid options are:
            {{'indices', 'names'}}. If equal to 'indices', then the elements of
            `crvs` and `rvs` are interpreted as random variable indices. If equal
            to 'names', the the elements are interpreted as random variable names.
            If `None`, then the value of `dist._rv_mode` is consulted, which
            defaults to 'indices'.
        Returns
        -------
        ci : float
            The {name} common information.
        """.format(name=cls.name, description=cls.description)

        return common_info
class MinimizingMarkovVarOptimizer(MarkovVarOptimizer):  # pragma: no cover
    """
    Abstract base class for an optimizer which additionally minimizes the size
    of the auxiliary variable.
    """

    def optimize(self, x0=None, niter=None, maxiter=None, polish=1e-6, callback=False, minimize=True, min_niter=15):
        """
        Run the base optimization, then optionally minimize the entropy of
        the auxiliary variable W and polish the result.

        Parameters
        ----------
        x0 : np.ndarray, None
            The vector to initialize the optimization with. If None, a random
            vector is used.
        niter : int
            The number of times to basin hop in the optimization.
        maxiter : int
            The number of inner optimizer steps to perform.
        polish : False, float
            Whether to polish the result or not. If a float, this will perform a
            second optimization seeded with the result of the first, but with
            smaller tolerances and probabilities below polish set to 0. If
            False, don't polish.
        callback : bool
            Whether to utilize a callback or not.
        minimize : bool
            Whether to minimize the auxiliary variable or not.
        min_niter : int
            The number of basin hops to make during the minimization of the common variable.
        """
        # call the normal optimizer; polishing is deferred until after the
        # optional entropy minimization below.
        super().optimize(x0=x0,
                         niter=niter,
                         maxiter=maxiter,
                         polish=False,
                         callback=callback)
        if minimize:
            # minimize the entropy of W
            self._post_process(style='entropy', minmax='min', niter=min_niter, maxiter=maxiter)
        if polish:
            self._polish(cutoff=polish)
| bsd-3-clause | 785a15e7cba6764c39719f4861966123 | 35.198556 | 116 | 0.566471 | 4.452487 | false | false | false | false |
dit/dit | dit/pid/measures/ict.py | 1 | 2123 | """
The redundancy measure of Sigtermans based on causal tensors.
"""
import numpy as np
from ...exceptions import ditException
from ..pid import BaseBivariatePID
__all__ = (
'PID_CT',
)
def i_triangle(d, source_0, source_1, target):
    """
    Compute the path information, and if it is direct or not.

    Parameters
    ----------
    d : Distribution
        The distribution to compute the path information of.
    source_0, source_1 : iterable
        The indexes of the two source variables (X and Y below), as accepted
        by `Distribution.coalesce`.
    target : iterable
        The indexes of the target variable (Z below).

    Returns
    -------
    path_info : float
        The path information flowing from the sources to the target.
        (The original docstring claimed a Bool `direct_path` return, but the
        function returns a float.)
    """
    d = d.coalesce([source_0, source_1, target])
    d.make_dense()
    p_xyz = d.pmf.reshape([len(a) for a in d.alphabet])
    # Marginals, kept with singleton axes so broadcasting lines up below.
    p_xy = np.nansum(p_xyz, axis=(2,), keepdims=True)
    p_xz = np.nansum(p_xyz, axis=(1,), keepdims=True)
    p_yz = np.nansum(p_xyz, axis=(0,), keepdims=True)
    p_x = np.nansum(p_xyz, axis=(1, 2), keepdims=True)
    p_y = np.nansum(p_xyz, axis=(0, 2), keepdims=True)
    p_z = np.nansum(p_xyz, axis=(0, 1), keepdims=True)
    # Conditional "causal tensors":
    #   A = p(y|x), B = p(z|y), C = p(z|x), Add = p(x|y).
    A = p_xy / p_x
    B = p_yz / p_y
    C = p_xz / p_x
    Add = p_xy / p_y
    # Cascade predictions: p(z|x) via Y, and p(z|y) via X.
    cascade_xyz = np.nansum(A * B, axis=(1,), keepdims=True)
    cascade_yxz = np.nansum(Add * C, axis=(0,), keepdims=True)
    # A link is "direct" when the observed tensor differs from its cascade.
    direct_yz = abs(B - cascade_yxz).sum() > 1e-6
    direct_xz = abs(C - cascade_xyz).sum() > 1e-6
    path_info_xyz = np.nansum((p_x * cascade_xyz) * np.log2(cascade_xyz / p_z))
    path_info_yxz = np.nansum((p_y * cascade_yxz) * np.log2(cascade_yxz / p_z))
    if not (direct_xz ^ direct_yz):
        # Both or neither link is direct: take the smaller path information.
        return min(path_info_xyz, path_info_yxz)
    elif direct_xz:
        return path_info_yxz
    elif direct_yz:
        return path_info_xyz
    else:
        # NOTE(review): unreachable -- exactly one of direct_xz/direct_yz is
        # True here, so the elif branches above always fire. Kept defensively.
        raise ditException("Something went wrong...")
class PID_CT(BaseBivariatePID):
    """
    The bivariate PID defined by Sigtermans using causal tensors.
    """
    # Display name of this PID measure.
    _name = "I_△"

    @staticmethod
    def _measure(d, sources, target):
        """
        The PID measure of Sigtermans based on causal tensors.

        Parameters
        ----------
        d : Distribution
            The distribution to evaluate.
        sources : sequence
            Exactly two source variable specifications.
        target : iterable
            The target variable specification.

        Returns
        -------
        i_tri : float
            The redundancy value.

        Raises
        ------
        ditException
            If the number of sources is not exactly two.
        """
        if len(sources) != 2:  # pragma: no cover
            # Fix: error message grammar ("exact two" -> "exactly two").
            msg = "This method needs exactly two sources, {} given.".format(len(sources))
            raise ditException(msg)
        return i_triangle(d, sources[0], sources[1], target)
| bsd-3-clause | be76aa5018b8b10326097b24a87767ac | 25.5125 | 87 | 0.583687 | 3.004249 | false | false | false | false |
dit/dit | tests/shannon/test_shannon.py | 1 | 2265 | """
Tests for dit.shannon.shannon.
"""
import pytest
import numpy as np
from dit import Distribution as D, ScalarDistribution as SD
from dit.shannon import (entropy as H,
mutual_information as I,
conditional_entropy as CH,
entropy_pmf)
def test_entropy_pmf1d():
    """A fair coin pmf carries exactly one bit of entropy."""
    assert entropy_pmf([0.5, 0.5]) == pytest.approx(1.0)
def test_entropy_pmf2d():
    """Row-wise entropies of a 2d pmf array."""
    # Note: renamed the expected-values local so it no longer shadows the
    # imported entropy function H.
    pmfs = np.array([[1, 0], [0.5, 0.5]])
    expected = np.array([0, 1])
    assert np.allclose(entropy_pmf(pmfs), expected)
def test_H1():
    """The entropy of a fair coin is one bit."""
    assert H(SD([1 / 2, 1 / 2])) == pytest.approx(1.0)
def test_H2():
    """Passing a bare float is treated as a Bernoulli probability."""
    p = 1 / 2
    assert H(p) == pytest.approx(1.0)
def test_H3():
    """Joint and marginal entropies of two independent fair bits."""
    d = D(['00', '01', '10', '11'], [1 / 4] * 4)
    for rvs, expected in [([0], 1.0), ([1], 1.0), ([0, 1], 2.0)]:
        assert H(d, rvs) == pytest.approx(expected)
    assert H(d) == pytest.approx(2.0)
def test_H4():
    """A uniform distribution over ten outcomes has entropy 1 in base 10."""
    uniform = SD([1 / 10] * 10)
    uniform.set_base(10)
    assert H(uniform) == pytest.approx(1.0)
def test_I1():
    """Independent bits share no information."""
    d = D(['00', '01', '10', '11'], [1 / 4] * 4)
    assert I(d, [0], [1]) == pytest.approx(0.0)
def test_I2():
    """Perfectly correlated bits share one bit of information."""
    d = D(['00', '11'], [1 / 2] * 2)
    assert I(d, [0], [1]) == pytest.approx(1.0)
def test_I3():
    """Overlapping variable groups in the xor distribution share two bits."""
    d = D(['000', '011', '101', '110'], [1 / 4] * 4)
    assert I(d, [0, 1], [1, 2]) == pytest.approx(2.0)
def test_CH1():
    """Conditional entropies in the xor distribution."""
    d = D(['000', '011', '101', '110'], [1 / 4] * 4)
    cases = [([0], [1, 2], 0.0),
             ([0, 1], [2], 1.0),
             ([0], [0], 0.0)]
    for rvs, crvs, expected in cases:
        assert CH(d, rvs, crvs) == pytest.approx(expected)
| bsd-3-clause | f92517b67d43475559356725ffb87098 | 24.449438 | 60 | 0.53245 | 2.792848 | false | true | false | false |
dit/dit | dit/algorithms/convex_maximization.py | 1 | 2653 | """
Code for maximizing a convex function over a polytope, as defined
by a set of linear equalities and inequalities.
"""
import numpy as np
import scipy
from .optutil import as_full_rank
__all__ = (
'maximize_convex_function',
)
def maximize_convex_function(f, A_ineq, b_ineq, A_eq=None, b_eq=None):
    """
    Maximize a convex function over a polytope. This function uses the fact that
    the maximum of a convex function over a polytope will be achieved at one of
    the extreme points of the polytope.

    The maximization is done by taking a system of linear inequalities, using the
    pypoman library to create a list of extreme points, and then evaluating the
    objective function on each point.

    Parameters
    ----------
    f : function
        Objective function to maximize
    A_ineq : matrix
        Specifies inequalities matrix, should be num_inequalities x num_variables
    b_ineq : array
        Specifies inequalities vector, should be num_inequalities long
    A_eq : matrix
        Specifies equalities matrix, should be num_equalities x num_variables
    b_eq : array
        Specifies equalities vector, should be num_equalities long

    Returns
    -------
    tuple
        ``(optimal_extreme_point, maximum_function_value)``.

    Raises
    ------
    Exception
        If the polytope has no extreme points.
    """
    best_x, best_val = None, -np.inf
    A_ineq = A_ineq.astype('float')
    b_ineq = b_ineq.astype('float')
    # Drop redundant rows so the inequality system has full rank.
    A_ineq, b_ineq, _ = as_full_rank(A_ineq, b_ineq)
    if A_eq is not None:
        # pypoman doesn't support equality constraints. We remove equality
        # constraints by doing a coordinate transformation.
        A_eq = A_eq.astype('float')
        b_eq = b_eq.astype('float')
        A_eq, b_eq, _ = as_full_rank(A_eq, b_eq)
        # Get one solution that satisfies A x0 = b
        x0 = np.linalg.lstsq(A_eq, b_eq, rcond=None)[0]
        # NOTE(review): validation via assert is stripped under `python -O`;
        # an explicit exception would be more robust.
        assert(np.abs(A_eq.dot(x0) - b_eq).max() < 1e-5)
        # Get projector onto null space of A, it satisfies AZ=0 and Z^T Z=I
        Z = scipy.linalg.null_space(A_eq)
        # Now every solution can be written as x = x0 + Zq, since A x = A x0 = b
        # Inequalities get transformed as
        #   A'x <= b'  --->  A'(x0 + Zq) <= b  -->  (A'Z)q <= b - A'x0
        b_ineq = b_ineq - A_ineq.dot(x0)
        A_ineq = A_ineq.dot(Z)
        transform = lambda q: Z.dot(q) + x0
    else:
        transform = lambda x: x
    # Imported here so the dependency is only required when this function
    # is actually used.
    import pypoman
    extreme_points = pypoman.compute_polytope_vertices(A_ineq, b_ineq)
    # A convex function attains its maximum at a vertex: evaluate them all.
    for v in extreme_points:
        x = transform(v)
        val = f(x)
        if val > best_val:
            best_x, best_val = x, val
    if best_x is None:
        raise Exception('No extreme points found!')
    return best_x, best_val
| bsd-3-clause | 89b0626cbf20f708184dbdfec6e12dd3 | 28.153846 | 81 | 0.632115 | 3.21966 | false | false | false | false |
dit/dit | site/build.py | 1 | 1719 | #!/usr/bin/env python
"""
This script can be used to build the website.
It is also run on each commit to github.
Example: ./build public_html
"""
import os
import shutil
import subprocess
import sys
import time
BUILD_DIR = 'build'
def get_build_dir():
    """Return the absolute path of the build directory.

    The first command-line argument, when present, overrides the default
    directory name ``BUILD_DIR``. The path is resolved relative to the
    current working directory.
    """
    target = sys.argv[1] if len(sys.argv) > 1 else BUILD_DIR
    return os.path.join(os.path.abspath(os.path.curdir), target)
def build(dest):
    """Copy the static site sources into `dest`.

    The site is fully static for now, so a recursive copy of the ``src``
    directory next to this script is all that is required.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    shutil.copytree(os.path.join(here, 'src'), dest)
def update_gitrepo():
    """Run ``git pull`` inside this script's repository.

    The current working directory is restored afterwards, even if the
    subprocess call raises.
    """
    repo_dir = os.path.dirname(os.path.abspath(__file__))
    previous = os.getcwd()
    try:
        os.chdir(repo_dir)
        subprocess.call(['git', 'pull'])
    finally:
        os.chdir(previous)
def main():
    """Delete any stale build directory and rebuild the site.

    Usage: build.py [build_dir] [min_delay_minutes]
    """
    # Optional second CLI argument: minimum minutes between rebuilds.
    # NOTE(review): a non-integer argument raises ValueError here — only
    # IndexError (a missing argument) is handled.
    try:
        min_delay = int(sys.argv[2]) * 60
    except IndexError:
        min_delay = 0
    # Build only if enough time has passed.
    build_dir = get_build_dir()
    if os.path.exists(build_dir):
        elapsed = time.time() - os.path.getmtime(build_dir)
        if elapsed < min_delay:
            print("Not enough time has elapsed since last build.")
            sys.exit(0)
        else:
            # Delete it all!
            if os.path.islink(build_dir):
                os.unlink(build_dir)
            else:
                shutil.rmtree(build_dir)
    elif os.path.islink(build_dir):
        # Path does not exist but is a link: a dangling symlink. Remove it.
        os.unlink(build_dir)
    # update_gitrepo()
    build(build_dir)
    # Refresh the mtime so the min-delay check above works on the next run.
    subprocess.call(['touch', build_dir])
    print("Done.\n")
if __name__ == '__main__':
main()
| bsd-3-clause | 16bcb8bd5d97b820a649f85848761b2a | 20.4875 | 66 | 0.585806 | 3.472727 | false | false | false | false |
dit/dit | dit/other/extropy.py | 1 | 2159 | """
The extropy.
"""
from ..helpers import RV_MODES
from ..math.ops import get_ops
import numpy as np
__all__ = (
'extropy',
)
def extropy(dist, rvs=None, rv_mode=None):
    """
    Returns the extropy J[X] over the random variables in `rvs`.
    If the distribution represents linear probabilities, then the extropy
    is calculated with units of 'bits' (base-2).
    Parameters
    ----------
    dist : Distribution or float
        The distribution from which the extropy is calculated. If a float,
        then we calculate the binary extropy.
    rvs : list, None
        The indexes of the random variable used to calculate the extropy.
        If None, then the extropy is calculated over all random variables.
        This should remain `None` for ScalarDistributions.
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `rvs` are interpreted as random variable indices. If equal to 'names',
        the elements are interpreted as random variable names. If `None`,
        then the value of `dist._rv_mode` is consulted.
    Returns
    -------
    J : float
        The extropy of the distribution.
    """
    try:
        # Handle binary extropy: a bare probability p is promoted to the
        # Bernoulli distribution (p, 1-p).
        float(dist)
    except TypeError:
        pass
    else:
        # Assume linear probability for binary extropy.
        import dit
        dist = dit.ScalarDistribution([dist, 1 - dist])
        rvs = None
        rv_mode = RV_MODES.INDICES
    if dist.is_joint():
        if rvs is None:
            # Set to entropy of entire distribution
            rvs = list(range(dist.outcome_length()))
            rv_mode = RV_MODES.INDICES
        d = dist.marginal(rvs, rv_mode=rv_mode)
    else:
        d = dist
    pmf = d.pmf
    if d.is_log():
        # Log-space pmf: npmf holds log(1 - p) in the distribution's base,
        # so each term is -(1-p) * log_base(1-p).
        # NOTE(review): this branch uses the distribution's own base, not
        # bits — confirm that is the intended unit for log distributions.
        base = d.get_base(numerical=True)
        npmf = d.ops.log(1 - d.ops.exp(pmf))
        terms = -base**npmf * npmf
    else:
        # Calculate entropy in bits.
        log = get_ops(2).log
        npmf = 1 - pmf
        terms = -npmf * log(npmf)
    # nansum treats the 0*log(0) = nan terms as zero.
    J = np.nansum(terms)
    return J
| bsd-3-clause | e64340a5ab059ac1c655a1b7796d5caf | 26.329114 | 78 | 0.599352 | 3.855357 | false | false | false | false |
dit/dit | dit/multivariate/common_informations/mss_common_information.py | 1 | 1932 | """
Compute the minimal sufficient statistic common information.
"""
from copy import deepcopy
import numpy as np
from ...algorithms.minimal_sufficient_statistic import insert_joint_mss
from ...helpers import normalize_rvs
from ...utils import unitful
from ..dual_total_correlation import dual_total_correlation
from ..entropy import entropy
__all__ = (
'mss_common_information',
)
@unitful
def mss_common_information(dist, rvs=None, crvs=None, rv_mode=None):
    """
    Compute the minimal sufficient statistic common information: the entropy
    of the join of each variable's minimal sufficient statistic about the
    other variables.

    Parameters
    ----------
    dist : Distribution
        The distribution for which the joint minimal sufficient statistic is
        computed.
    rvs : list, None
        The random variables to compute over. If None, all random variables
        are used.
    crvs : list, None
        The random variables to condition on. If None, nothing is
        conditioned on.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If `None`, the value of `dist._rv_mode` is
        consulted, which defaults to 'indices'.
    """
    work = deepcopy(dist)
    work.make_sparse()
    rvs, crvs, rv_mode = normalize_rvs(work, rvs, crvs, rv_mode)
    btc = dual_total_correlation(work, rvs, crvs, rv_mode)
    h = entropy(work, rvs, crvs, rv_mode)
    # Shortcut: when the dual total correlation saturates the entropy, the
    # joint MSS carries the full entropy and no construction is needed.
    if np.isclose(btc, h):
        return btc
    joined = insert_joint_mss(work, -1, rvs, rv_mode)
    return entropy(joined, [joined.outcome_length() - 1], crvs, rv_mode)
| bsd-3-clause | 71db0baccb5ad34dc9418a0f46f9339e | 32.894737 | 138 | 0.686335 | 3.926829 | false | false | false | false |
dit/dit | tests/divergences/test_cross_entropy.py | 1 | 1716 | """
Tests for dit.divergences.cross_entropy.
"""
import pytest
import numpy as np
from dit import Distribution
from dit.divergences import cross_entropy
from dit.exceptions import ditException
from dit.multivariate import entropy
# Fixture distributions. d1 and d2 share only the outcome '0'; d3 covers
# both alphabets; d4/d5 are two-symbol joint distributions over {'00','11'}.
d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])
d2 = Distribution(['0', '2'], [1 / 2, 1 / 2])
d3 = Distribution(['0', '1', '2'], [1 / 3, 1 / 3, 1 / 3])
d4 = Distribution(['00', '11'], [2 / 5, 3 / 5])
d5 = Distribution(['00', '11'], [1 / 2, 1 / 2])
@pytest.mark.parametrize(('args', 'expected'), [
    ([d1, d3], 1.5849625007211563),
    ([d1, d4], 1.0294468445267841),
    ([d1, d3, [0]], 1.5849625007211563),
    ([d1, d4, [0]], 1.0294468445267841),
    ([d4, d1, [0]], 1),
    ([d4, d5], 1),
    ([d5, d4], 1.0294468445267841),
    ([d4, d5, [0], [1]], 0),
    ([d4, d5, [1], [0]], 0),
    # The cross entropy diverges when p has support outside q's support.
    ([d1, d2], np.inf),
    ([d2, d1], np.inf),
    ([d3, d1], np.inf),
])
def test_cross_entropy_1(args, expected):
    """
    Test cross_entropy against several known values, including partially
    overlapping supports (infinite cross entropy) and conditional forms.
    """
    assert cross_entropy(*args) == pytest.approx(expected)
@pytest.mark.parametrize('d', [d1, d2, d3, d4, d5])
def test_cross_entropy_2(d):
    """
    Test that the cross entropy of a distribution with itself reduces to
    its Shannon entropy: xH(d, d) = H(d).
    """
    assert cross_entropy(d, d) == pytest.approx(entropy(d))
@pytest.mark.parametrize('args', [
    [d4, d1, None, None],
    [d4, d2, None, None],
    [d4, d3, None, None],
    [d1, d2, [0, 1], None],
    [d3, d4, [1], None],
    [d5, d1, [0], [1]],
    [d4, d3, [1], [0]],
])
def test_cross_entropy_3(args):
    """
    Test that a ditException is raised for incompatible distribution pairs,
    e.g. mismatched outcome lengths or invalid rvs/crvs specifications.
    """
    first, second, rvs, crvs = args
    with pytest.raises(ditException):
        cross_entropy(first, second, rvs, crvs)
| bsd-3-clause | d74e2f3281fd94689d4ef9eb82eb1687 | 25 | 82 | 0.556527 | 2.561194 | false | true | false | false |
dit/dit | tests/test_abstractdist.py | 2 | 1053 | """
Tests for dit.abstractdist.
"""
import numpy as np
from dit.abstractdist import get_abstract_dist, distribution_constraint
from dit.example_dists import Xor
def test_distribution_constraint1():
    """
    Test the marginal-matching constraint between variables 0 and 1 of the
    xor distribution.
    """
    d = Xor()
    ad = get_abstract_dist(d)
    A, b = distribution_constraint([0], [1], ad)
    true_A = np.array([[0, 0, 1, 1, -1, -1, 0, 0],
                       [0, 0, -1, -1, 1, 1, 0, 0]])
    # NOTE(review): true_b has 8 entries while A has 2 rows; the equality
    # below relies on b's actual shape/broadcasting — confirm intended.
    true_b = np.array([0, 0, 0, 0, 0, 0, 0, 0])
    assert (A == true_A).all()
    assert (b == true_b).all()
def test_distribution_constraint2():
    """
    Test the marginal-matching constraint between variable pairs (0, 1) and
    (1, 2) of the xor distribution.
    """
    d = Xor()
    ad = get_abstract_dist(d)
    A, b = distribution_constraint([0, 1], [1, 2], ad)
    true_A = np.array([[0, 1, 0, 0, -1, 0, 0, 0],
                       [0, -1, 1, 1, 0, -1, 0, 0],
                       [0, 0, -1, 0, 1, 1, -1, 0],
                       [0, 0, 0, -1, 0, 0, 1, 0]])
    true_b = np.array([0, 0, 0, 0, 0, 0, 0, 0])
    assert (A == true_A).all()
    assert (b == true_b).all()
| bsd-3-clause | 1362a54f42672e32d710d10ce9cabca3 | 26.710526 | 71 | 0.480532 | 2.742188 | false | true | false | false |
dit/dit | dit/profiles/entropy_triangle.py | 1 | 5487 | """
The entropy triangle, from [Valverde-Albacete, Francisco Jose, and Carmen
Pelaez-Moreno. "The Multivariate Entropy Triangle and Applications." Hybrid
Artificial Intelligent Systems. Springer International Publishing, 2016.
647-658].
"""
from abc import ABCMeta, abstractmethod
from ..distribution import BaseDistribution
from ..distconst import product_distribution, uniform_like
from ..multivariate import (entropy, residual_entropy, dual_total_correlation,
total_correlation)
__all__ = (
'EntropyTriangle',
'EntropyTriangle2',
)
class BaseEntropyTriangle(metaclass=ABCMeta):
    """
    Abstract base class for entropy triangles. Subclasses supply
    `_compute_point`, mapping a distribution to normalized barycentric
    coordinates that sum to one.
    Static Attributes
    -----------------
    left_label : str
        The label for the left axis when plotting.
    right_label : str
        The label for the right axis when plotting.
    bottom_label : str
        The label for the bottom axis when plotting.
    Attributes
    ----------
    dists : [Distribution]
    points : list of tuples
    Methods
    -------
    draw
        Plot the entropy triangle.
    """
    left_label = r"$\operatorname{R}[\mathrm{dist}]$"
    right_label = r"$\operatorname{T}[\mathrm{dist}] + \operatorname{B}[\mathrm{dist}]$"
    bottom_label = r"$\Delta \operatorname{H}_{\Pi_\overline{X}}$"
    def __init__(self, dists):
        """
        Initialize the entropy triangle.
        Parameters
        ----------
        dists : [Distribution] or Distribution
            The list of distributions to plot on the entropy triangle. If a
            single distribution is provided, it alone will be computed.
        """
        # Normalize to a list so a single distribution is handled uniformly.
        if isinstance(dists, BaseDistribution):
            self.dists = [dists]
        else:
            self.dists = dists
        # Precompute the triangle coordinates via the subclass hook.
        self.points = [self._compute_point(dist) for dist in self.dists]
    @staticmethod
    @abstractmethod
    def _compute_point(dist):
        """
        Compute the three normalized axis.
        Parameters
        ----------
        dist : Distribution
            The distribution to compute values for.
        """
        pass
    def draw(self, ax=None, setup=True, marker='o', color='k'):  # pragma: no cover
        """
        Plot the entropy triangle.
        Parameters
        ----------
        ax : Axis or None
            The matplotlib axis to plot on. If none is provided, one will be
            constructed.
        setup : bool
            If true, labels, tick marks, gridlines, and a boundary will be added
            to the plot. Defaults to True.
        marker : str
            The matplotlib marker shape to use.
        color : str
            The color of marker to use.
        """
        # Imported lazily: ternary is an optional plotting dependency.
        import ternary
        if ax is None:
            fig, ax = ternary.figure()
            fig.set_size_inches(10, 8)
        else:
            ax = ternary.TernaryAxesSubplot(ax=ax)
        if setup:
            ax.boundary()
            ax.gridlines(multiple=0.1)
            fontsize = 20
            ax.set_title("Entropy Triangle", fontsize=fontsize)
            ax.left_axis_label(self.left_label, fontsize=fontsize)
            ax.right_axis_label(self.right_label, fontsize=fontsize)
            ax.bottom_axis_label(self.bottom_label, fontsize=fontsize)
            ax.ticks(axis='lbr', multiple=0.1, linewidth=1)
            ax.clear_matplotlib_ticks()
        ax.scatter(self.points, marker=marker, color=color)
        # Private ternary API call; refreshes label placement after scatter.
        ax._redraw_labels()
        return ax
class EntropyTriangle(BaseEntropyTriangle):
    """
    The Multivariate Entropy Triangle of Valverde-Albacete & Pelaez-Moreno,
    "The Multivariate Entropy Triangle and Applications," Hybrid Artificial
    Intelligent Systems, Springer, 2016, pp. 647-658. A distribution is
    located by its deviation from uniformity, its dependence, and its
    residual independence.
    """
    left_label = r"$\operatorname{R}[\mathrm{dist}]$"
    right_label = r"$\operatorname{T}[\mathrm{dist}] + \operatorname{B}[\mathrm{dist}]$"
    bottom_label = r"$\Delta \operatorname{H}_{\Pi_\overline{X}}$"
    @staticmethod
    def _compute_point(dist):
        """
        Return the normalized (non-uniformity, dependence, independence)
        coordinates of `dist`, each scaled by the uniform entropy.

        Parameters
        ----------
        dist : Distribution
            The distribution to locate in the triangle.
        """
        h_uniform = entropy(uniform_like(dist))
        h_product = entropy(product_distribution(dist))
        independence = residual_entropy(dist)
        nonuniformity = h_uniform - h_product
        dependence = h_product - independence
        return (nonuniformity / h_uniform,
                dependence / h_uniform,
                independence / h_uniform)
class EntropyTriangle2(BaseEntropyTriangle):
    """
    A variation on the entropy triangle comparing the amount of independence
    in a distribution (residual entropy) against two kinds of dependence
    (total correlation and dual total correlation).
    """
    left_label = r"$\operatorname{B}[\mathrm{dist}]$"
    right_label = r"$\operatorname{T}[\mathrm{dist}]$"
    bottom_label = r"$\operatorname{R}[\mathrm{dist}]$"
    @staticmethod
    def _compute_point(dist):
        """
        Return the (residual, total-correlation, dual-total-correlation)
        coordinates of `dist`, normalized so they sum to one.

        Parameters
        ----------
        dist : Distribution
            The distribution to locate in the triangle.
        """
        residual = residual_entropy(dist)
        dual = dual_total_correlation(dist)
        tc = total_correlation(dist)
        denom = residual + dual + tc
        return (residual / denom, tc / denom, dual / denom)
| bsd-3-clause | 46e737cce0bd5fe5fe94f866a02143ba | 29.148352 | 88 | 0.601057 | 4.147392 | false | false | false | false |
dit/dit | dit/algorithms/prune_expand.py | 1 | 3543 | """
Functions for pruning or expanding the sample space of a distribution.
This can be important when calculating meet and join random variables. It
is also important for the calculations of various PID quantities.
"""
from dit.samplespace import ScalarSampleSpace, SampleSpace, CartesianProduct
__all__ = (
'expanded_samplespace',
'pruned_samplespace',
)
def pruned_samplespace(d, sample_space=None):
    """
    Return a copy of `d` whose sample space omits zero-probability outcomes.

    Parameters
    ----------
    d : distribution
        The distribution used to create the pruned distribution.
    sample_space : set
        Outcomes with zero probability that should nonetheless be kept in
        the sample space. If `None`, every zero-probability outcome is
        removed.

    Returns
    -------
    pd : distribution
        The distribution with a pruned sample space.
    """
    protected = set(sample_space) if sample_space is not None else set()
    kept = [(o, p) for o, p in d.zipped(mode='atoms')
            if not d.ops.is_null_exact(p) or o in protected]
    outcomes = [o for o, _ in kept]
    pmf = [p for _, p in kept]
    # Joint distributions need a joint sample space; scalar ones do not.
    ss_cls = SampleSpace if d.is_joint() else ScalarSampleSpace
    pd = d.__class__(outcomes, pmf,
                     sample_space=ss_cls(outcomes), base=d.get_base())
    return pd
def expanded_samplespace(d, alphabets=None, union=True):
    """
    Returns a new distribution with an expanded sample space.

    Expand the sample space so that it is the Cartesian product of the
    alphabets for each random variable. Note, only the effective alphabet of
    each random variable is used. So if one index in an outcome only has the
    value 1, then its alphabet is `[1]`, and not `[0, 1]` for example.

    Parameters
    ----------
    d : distribution
        The distribution used to create the expanded distribution.
    alphabets : list
        A list of alphabets, with length equal to the outcome length in `d`.
        Each alphabet specifies the alphabet to be used for a single index
        random variable. The sample space of the new distribution will be the
        Cartesian product of these alphabets. If `None`, the sorted effective
        alphabet of each random variable is used.
    union : bool
        If True, then the alphabet for each random variable is unioned.
        The unioned alphabet is then used for each random variable.

    Returns
    -------
    ed : distribution
        The distribution with an expanded sample space.

    Raises
    ------
    ValueError
        If `alphabets` is provided for a joint distribution and its length
        does not match the outcome length of `d`.

    Notes
    -----
    The default constructor for Distribution will create a Cartesian product
    sample space if no sample space is provided.
    """
    joint = d.is_joint()
    if alphabets is None:
        # Note, we sort the alphabets now, so we are possibly changing the
        # order of the original sample space.
        alphabets = list(map(sorted, d.alphabet))
    elif joint and len(alphabets) != d.outcome_length():
        L = len(alphabets)
        # ValueError (not a bare Exception) so callers can catch the
        # bad-argument case specifically; still backward compatible.
        raise ValueError("You need to provide {0} alphabets".format(L))
    if joint and union:
        alphabet = set.union(*map(set, alphabets))
        alphabet = sorted(alphabet)
        alphabets = [alphabet] * len(alphabets)
    if joint:
        sample_space = CartesianProduct(alphabets, d._product)
    else:
        sample_space = ScalarSampleSpace(alphabets)
    ed = d.__class__(d.outcomes, d.pmf,
                     sample_space=sample_space, base=d.get_base())
    return ed
| bsd-3-clause | 0877d4bab46cd813d25bd4975f616091 | 29.543103 | 77 | 0.651143 | 4.187943 | false | false | false | false |
dit/dit | dit/multivariate/coinformation.py | 1 | 4885 | """
The co-information aka the multivariate mutual information.
"""
from ..helpers import normalize_rvs
from ..shannon import conditional_entropy as H
from ..utils import powerset, unitful
__all__ = (
'coinformation',
)
@unitful
def coinformation(dist, rvs=None, crvs=None, rv_mode=None):
    """
    Calculates the coinformation.
    Parameters
    ----------
    dist : Distribution
        The distribution from which the coinformation is calculated.
    rvs : list, None
        The indexes of the random variable used to calculate the coinformation
        between. If None, then the coinformation is calculated over all random
        variables.
    crvs : list, None
        The indexes of the random variables to condition on. If None, then no
        variables are conditioned on.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `crvs` and `rvs` are interpreted as random variable indices. If equal
        to 'names', the elements are interpreted as random variable names.
        If `None`, then the value of `dist._rv_mode` is consulted, which
        defaults to 'indices'.
    Returns
    -------
    I : float
        The coinformation.
    Raises
    ------
    ditException
        Raised if `dist` is not a joint distribution or if `rvs` or `crvs`
        contain non-existent random variables.
    Examples
    --------
    Let's construct a 3-variable distribution for the XOR logic gate and name
    the random variables X, Y, and Z.
    >>> d = dit.example_dists.Xor()
    >>> d.set_rv_names(['X', 'Y', 'Z'])
    To calculate coinformations, recall that `rvs` specifies which groups of
    random variables are involved. For example, the 3-way mutual information
    I[X:Y:Z] is calculated as:
    >>> dit.multivariate.coinformation(d, ['X', 'Y', 'Z'])
    -1.0
    It is a quirk of strings that each element of a string is also an iterable.
    So an equivalent way to calculate the 3-way mutual information I[X:Y:Z] is:
    >>> dit.multivariate.coinformation(d, 'XYZ')
    -1.0
    The reason this works is that list('XYZ') == ['X', 'Y', 'Z']. If we want
    to use random variable indexes, we need to have explicit groupings:
    >>> dit.multivariate.coinformation(d, [[0], [1], [2]], rv_mode='indexes')
    -1.0
    To calculate the mutual information I[X, Y : Z], we use explicit groups:
    >>> dit.multivariate.coinformation(d, ['XY', 'Z'])
    Using indexes, this looks like:
    >>> dit.multivariate.coinformation(d, [[0, 1], [2]], rv_mode='indexes')
    The mutual information I[X:Z] is given by:
    >>> dit.multivariate.coinformation(d, 'XZ')
    0.0
    Equivalently,
    >>> dit.multivariate.coinformation(d, ['X', 'Z'])
    0.0
    Using indexes, this becomes:
    >>> dit.multivariate.coinformation(d, [[0], [2]])
    0.0
    Conditional mutual informations can be calculated by passing in the
    conditional random variables. The conditional entropy I[X:Y|Z] is:
    >>> dit.multivariate.coinformation(d, 'XY', 'Z')
    1.0
    Using indexes, this becomes:
    >>> rvs = [[0], [1]]
    >>> crvs = [[2]] # broken
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    1.0
    For the conditional random variables, groupings have no effect, so you
    can also obtain this as:
    >>> rvs = [[0], [1]]
    >>> crvs = [2]
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    1.0
    Finally, note that entropy can also be calculated. The entropy H[Z|XY]
    is obtained as:
    >>> rvs = [[2]]
    >>> crvs = [[0], [1]] # broken
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0
    >>> crvs = [[0, 1]] # broken
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0
    >>> crvs = [0, 1]
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0
    >>> rvs = 'Z'
    >>> crvs = 'XY'
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0
    Note that [[0], [1]] says to condition on two groups. But conditioning
    is a flat operation and doesn't respect the groups, so it is equal to
    a single group of 2 random variables: [[0, 1]]. With random variable
    names 'XY' is acceptable because list('XY') = ['X', 'Y'], which
    specifies two singleton groups. By the previous argument, this will
    be treated the same as ['XY'].
    """
    rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)
    def entropy(rvs, dist=dist, crvs=crvs, rv_mode=rv_mode):
        """
        Helper function to aid in computing the entropy of subsets.
        """
        # The union flattens a group-of-groups into one set of variables.
        return H(dist, set().union(*rvs), crvs, rv_mode=rv_mode)
    # Inclusion-exclusion: the coinformation is the alternating sum of
    # conditional entropies over every subset of `rvs` (including the
    # empty subset, whose entropy term is over no variables).
    I = sum((-1)**(len(Xs) + 1) * entropy(Xs) for Xs in powerset(rvs))
    return I
| bsd-3-clause | 8c0e894ac1ccfa8cdf472abfb4ed9251 | 28.077381 | 79 | 0.623132 | 3.542422 | false | false | false | false |
dit/dit | dit/math/aitchison.py | 1 | 12847 | """
Functions for manipulating compositions using the Aitchison geometry.
Throughout, we assume the compositions are defined such that the sum
of the components is 1.
http://www.springerlink.com/content/wx1166n56n685v82/
"""
import math
import numpy as np
from dit.exceptions import ditException
from dit.math import LogOperations
__all__ = (
'closure',
'subcomposition',
'perturbation',
'power',
'add',
'sub',
'inner',
'norm',
'dist',
'metric',
'clr',
'alr',
'ilr',
'basis',
'clr_inv',
'alr_inv',
'ilr_inv',
)
# Base-2 log-space operations shared by the transforms below; `exp2` and
# `log2` are the element-wise exponential/logarithm in base 2.
ops = LogOperations(2)
exp2 = ops.exp
log2 = ops.log
def _gm(x):
"""Returns the geometric means of the rows in `x`.
Parameters
----------
x : NumPy array, shape (k,n)
The k compositions whose geometric means are to be computed.
Returns
-------
x_gm : NumPy array, shape (k,)
The geometric means for the k compositions in `x`.
"""
last_axis = -1
x_gm = x.prod(axis=last_axis) ** (1 / x.shape[last_axis])
return x_gm
def _log2_gm(x):
"""
Returns the log of the geometric means for the rows in `x`.
Parameters
----------
x : NumPy array, shape (k,n)
The k compositions whose geometric means are to be computed.
Returns
-------
x_loggm : NumPy array, shape (k,)
The log geometric means for the k compositions in `x`.
"""
last_axis = -1
x_loggm = 1 / x.shape[last_axis] * np.log2(x).sum(axis=last_axis)
return x_loggm
def closure(x):
    """Return the closure of the composition `x`.

    The closure renormalizes `x` so that its components sum to one.

    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        One composition, or one composition per row.

    Returns
    -------
    cx : NumPy array, shape (n,) or (k,n)
        The closure of `x`.

    Raises
    ------
    ditException
        If any composition sums to zero and so cannot be normalized.
    """
    totals = np.sum(x, axis=-1, dtype=float)
    if (totals == 0.0).any():
        raise ditException("x contains an unnormalizable distribution.")
    cx = x / totals[..., np.newaxis]
    return cx
def subcomposition(x, indexes):
    """Returns the subcomposition over the specified indexes.
    The subcomposition is the closure of a subset of events in the composition.
    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        The composition(s) that will be truncated and renormalized.
    indexes : sequence of int
        The component indexes to keep before renormalizing.
    Returns
    -------
    xsub : NumPy array, shape (len(`indexes`),) or (k, len(`indexes`))
        The subcompositions of `x`.
    """
    # Slice the selected components, then renormalize to sum to one.
    xsub = closure(x[..., indexes])
    return xsub
def perturbation(x, dx):
    """Returns the perturbation of `x` by `dx`.
    Perturbation is the closure of the element-wise product. It is equivalent
    to translation (inner sum) in standard Euclidean space.
    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        The composition (or k compositions) to be perturbed.
    dx : NumPy array
        The perturbation composition or (k perturbation compositions).
    Returns
    -------
    px : NumPy array, shape (n,) or (k,n)
        The perturbation of `x` by `dx`.
    """
    # Element-wise product followed by renormalization (closure).
    px = closure(x * dx)
    return px
def power(x, a):
    """Returns the result of powering `x` by `a`.
    The power transformation is the closure of raising each element to the
    `a`th power. It is equivalent to scalar multiplication (outer product).
    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        The composition (or k compositions) which will be powered.
    a : NumPy array, shape () or (k,)
        The power (or k powers) to which the composition(s) is raised.
    Returns
    -------
    px : NumPy array, shape (n,) or (k,n)
        The result of powering `x` by `a`.
    """
    # Reshape so each power broadcasts across one composition (row).
    a = np.ravel(a)[..., np.newaxis]
    px = closure(x**a)
    # closure() returned a 2D array; collapse back for 1D input.
    if len(x.shape) == 1:
        px = px[0]
    return px
# Perturbation plays the role of vector addition in the Aitchison geometry.
add = perturbation
def sub(x, y):
    """Returns the difference of compositions.
    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        The composition that will be subtracted from.
    y : NumPy array, shape (n,) or (k,n)
        The composition to be subtracted.
    Returns
    -------
    z : NumPy array, shape (n,) or (k,n)
        The result of y subtracted from x.
    """
    # Subtraction is perturbation by the inverse: x (-) y = x (+) y^(-1).
    z = perturbation(x, power(y, -1.0)) # 1.0 and not 1 forces coercion
    return z
def inner(x, y):
    """Return the Aitchison inner product between `x` and `y`.

    Parameters
    ----------
    x, y : NumPy array, shape (n,) or (k,n)
        Compositions to be used in the inner product.

    Returns
    -------
    z : NumPy array, shape () or (k,)
        The inner product of x and y. Row-wise for 2D inputs (returning a
        1D array); a float when both inputs are 1D.
    """
    scalar_result = len(x.shape) == 1 and len(y.shape) == 1
    x2 = np.atleast_2d(x)
    y2 = np.atleast_2d(y)
    # Center each row's logs about its log geometric mean, then take the
    # ordinary Euclidean inner product of the centered vectors.
    xc = log2(x2) - _log2_gm(x2)[:, np.newaxis]
    yc = log2(y2) - _log2_gm(y2)[:, np.newaxis]
    z = (xc * yc).sum(axis=1)
    return z[0] if scalar_result else z
def norm(x):
    """Returns the norm of `x`.
    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        The composition(s) to be normed.
    Returns
    -------
    n : NumPy array, shape () or (k,)
        The norm(s) of the composition(s).
    """
    # Norm induced by the Aitchison inner product: ||x|| = sqrt(<x, x>).
    n = np.sqrt(inner(x, x))
    return n
def dist(x, y):
    """Returns the distance between `x` and `y`.
    Parameters
    ----------
    x, y : NumPy array, shape (n,) or (k,n)
        The compositions whose distance is computed.
    Returns
    -------
    d : NumPy array, shape () or (k,)
        The distance between `x` and `y`.
    """
    # Aitchison distance: the norm of the compositional difference.
    d = norm(sub(x, y))
    return d
# The Aitchison distance is a true metric on the simplex.
metric = dist
def clr(x):
    """Return the centered log-ratio transformation of `x`.

    Defined as:

        clr(x) = \\log_2( x / g(x) ) = \\log_2(x) - <\\log_2 x_i>

    where g(x) is the geometric mean of `x`.

    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        Composition(s) to be transformed by clr.

    Returns
    -------
    y : NumPy array, shape (n,) or (k,n)
        Centered log-ratio transformation(s) of `x`.
    """
    scalar_input = len(x.shape) == 1
    xm = np.atleast_2d(x)
    # Subtract each row's log geometric mean from that row's logs.
    y = log2(xm) - _log2_gm(xm)[:, np.newaxis]
    return y[0] if scalar_input else y
def alr(x):
    """Returns the additive log-ratio transformation of `x`.
    The additive log-ratio transformation of `x` (with respect to the last
    component in the composition) is defined as:
    alr(x) = [ \\log_2 x_1 / x_D, \\ldots, \\log_2 \\frac{x_{D-1}}{x_D} ]
    where `x` is a composition of length D. Essentially, take the first D-1
    components and divide them by the Dth component.
    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        Composition(s) to be transformed by alr.
    Returns
    -------
    y : NumPy array, shape (n-1,) or (k,n-1)
        Additive log-ratio transformation(s) of `x`.
    """
    if len(x.shape) == 1:
        single = True
    else:
        single = False
    x = np.atleast_2d(x)
    # log of the ratio to the last component: log(x_i) - log(x_D).
    y = log2(x[:, :-1]) - log2(x[:, -1][:, np.newaxis])
    if single:
        y = y[0]
    return y
def ilr(x):
    """Returns the isometric log-ratio transformation of `x`.
    The isometric log-ratio transformation of `x`, with respect to the
    canonical, orthonormal basis defined on the simplex (equation 18 in the
    paper), is defined as:
    y_i = ilr(x)_i = k_i \\log_2 \\frac{g(x_1,\\ldots,x_i)}{x_{i+1}}
    where
    k_i = \\sqrt{ \\frac{i}{i+1} }
    g_i = \\bigl( \\prod_{k=1}^i x_k \\bigr)^{1/i}
    for i = 1, 2, ..., D-1.
    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        Composition(s) to be transformed by ilr.
    Returns
    -------
    y : NumPy array, shape (n-1,) or (k, n-1)
        Isometric log-ratio transformation(s) of `x`.
    """
    if len(x.shape) == 1:
        single = True
    else:
        single = False
    x = np.atleast_2d(x)
    rng = np.arange(1, x.shape[1])
    # Log-space equivalent of the commented-out direct computation:
    # gm = (x.cumprod(axis=1)[:, :-1])**(1/rng)
    loggm = 1 / rng * log2(x).cumsum(axis=1)[:, :-1]
    y = loggm - log2(x[:, 1:])
    y *= np.sqrt([i / (i + 1) for i in rng])  # column i scaled by k_i = sqrt(i/(i+1))
    if single:
        y = y[0]
    return y
def ubasis(n):
    """Return an orthonormal basis wrt the ordinary Euclidean inner product.

    The vectors constitute a basis of the `n`-dimensional linear subspace
    V_S. There are `n` elements in the basis, each living in an
    (`n`+1)-dimensional space. Each u_i is:

        u_i = \\sqrt{i/(i+1)} ( 1/i, ..., 1/i, -1, 0, ..., 0 )

    with i leading entries equal to 1/i.

    Parameters
    ----------
    n : int
        The dimensionality of the basis.

    Returns
    -------
    b : NumPy array, shape (`n`, `n`+1)
        The orthonormal basis.
    """
    rows = []
    for i in range(1, n + 1):
        row = np.zeros(n + 1)
        row[:i] = 1.0 / i
        row[i] = -1.0
        rows.append(math.sqrt(i / (i + 1)) * row)
    return np.array(rows)
def basis(n):
    """Returns an orthonormal basis wrt the Aitchison inner product.
    Parameters
    ----------
    n : int
        The dimensionality of the basis. For example, the 2-simplex has a
        two-dimensional basis.
    Returns
    -------
    b : NumPy array, shape (`n`, `n`+1)
        The basis for the `n`-simplex, consisting of vectors of length `n`+1.
    """
    # Map the Euclidean orthonormal basis back to the simplex via clr^{-1}.
    u = ubasis(n)
    b = clr_inv(u)
    return b
def clr_inv(xclr):
""""Returns the inverse centered log-ratio transformation of x.
Parameters
----------
xclr : NumPy array, shape (n,) or (k,n)
The centered log-ratio transformations of x.
Returns
-------
x : NumPy array, shape (n,) or (k,n)
The original compositions.
"""
x = closure(exp2(xclr))
return x
def alr_inv(xalr):
    """Returns the inverse additive log-ratio transformation of x.
    Parameters
    ----------
    xalr : NumPy array, shape (n,) or (k,n)
        The additive log-ratio transformations of x.
    Returns
    -------
    x : NumPy array, shape (n+1,) or (k,n+1)
        The original compositions
    Notes
    -----
    The sum of the composition is assumed to be 1.
    """
    if len(xalr.shape) == 1:
        single = True
    else:
        single = False
    xalr = np.atleast_2d(xalr)
    # The inverse has one extra component (the alr reference component).
    newshape = list(xalr.shape)
    newshape[1] += 1
    x = np.empty(newshape)
    x[:, :-1] = exp2(xalr)
    ### Now we can exactly solve for the last element, and
    ### then we can unscale each of the other components.
    # x[:, -1] = 1 / (1 + x[:, :-1].sum(axis=1))
    # x[:, :-1] *= x[:, -1][:, np.newaxis]
    ### Or we can set the last element equal to 1 and apply closure.
    ### This is quicker so we do that.
    x[:, -1] = 1
    x = closure(x)
    return x[0] if single else x
def ilr_inv(xilr):
    """Returns the inverse isometric log-ratio transformation of x.
    Parameters
    ----------
    xilr : NumPy array, shape (n,) or (k,n)
        The isometric log-ratio transformations of x.
    Returns
    -------
    x : NumPy array, shape (n+1,) or (k,n+1)
        The original compositions.
    """
    if len(xilr.shape) == 1:
        single = True
    else:
        single = False
    xilr = np.atleast_2d(xilr)
    # The inverse has one extra component than the ilr coordinates.
    newshape = list(xilr.shape)
    newshape[1] += 1
    x = np.empty(newshape)
    b = basis(xilr.shape[1])
    for i in range(xilr.shape[0]):
        # Here is what you'd normally do:
        #
        #   closure(power(b, xilr[i]).prod(axis=0))
        #
        # but the product is multiplying a bunch of small numbers and it will
        # underflow to zero. This makes the closure operation fail.
        # Instead, we need to do everything with logs.
        #
        poww = power(b, xilr[i])
        logprods = ops.mult_reduce(log2(poww), axis=0)
        logprobs = ops.normalize(logprods)
        x[i] = ops.exp(logprobs)
    if single:
        x = x[0]
    return x
| bsd-3-clause | c14bef48c09bb672bc77f8b286c8da98 | 21.941071 | 80 | 0.560286 | 3.332555 | false | false | false | false |
dit/dit | dit/distconst.py | 1 | 35280 | """
Specialized distribution constructors.
"""
import numpy as np
from itertools import product
from collections import defaultdict
from random import randint
from .distribution import BaseDistribution
from .exceptions import ditException
from .helpers import parse_rvs
from .npdist import Distribution
from .npscalardist import ScalarDistribution
from .utils import digits, powerset
from .validate import validate_pmf
__all__ = (
'mixture_distribution',
'mixture_distribution2',
'noisy',
'erasure',
'modify_outcomes',
'random_scalar_distribution',
'random_distribution',
'simplex_grid',
'uniform_distribution',
'uniform_scalar_distribution',
'insert_rvf',
'RVFunctions',
'product_distribution',
'uniform',
'uniform_like',
'all_dist_structures',
'random_dist_structure',
)
def mixture_distribution(dists, weights, merge=False):
    """Return the weighted mixture of `dists`: :math:`\\sum p_i d_i`.

    Parameters
    ----------
    dists: [Distribution]
        Distributions to mix; assumed to share a base (and, unless `merge`
        is True, a sample space).
    weights: [float]
        Mixing probabilities, expressed in the distributions' base.
    merge: bool
        When True, distributions with differing sample spaces are combined
        on the union of their outcomes; when False, mixing incompatible
        distributions raises an exception.

    Returns
    -------
    mix: Distribution
        The mixture distribution.

    Raises
    ------
    DitException
        Raised if `dists` and `weights` differ in length.
    InvalidNormalization
        Raised if the weights do not sum to unity.
    InvalidProbability
        Raised if the weights are not valid probabilities.
    IncompatibleOutcome
        Raised if the sample spaces are incompatible and `merge` is False.
    """
    weights = np.asarray(weights)
    if len(dists) != len(weights):
        raise ditException("Length of `dists` and `weights` must be equal.")
    ops = dists[0].ops
    validate_pmf(weights, ops)

    if merge:
        def contributions(outcome):
            # An outcome absent from a component contributes zero mass.
            return [ops.mult(w, d[outcome]) if outcome in d else 0
                    for w, d in zip(weights, dists)]
    else:
        def contributions(outcome):
            return [ops.mult(w, d[outcome]) for w, d in zip(weights, dists)]

    outcomes = set().union(*[d.outcomes for d in dists])
    pmf = [ops.add_reduce(np.array(contributions(o))) for o in outcomes]
    return dists[0].__class__(tuple(outcomes), pmf, base=ops.get_base())
def mixture_distribution2(dists, weights):
    """Return the weighted mixture of `dists`: :math:`\\sum p_i d_i`.

    A faster variant of `mixture_distribution` that requires every pmf to
    be indexed identically: the sample spaces must be ordered the same and
    the outcomes currently represented in each pmf must match. Using it in
    any other situation yields incorrect output or an exception.

    Parameters
    ----------
    dists: [Distribution]
        Distributions to mix; assumed to share a base and sample space.
    weights: [float]
        Mixing probabilities, expressed in the distributions' base.

    Returns
    -------
    mix: Distribution
        The mixture distribution.

    Raises
    ------
    DitException
        Raised if `dists` and `weights` differ in length.
    InvalidNormalization
        Raised if the weights do not sum to unity.
    InvalidProbability
        Raised if the weights are not valid probabilities.
    ValueError
        Raised if the pmfs differ in length.
    """
    weights = np.asarray(weights)
    if len(dists) != len(weights):
        raise ditException("Length of `dists` and `weights` must be equal.")

    # NumPy broadcasting would silently accept a length-1 pmf, making
    # mismatches hard to detect, so require identical shapes up front.
    if len({dist.pmf.shape for dist in dists}) != 1:
        raise ValueError('All pmfs must have the same length.')

    ops = dists[0].ops
    validate_pmf(weights, ops)

    mix = dists[0].copy()
    ops.mult_inplace(mix.pmf, weights[0])
    for dist, weight in zip(dists[1:], weights[1:]):
        ops.add_inplace(mix.pmf, ops.mult(dist.pmf, weight))
    return mix
def noisy(dist, noise=1 / 2):
    """Mix `dist` with the uniform distribution over its sample space.

    Parameters
    ----------
    dist : Distribution
        The distribution to fuzz.
    noise : float, 0 <= `noise` <= 1
        The weight given to the uniform component.

    Returns
    -------
    fuzzy: Distribution
        The noisy distribution.
    """
    uniform_part = uniform(list(product(*dist.alphabet)))
    # Match the outcome type of `dist`: joint string outcomes vs tuples.
    if isinstance(dist.outcomes[0], str):
        uniform_part = modify_outcomes(uniform_part, lambda o: ''.join(o))
    return mixture_distribution([dist, uniform_part], [1 - noise, noise],
                                merge=True)
def erasure(dist, epsilon=1 / 2):
    """Pass each variable of `dist` through an erasure channel.

    Each symbol of every outcome is independently replaced by '_' with
    probability `epsilon`.

    Parameters
    ----------
    dist : Distribution
        The distribution to fuzz.
    epsilon : float, 0 <= `epsilon` <= 1
        The erasure probability.

    Returns
    -------
    erased : Distribution
        The erased distribution.
    """
    make_outcome = dist._outcome_ctor
    erased = defaultdict(float)
    n = dist.outcome_length()
    for outcome, prob in dist.zipped():
        # Enumerate every kept/erased pattern for this outcome; each symbol
        # either survives or becomes '_'.
        for pattern in product(*zip(outcome, '_' * n)):
            num_erased = pattern.count('_')
            num_kept = n - num_erased
            erased[make_outcome(pattern)] += (
                prob * epsilon**num_erased * (1 - epsilon)**num_kept
            )
    return Distribution(erased)
def modify_outcomes(dist, ctor):
    """Return `dist` with each outcome transformed by `ctor`.

    Outcomes that map to the same new value have their probabilities
    combined.

    Parameters
    ----------
    dist : Distribution, ScalarDistribution
        The distribution to be modified.
    ctor : callable
        Receives an existing outcome and returns the new, modified outcome.

    Returns
    -------
    d : Distribution, ScalarDistribution
        The modified distribution.

    Examples
    --------
    Convert joint tuple outcomes to strings.
    >>> d = dit.uniform_distribution(3, ['0', '1'])
    >>> d2 = dit.modify_outcomes(d, lambda x: ''.join(x))
    Increment scalar outcomes by 1.
    >>> d = dit.uniform_scalar_distribution(5)
    >>> d2 = dit.modify_outcomes(d, lambda x: x + 1)
    """
    ops = dist.ops
    accumulated = {}
    # Sum probabilities of outcomes that collide under `ctor`.
    for outcome, p in zip(dist.outcomes, dist.pmf):
        new_outcome = ctor(outcome)
        accumulated[new_outcome] = ops.add(
            p, accumulated.get(new_outcome, ops.zero))
    pmf = np.array(list(accumulated.values()))
    return dist.__class__(list(accumulated.keys()), pmf,
                          base=dist.get_base())
def random_scalar_distribution(n, base=None, alpha=None, prng=None):
    """Return a random scalar distribution over `n` outcomes.

    The pmf is drawn from a Dirichlet distribution; with the default
    `alpha` of all ones this is the uniform measure on the `n`-simplex.

    Parameters
    ----------
    n : int | list
        The number of outcomes, or a list containing the outcomes.
    base : float, 'linear', 'e'
        The desired base for the distribution probabilities.
    alpha : list | None
        Dirichlet concentration parameters; treated as all ones if `None`.
    prng : RandomState | None
        Source of randomness; defaults to ``dit.math.prng``.
    """
    import dit.math
    if prng is None:
        prng = dit.math.prng

    try:
        num_outcomes = len(n)
    except TypeError:
        # `n` is a count rather than a list of outcomes.
        num_outcomes = n

    d = uniform_scalar_distribution(n)
    if alpha is None:
        alpha = np.ones(len(d))
    elif len(alpha) != num_outcomes:
        raise ditException('Number of concentration parameters must be `n`.')

    d.pmf = prng.dirichlet(alpha)
    if base is not None:
        d.set_base(base)
    return d
def random_distribution(outcome_length, alphabet_size, base=None, alpha=None, prng=None):
    """Return a random joint distribution drawn from a Dirichlet.

    With the default `alpha` of all ones the pmf is sampled uniformly from
    the simplex of dimension ``alphabet_size**outcome_length - 1``.

    Parameters
    ----------
    outcome_length : int
        The length of the outcomes.
    alphabet_size : int, list
        The alphabet used to construct the outcomes. If an integer, the
        alphabet consists of the integers 0 through alphabet_size - 1;
        if a list, its elements form the alphabet.
    base : float, 'linear', 'e'
        The desired base for the distribution probabilities.
    alpha : list | None
        Dirichlet concentration parameters; treated as all ones if `None`.
    prng : RandomState | None
        Source of randomness; defaults to ``dit.math.prng``.

    Returns
    -------
    d : Distribution.
        A randomly sampled distribution.
    """
    import dit.math
    if prng is None:
        prng = dit.math.prng

    d = uniform_distribution(outcome_length, alphabet_size)
    if alpha is None:
        alpha = np.ones(len(d))
    elif len(alpha) != len(d):
        raise ditException('Invalid number of concentration parameters.')

    d.pmf = prng.dirichlet(alpha)
    if base is not None:
        d.set_base(base)
    return d
def simplex_grid(length, subdivisions, using=None, inplace=False):
    """Returns a generator over distributions, determined by a grid.
    The grid is "triangular" in Euclidean space.
    The total number of points on the grid is::
        (subdivisions + length - 1)! / (subdivisions)! / (length-1)!
    and is equivalent to the total number of ways ``n`` indistinguishable items
    can be placed into ``k`` distinguishable slots, where n=`subdivisions` and
    k=`length`.
    Parameters
    ----------
    length : int
        The number of elements in each distribution. The dimensionality
        of the simplex is length-1.
    subdivisions : int
        The number of subdivisions for the interval [0, 1]. Each component
        will take on values at the boundaries of the subdivisions. For example,
        one subdivision means each component can take the values 0 or 1 only.
        Two subdivisions corresponds to :math:`[[0, 1/2], [1/2, 1]]` and thus,
        each component can take the values 0, 1/2, or 1. A common use case is
        to exponentially increase the number of subdivisions at each level.
        That is, subdivisions would be: 2**0, 2**1, 2**2, 2**3, ...
    using : None, callable, or distribution
        If None, then scalar distributions on integers are yielded. If `using`
        is a distribution, then each yielded distribution is a copy of `using`
        with its pmf set appropriately. For other callables, a tuple of the
        pmf is passed to the callable and then yielded.
    inplace : bool
        If `True`, then each yielded distribution is the same Python object,
        but with a new probability mass function. If `False`, then each yielded
        distribution is a unique Python object and can be safely stored for
        other calculations after the generator has finished. This keyword has
        an effect only when `using` is None or some distribution.
    Examples
    --------
    >>> list(dit.simplex_grid(2, 2, using=tuple))
    [(0.0, 1.0), (0.25, 0.75), (0.5, 0.5), (0.75, 0.25), (1.0, 0.0)]
    """
    # Local import; presumably avoids a circular import -- confirm.
    from dit.math.combinatorics import slots
    if subdivisions < 1:
        raise ditException('`subdivisions` must be greater than or equal to 1')
    elif length < 1:
        raise ditException('`length` must be greater than or equal to 1')
    # `slots` enumerates the grid points as normalized pmf tuples.
    gen = slots(int(subdivisions), int(length), normalized=True)
    # Default: yield ScalarDistribution objects over integer outcomes.
    if using is None:
        using = random_scalar_distribution(length)
    if using is tuple:
        # Fast path: yield the raw pmf tuples.
        for pmf in gen:
            yield pmf
    elif not isinstance(using, BaseDistribution):
        # Arbitrary callable: hand it the pmf and yield whatever it returns.
        for pmf in gen:
            yield using(pmf)
    else:
        if length != len(using.pmf):
            raise Exception('`length` must match the length of pmf')
        if inplace:
            # Reuse the same object; callers must not store yielded values.
            d = using
            for pmf in gen:
                d.pmf[:] = pmf
                yield d
        else:
            for pmf in gen:
                d = using.copy()
                d.pmf[:] = pmf
                yield d
def uniform_scalar_distribution(n, base=None):
    """Return a uniform scalar distribution over `n` outcomes.

    Parameters
    ----------
    n : int, list
        If an integer, the outcomes are the integers 0 through n-1.
        If a list, its elements are treated as the outcomes.
    base : float, 'linear', 'e'
        The desired base for the distribution probabilities.

    Returns
    -------
    d : ScalarDistribution
        A uniform scalar distribution.
    """
    try:
        num_outcomes = len(n)
    except TypeError:
        # `n` is a count; standardize the outcomes to 0..n-1.
        num_outcomes = n
        outcomes = tuple(range(n))
    else:
        outcomes = n

    pmf = [1 / num_outcomes] * num_outcomes
    d = ScalarDistribution(outcomes, pmf, base='linear')
    if base is not None:
        d.set_base(base)
    return d
def uniform_distribution(outcome_length, alphabet_size, base=None):
    """Return a uniform joint distribution.

    Parameters
    ----------
    outcome_length : int
        The length of the outcomes.
    alphabet_size : int, list of lists
        The alphabets used to construct the outcomes of the distribution. If an
        integer, then the alphabet for each random variable will be the same,
        consisting of integers from 0 to k-1 where k is the alphabet size.
        If a list, then the elements are used as the alphabet for each random
        variable. If the list has a single element, then it will be used
        as the alphabet for each random variable.
    base : float, 'linear', 'e'
        The desired base for the distribution probabilities.

    Returns
    -------
    d : Distribution.
        A uniform distribution.

    Examples
    --------
    Each random variable has the same standardized alphabet: [0,1]
    >>> d = dit.uniform_distribution(2, 2)
    Each random variable has its own alphabet.
    >>> d = dit.uniform_distribution(2, [[0,1],[1,2]])
    Both random variables have ['H','T'] as an alphabet.
    >>> d = dit.uniform_distribution(2, [['H','T']])
    """
    try:
        int(alphabet_size)
    except TypeError:
        # Assume it is a list of lists.
        alphabet = alphabet_size
        # Autoextend if only one alphabet is provided.
        if len(alphabet) == 1:
            alphabet = [alphabet[0]] * outcome_length
        elif len(alphabet) != outcome_length:
            raise TypeError("outcome_length does not match number of rvs.")
    else:
        # Build the standard alphabet.
        alphabet = [tuple(range(alphabet_size))] * outcome_length

    try:
        Z = np.prod(list(map(len, alphabet)))
        try:
            # numpypy.prod returns a list, and pypy can't handle multiplying
            # a list by a numpy float, so unwrap the scalar in that case.
            # On CPython/NumPy, indexing the scalar raises and we keep Z.
            # Narrowed from a bare `except:` so that unrelated errors (and
            # KeyboardInterrupt/SystemExit) are no longer swallowed.
            Z = int(Z[0])
        except (TypeError, IndexError):
            pass
    except TypeError:
        raise TypeError("alphabet_size must be an int or list of lists.")

    pmf = [1 / Z] * Z
    outcomes = tuple(product(*alphabet))
    d = Distribution(outcomes, pmf, base='linear')
    # Maybe we should use ditParams['base'] when base is None?
    if base is not None:
        d.set_base(base)
    return d
def uniform_like(dist):
    """Return a uniform distribution with the same outcome length,
    alphabet, and base as `dist`.

    Parameters
    ----------
    dist : Distribution
        The distribution to mimic.
    """
    return uniform_distribution(dist.outcome_length(), dist.alphabet,
                                dist.get_base())
def uniform(outcomes, base=None):
    """Produce a uniform distribution over `outcomes`.

    Parameters
    ----------
    outcomes : iterable
        The set of outcomes with which to construct the distribution.
    base : float, 'linear', 'e'
        The desired base for the distribution probabilities.

    Returns
    -------
    d : Distribution
        A uniform distribution over `outcomes`.

    Raises
    ------
    ditException
        Raised if the elements of `outcomes` do not all have the same length.
    TypeError
        Raised if `outcomes` is not iterable.
    """
    outcomes = list(outcomes)
    n = len(outcomes)
    d = Distribution(outcomes, [1 / n] * n)
    if base is not None:
        d.set_base(base)
    return d
def insert_rvf(d, func, index=-1):
    """
    Returns a new distribution with an added random variable at index `index`.
    The new random variable must be a function of the other random variables.
    By this, we mean that the entropy of the new random variable conditioned
    on the original random variables should be zero.
    Parameters
    ----------
    dist : Distribution
        The distribution used to construct the new distribution.
    func : callable | list of callable
        A function which takes a single argument---the value of the previous
        random variables---and returns a new random variable. Note, the return
        value will be added to the outcome using `__add__`, and so it should be
        a hashable, orderable sequence (as every outcome must be). If a list of
        callables is provided, then multiple random variables are added
        simultaneously and will appear in the same order as the list.
    index : int
        The index at which to insert the random variable. A value of -1
        will append the random variable to the end.
    Returns
    -------
    d : Distribution
        The new distribution.
    Examples
    --------
    >>> d = dit.Distribution(['00', '01', '10', '11'], [1/4]*4)
    >>> def xor(outcome):
    ...     return str(int(outcome[0] != outcome[1]))
    ...
    >>> d2 = dit.insert_rvf(d, xor)
    >>> d2.outcomes
    ('000', '011', '101', '110')
    """
    # `func[0]` succeeds for a sequence of callables; a single bare callable
    # raises TypeError, in which case we wrap it in a list.
    try:
        func[0]
    except TypeError:
        funcs = [func]
    else:
        funcs = func
    # One iterator of new values per function, evaluated over all outcomes.
    partial_outcomes = [map(func, d.outcomes) for func in funcs]
    # Now "flatten" the new contributions.
    partial_outcomes = [d._outcome_ctor([o for o_list in outcome for o in o_list])
                        for outcome in zip(*partial_outcomes)]
    new_outcomes = zip(d.outcomes, partial_outcomes)
    if index == -1:
        outcomes = [old + new for old, new in new_outcomes]
    else:
        # Splice the new values into the middle of each outcome.
        outcomes = [old[:index] + new + old[index:] for old, new in new_outcomes]
    # The pmf is unchanged: the new variable is deterministic given the old.
    d2 = Distribution(outcomes, d.pmf.copy(), base=d.get_base())
    return d2
class RVFunctions(object):
    """
    Helper class for building new random variables.
    Each new random variable is a function of the existing random variables.
    So for each outcome in the original distribution, there can be only one
    possible value for the new random variable.
    Some methods may make assumptions about the sample space. For example, the
    :meth:`xor` method assumes the sample space consists of 0-like and 1-like
    outcomes.
    """
    def __init__(self, d):
        """
        Initialize the random variable function creator.
        Parameters
        ----------
        d : Distribution
            The distribution used to create the new random variables.
        Examples
        --------
        >>> d = dit.Distribution(['00', '01', '10', '11'], [1/4]*4)
        >>> bf = dit.RVFunctions(d)
        >>> d = dit.insert_rvf(d, bf.xor([0,1]))
        >>> d = dit.insert_rvf(d, bf.xor([1,2]))
        >>> d.outcomes
        ('0000', '0110', '1011', '1101')
        """
        if not isinstance(d, Distribution):
            raise ditException('`d` must be a Distribution instance.')
        # Duck-type check: string-like outcomes support concatenation with
        # ''; tuple outcomes raise TypeError.
        try:
            d.outcomes[0] + ''
        except TypeError:
            is_int = True
        else:
            is_int = False
        self.is_int = is_int  # True when outcomes are tuples (non-strings)
        self.L = d.outcome_length()  # number of random variables per outcome
        self.ctor = d._outcome_ctor  # builds an outcome from a sequence
        self.outcome_class = d._outcome_class
    def xor(self, indices):
        """
        Returns a callable which returns the logical XOR of the given indices.
        Outcomes are assumed to be strings of '0' and '1', or tuples of 0 and 1.
        The returned function handles both cases appropriately.
        Parameters
        ----------
        indices : list
            A list of two indices used to take the XOR.
        Returns
        -------
        func : function
            A callable implementing the XOR function. It receives a single
            argument, the outcome and returns an outcome for the calculation.
        Examples
        --------
        >>> d = dit.Distribution(['00', '01', '10', '11'], [1/4]*4)
        >>> bf = dit.RVFunctions(d)
        >>> d = dit.insert_rvf(d, bf.xor([0,1]))
        >>> d.outcomes
        ('000', '011', '101', '110')
        """
        # Return type must match the distribution's outcome style: a 1-tuple
        # for tuple outcomes, a length-1 string for string outcomes.
        if self.is_int:
            def func(outcome):
                result = outcome[indices[0]] != outcome[indices[1]]
                return (int(result),)
        else:
            def func(outcome):
                result = outcome[indices[0]] != outcome[indices[1]]
                return str(int(result))
        return func
    def from_mapping(self, mapping, force=True):
        """
        Returns a callable implementing a random variable via a mapping.
        Parameters
        ----------
        mapping : dict
            A mapping from outcomes to values of the new random variable.
        force : bool
            Ideally, the values of `mapping` should satisfy the requirements
            of all outcomes (hashable, ordered sequences), but if `force` is
            `True`, we will attempt to use the distribution's outcome
            constructor and make sure that they are. If they are not, then
            the outcomes will be placed into a 1-tuple. This is strictly
            a convenience for users. As an example, suppose the outcomes are
            strings, the values of `mapping` can also be strings without issue.
            However, if the outcomes are tuples of integers, then the values
            *should* also be tuples. When `force` is `True`, then the values
            can be integers and then they will be transformed into 1-tuples.
        Returns
        -------
        func : function
            A callable implementing the desired function. It receives a single
            argument, the outcome, and returns an outcome for the calculation.
        Examples
        --------
        >>> d = dit.Distribution(['00', '01', '10', '11'], [1/4]*4)
        >>> bf = dit.RVFunctions(d)
        >>> mapping = {'00': '0', '01': '1', '10': '1', '11': '0'}
        >>> d = dit.insert_rvf(d, bf.from_mapping(mapping))
        >>> d.outcomes
        ('000', '011', '101', '110')
        Same example as above but now with tuples.
        >>> d = dit.Distribution([(0,0), (0,1), (1,0), (1,1)], [1/4]*4)
        >>> bf = dit.RVFunctions(d)
        >>> mapping = {(0,0): 0, (0,1): 1, (1,0): 1, (1,1): 0}
        >>> d = dit.insert_rvf(d, bf.from_mapping(mapping, force=True))
        >>> d.outcomes
        ((0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0))
        See Also
        --------
        dit.modify_outcomes
        """
        ctor = self.ctor
        if force:
            # Probe whether the values are already valid outcomes; if the
            # constructor rejects them, wrap each value in a 1-tuple.
            try:
                list(map(ctor, mapping.values()))
            except (TypeError, ditException):
                values = [ctor([o]) for o in mapping.values()]
                mapping = dict(zip(mapping.keys(), values))
        def func(outcome):
            return mapping[outcome]
        return func
    def from_partition(self, partition):
        """
        Returns a callable implementing a function specified by a partition.
        The partition must divide the sample space of the distribution. The
        number of equivalence classes, n, determines the number of values for
        the random variable. The values are integers from 0 to n-1, but if the
        outcome class of the distribution is string, then this function will
        use the first n letters from:
            '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        as values for the random variable. So random variables with more than
        62 outcomes are not supported by this function.
        Parameters
        ----------
        partition : list
            A list of iterables. The outer list is required to determine the
            order of the new random variable
        Returns
        -------
        func : function
            A callable implementing the desired function. It receives a single
            argument, the outcome, and returns an outcome of the new random
            variable that is specified by the partition.
        Examples
        --------
        >>> d = dit.Distribution(['00', '01', '10', '11'], [1/4]*4)
        >>> bf = dit.RVFunctions(d)
        >>> partition = (('00','11'), ('01', '10'))
        >>> d = dit.insert_rvf(d, bf.from_partition(partition))
        >>> d.outcomes
        ('000', '011', '101', '110')
        """
        # Practically, we support the str class. This is bytes in Python
        # versions <3 and unicode >3.
        alphabet = '0123456789'
        letters = 'abcdefghijklmnopqrstuvwxyz'
        alphabet += letters
        alphabet += letters.upper()
        n = len(partition)
        if self.outcome_class == str:
            if n > len(alphabet):
                msg = 'Number of outcomes is too large.'
                raise NotImplementedError(msg)
            vals = alphabet[:n]
        else:
            vals = range(n)
        # Map every member of each equivalence class to that class's value.
        mapping = {}
        # Probably could do this more efficiently.
        for i, eqclass in enumerate(partition):
            for outcome in eqclass:
                mapping[self.ctor(outcome)] = vals[i]
        return self.from_mapping(mapping, force=True)
    def from_hexes(self, hexes):
        """
        Returns a callable implementing a boolean function on up to 4-bits.
        Outcomes are assumed to be strings of '0' and '1', or tuples of 0 and 1.
        The returned function handles both cases appropriately.
        The original outcomes are represented in base-16 as one of the letters
        in '0123456789ABCDEF' (not case sensitive). Then, each boolean function
        is a specification of the outcomes for which it be should true. The
        random variable will be false for the complement of this set---so this
        function additional assumes full support. For example, if the random
        variable is a function of 3-bits and should be true only for the
        outcomes 2='010' or 7='111', then `hexes` should be '27'. This nicely
        handles 1- and 2-, and 4-bit inputs in a similar fashion.
        Parameters
        ----------
        hexes : str
            A string of base-16 characters, each element represents an
            (up to) 4-bit outcome for which the random variable should be true.
        Returns
        -------
        func : function
            A callable implementing the desired function. It receives a single
            argument, the outcome, and returns an outcome for the calculation.
        Examples
        --------
        >>> outcomes = ['000', '001', '010', '011', '100', '101', '110', '111']
        >>> pmf = [1/8] * 8
        >>> d = dit.Distribution(outcomes, pmf)
        >>> bf = dit.RVFunctions(d)
        >>> d = dit.insert_rvf(d, bf.from_hexes('27'))
        >>> d.outcomes
        ('0000', '0010', '0101', '0110', '1000', '1010', '1100', '1111')
        """
        base = 16
        # Render each hex digit as a zero-padded binary word of length self.L.
        template = "{0:0{1}b}"
        outcomes = [template.format(int(h, base), self.L) for h in hexes]
        if self.is_int:
            outcomes = [tuple(map(int, o)) for o in outcomes]
        outcomes = set(outcomes)
        if self.is_int:
            def func(outcome):
                result = outcome in outcomes
                return (int(result),)
        else:
            def func(outcome):
                result = outcome in outcomes
                return str(int(result))
        return func
def product_distribution(dist, rvs=None, rv_mode=None, base=None):
    """
    Returns a new distribution which is the product of marginals.
    Parameters
    ----------
    dist : distribution
        The original distribution.
    rvs : sequence
        A sequence whose elements are also sequences. Each inner sequence
        defines the marginal distribution used to create the new distribution.
        The inner sequences must be pairwise mutually exclusive, but not every
        random variable in the original distribution must be specified. If
        `None`, then a product distribution of one-way marginals is
        constructed.
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options
        are: {'indices', 'names'}. If equal to 'indices', then the elements
        of `rvs` are interpreted as random variable indices. If equal to
        'names', then the elements are interpreted as random variable names.
        If `None`, then the value of `dist._rv_mode` is consulted.
    base : float, 'linear', 'e'
        The desired base for the distribution probabilities.
    Returns
    -------
    d : Distribution
        The product distribution.
    Examples
    --------
    >>> d = dit.example_dists.Xor()
    >>> pd = product_distribution(d, [(0,), (1,), (2,)])
    """
    if not dist.is_joint():
        raise Exception("A joint distribution is required.")
    if rvs is None:
        # Default: one-way marginals over every random variable.
        names = dist.get_rv_names()
        if names is None:
            names = range(dist.outcome_length())
        indexes = [[i] for i in names]
    else:
        # We do not allow repeats and want to keep the order.
        # Use argument [1] since we don't need the names.
        parse = lambda rv: parse_rvs(dist, rv, rv_mode=rv_mode,
                                     unique=True, sort=False)[1]
        indexes = [parse(rv) for rv in rvs]
    # Reject overlapping groups: each variable may appear in one group only.
    all_indexes = [idx for index_list in indexes for idx in index_list]
    if len(all_indexes) != len(set(all_indexes)):
        raise Exception('The elements of `rvs` have nonzero intersection.')
    marginals = [dist.marginal(index_list, rv_mode=rv_mode) for index_list in indexes]
    ctor = dist._outcome_ctor
    ops = dist.ops
    # Take the Cartesian product of the marginals, concatenating outcomes
    # and multiplying probabilities (in the distribution's base).
    outcomes = []
    pmf = []
    for pairs in product(*[marg.zipped() for marg in marginals]):
        outcome = []
        prob = []
        for pair in pairs:
            outcome.extend(pair[0])
            prob.append(pair[1])
        outcomes.append(ctor(outcome))
        pmf.append(ops.mult_reduce(prob))
    d = Distribution(outcomes, pmf, validate=False)
    # Maybe we should use ditParams['base'] when base is None?
    if base is not None:
        d.set_base(base)
    return d
def all_dist_structures(outcome_length, alphabet_size):
    """Iterate over uniform distributions on every nonempty subset of the
    `alphabet_size`**`outcome_length` possible joint events.

    Parameters
    ----------
    outcome_length : int
        The length of outcomes to consider.
    alphabet_length : int
        The size of the alphabet for each random variable.

    Yields
    ------
    d : Distribution
        A uniform distribution over a subset of the possible joint events.
    """
    symbols = ''.join(str(i) for i in range(alphabet_size))
    words = product(symbols, repeat=outcome_length)
    subsets = powerset(words)
    # The powerset starts with the empty set, which is not a valid support.
    next(subsets)
    for subset in subsets:
        yield uniform([''.join(word) for word in subset])
def _int_to_dist(number, outcome_length, alphabet_size):
    """Construct the `number`th uniform-support distribution over
    `outcome_length` variables, each with `alphabet_size` symbols.

    The binary digits of `number` select which joint events receive
    (equal) nonzero probability.

    Parameters
    ----------
    number : int
        The index of the distribution to construct.
    outcome_length : int
        The number of random variables in each joint event.
    alphabet_size : int
        The size of the alphabet for each random variable.

    Returns
    -------
    d : Distribution
        A uniform distribution over the joint events selected by `number`.
    """
    symbols = ''.join(str(i) for i in range(alphabet_size))
    words = [''.join(w) for w in product(symbols, repeat=outcome_length)]
    # One bit per possible joint event, in little-endian order.
    events = digits(number, 2, pad=alphabet_size**outcome_length, big_endian=False)
    total = sum(events)
    return Distribution(words, [p / total for p in events])
def random_dist_structure(outcome_length, alphabet_size):
    """Return a uniform distribution over a random nonempty subset of the
    `alphabet_size`**`outcome_length` possible joint events.

    Parameters
    ----------
    outcome_length : int
        The number of random variables in each joint event.
    alphabet_size : int
        The size of the alphabet for each random variable.

    Returns
    -------
    d : Distribution
        A uniform distribution over a random subset of joint events.
    """
    num_subsets = 2**(alphabet_size**outcome_length)
    # Index 0 would be the empty support, so draw from [1, num_subsets - 1].
    index = randint(1, num_subsets - 1)
    return _int_to_dist(index, outcome_length, alphabet_size)
def _combine_scalar_dists(d1, d2, op):
    """Combine `d1` and `d2` via `op`, treating them as independent.

    Parameters
    ----------
    d1 : ScalarDistribution
        The first distribution
    d2 : ScalarDistribution
        The second distribution
    op : function
        Function used to combine outcomes

    Returns
    -------
    d : ScalarDistribution
        The two distributions combined via `op`
    """
    # Copy so that converting bases does not lose precision in `d2`.
    d2 = d2.copy(base=d1.get_base())
    combined = defaultdict(float)
    for (o1, p1), (o2, p2) in product(d1.zipped(), d2.zipped()):
        combined[op(o1, o2)] += d1.ops.mult(p1, p2)
    outcomes, pmf = zip(*combined.items())
    return ScalarDistribution(outcomes, pmf, base=d1.get_base())
| bsd-3-clause | be542db41ce65503dc1855993cb1b0df | 31.248629 | 99 | 0.609637 | 4.160377 | false | false | false | false |
pallets/jinja | docs/examples/inline_gettext_extension.py | 3 | 2398 | import re
from jinja2.exceptions import TemplateSyntaxError
from jinja2.ext import Extension
from jinja2.lexer import count_newlines
from jinja2.lexer import Token
# Matches `gettext(` or `_(` in template data, optionally preceded by a
# backslash that escapes (suppresses) the translation marker.
_outside_re = re.compile(r"\\?(gettext|_)\(")
# Matches a paren (optionally escaped) while inside a gettext expression.
_inside_re = re.compile(r"\\?[()]")
class InlineGettext(Extension):
    """This extension implements support for inline gettext blocks::
    <h1>_(Welcome)</h1>
    <p>_(This is a paragraph)</p>
    Requires the i18n extension to be loaded and configured.
    """
    def filter_stream(self, stream):
        # Rewrites `_(...)` spans in data tokens into {% trans %}...{%
        # endtrans %} token sequences. `paren_stack` is the depth of open
        # parentheses inside a gettext expression; 0 means "outside".
        paren_stack = 0
        for token in stream:
            if token.type != "data":
                # Only raw template data is scanned; other tokens pass through.
                yield token
                continue
            pos = 0
            lineno = token.lineno
            while True:
                # Outside a gettext call we look for an opening marker;
                # inside we track parens to find the matching close.
                if not paren_stack:
                    match = _outside_re.search(token.value, pos)
                else:
                    match = _inside_re.search(token.value, pos)
                if match is None:
                    break
                new_pos = match.start()
                if new_pos > pos:
                    # Emit the data preceding the match unchanged.
                    preval = token.value[pos:new_pos]
                    yield Token(lineno, "data", preval)
                    lineno += count_newlines(preval)
                gtok = match.group()
                if gtok[0] == "\\":
                    # Escaped marker/paren: emit it literally without the
                    # backslash.
                    yield Token(lineno, "data", gtok[1:])
                elif not paren_stack:
                    # Opening `_(` or `gettext(`: start a trans block.
                    yield Token(lineno, "block_begin", None)
                    yield Token(lineno, "name", "trans")
                    yield Token(lineno, "block_end", None)
                    paren_stack = 1
                else:
                    # Nested parens stay in the data; the outermost `)`
                    # (depth 1) is consumed and closes the block.
                    if gtok == "(" or paren_stack > 1:
                        yield Token(lineno, "data", gtok)
                    paren_stack += -1 if gtok == ")" else 1
                    if not paren_stack:
                        yield Token(lineno, "block_begin", None)
                        yield Token(lineno, "name", "endtrans")
                        yield Token(lineno, "block_end", None)
                pos = match.end()
            if pos < len(token.value):
                # Emit any trailing data after the last match.
                yield Token(lineno, "data", token.value[pos:])
        if paren_stack:
            # A gettext expression was opened but never closed.
            raise TemplateSyntaxError(
                "unclosed gettext expression",
                token.lineno,
                stream.name,
                stream.filename,
            )
| bsd-3-clause | b9d0d518707363a7ef9891745c1b7bb4 | 32.305556 | 68 | 0.477481 | 4.59387 | false | false | false | false |
pallets/jinja | src/jinja2/filters.py | 4 | 53509 | """Built-in template filters used with the ``|`` operator."""
import math
import random
import re
import typing
import typing as t
from collections import abc
from itertools import chain
from itertools import groupby
from markupsafe import escape
from markupsafe import Markup
from markupsafe import soft_str
from .async_utils import async_variant
from .async_utils import auto_aiter
from .async_utils import auto_await
from .async_utils import auto_to_list
from .exceptions import FilterArgumentError
from .runtime import Undefined
from .utils import htmlsafe_json_dumps
from .utils import pass_context
from .utils import pass_environment
from .utils import pass_eval_context
from .utils import pformat
from .utils import url_quote
from .utils import urlize
if t.TYPE_CHECKING:
import typing_extensions as te
from .environment import Environment
from .nodes import EvalContext
from .runtime import Context
from .sandbox import SandboxedEnvironment # noqa: F401
    class HasHTML(te.Protocol):
        """Structural type for objects exposing a MarkupSafe-style
        ``__html__`` method (only used for static type checking)."""

        def __html__(self) -> str:
            pass
F = t.TypeVar("F", bound=t.Callable[..., t.Any])  # a filter callable
K = t.TypeVar("K")  # mapping key type
V = t.TypeVar("V")  # element / mapping value type
def ignore_case(value: V) -> V:
    """Postprocessor for :func:`make_attrgetter`: lowercase strings,
    pass every other type through unchanged."""
    return t.cast(V, value.lower()) if isinstance(value, str) else value
def make_attrgetter(
environment: "Environment",
attribute: t.Optional[t.Union[str, int]],
postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
default: t.Optional[t.Any] = None,
) -> t.Callable[[t.Any], t.Any]:
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
parts = _prepare_attribute_parts(attribute)
def attrgetter(item: t.Any) -> t.Any:
for part in parts:
item = environment.getitem(item, part)
if default is not None and isinstance(item, Undefined):
item = default
if postprocess is not None:
item = postprocess(item)
return item
return attrgetter
def make_multi_attrgetter(
environment: "Environment",
attribute: t.Optional[t.Union[str, int]],
postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
) -> t.Callable[[t.Any], t.List[t.Any]]:
"""Returns a callable that looks up the given comma separated
attributes from a passed object with the rules of the environment.
Dots are allowed to access attributes of each attribute. Integer
parts in paths are looked up as integers.
The value returned by the returned callable is a list of extracted
attribute values.
Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
"""
if isinstance(attribute, str):
split: t.Sequence[t.Union[str, int, None]] = attribute.split(",")
else:
split = [attribute]
parts = [_prepare_attribute_parts(item) for item in split]
def attrgetter(item: t.Any) -> t.List[t.Any]:
items = [None] * len(parts)
for i, attribute_part in enumerate(parts):
item_i = item
for part in attribute_part:
item_i = environment.getitem(item_i, part)
if postprocess is not None:
item_i = postprocess(item_i)
items[i] = item_i
return items
return attrgetter
def _prepare_attribute_parts(
attr: t.Optional[t.Union[str, int]]
) -> t.List[t.Union[str, int]]:
if attr is None:
return []
if isinstance(attr, str):
return [int(x) if x.isdigit() else x for x in attr.split(".")]
return [attr]
def do_forceescape(value: "t.Union[str, HasHTML]") -> Markup:
    """Enforce HTML escaping. This will probably double escape variables."""
    html = getattr(value, "__html__", None)
    if html is not None:
        # Unwrap the already-safe representation so it gets escaped again.
        value = html()
    return escape(str(value))
def do_urlencode(
    value: t.Union[str, t.Mapping[str, t.Any], t.Iterable[t.Tuple[str, t.Any]]]
) -> str:
    """Quote data for use in a URL path or query using UTF-8.

    A plain string is quoted directly (``/`` is left unquoted; use
    ``|replace("/", "%2F")`` if you need quoted slashes).  A dict or an
    iterable of ``(key, value)`` pairs is joined into a query string.

    .. versionadded:: 2.7
    """
    if isinstance(value, str) or not isinstance(value, abc.Iterable):
        return url_quote(value)

    pairs: t.Iterable[t.Tuple[str, t.Any]]
    if isinstance(value, dict):
        pairs = value.items()
    else:
        pairs = value  # type: ignore

    return "&".join(
        f"{url_quote(k, for_qs=True)}={url_quote(v, for_qs=True)}" for k, v in pairs
    )
@pass_eval_context
def do_replace(
    eval_ctx: "EvalContext", s: str, old: str, new: str, count: t.Optional[int] = None
) -> str:
    """Return a copy of the value with all occurrences of a substring
    replaced with a new one. The first argument is the substring
    that should be replaced, the second is the replacement string.
    If the optional third argument ``count`` is given, only the first
    ``count`` occurrences are replaced:

    .. sourcecode:: jinja

        {{ "Hello World"|replace("Hello", "Goodbye") }}
            -> Goodbye World

        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
            -> d'oh, d'oh, aaargh
    """
    if count is None:
        count = -1  # str.replace interprets -1 as "replace all occurrences"
    if not eval_ctx.autoescape:
        # No autoescaping: plain string replacement is sufficient.
        return str(s).replace(str(old), str(new), count)
    # NOTE(review): ``and`` binds tighter than ``or``, so this reads as
    # ``A or (B and not C)`` — i.e. with a safe ``old`` the input is escaped
    # even when ``s`` is already safe. Possibly intentional (markup-aware
    # arguments force coercion to Markup); confirm before "fixing".
    if (
        hasattr(old, "__html__")
        or hasattr(new, "__html__")
        and not hasattr(s, "__html__")
    ):
        s = escape(s)
    else:
        s = soft_str(s)
    return s.replace(soft_str(old), soft_str(new), count)
def do_upper(s: str) -> str:
    """Convert a value to uppercase."""
    text = soft_str(s)
    return text.upper()
def do_lower(s: str) -> str:
    """Convert a value to lowercase."""
    text = soft_str(s)
    return text.lower()
def do_items(value: t.Union[t.Mapping[K, V], Undefined]) -> t.Iterator[t.Tuple[K, V]]:
    """Iterate over the ``(key, value)`` items of a mapping.

    ``x|items`` behaves like ``x.items()`` except that an undefined ``x``
    produces an empty iterator instead of failing.  Useful when the
    template must also render under non-Python Jinja implementations
    whose mapping type has no ``.items()`` method.

    .. code-block:: html+jinja

        <dl>
        {% for key, value in my_dict|items %}
            <dt>{{ key }}
            <dd>{{ value }}
        {% endfor %}
        </dl>

    .. versionadded:: 3.1
    """
    if isinstance(value, Undefined):
        return
    if not isinstance(value, abc.Mapping):
        raise TypeError("Can only get item pairs from a mapping.")
    for pair in value.items():
        yield pair
@pass_eval_context
def do_xmlattr(
    eval_ctx: "EvalContext", d: t.Mapping[str, t.Any], autospace: bool = True
) -> str:
    """Create an SGML/XML attribute string based on the items in a dict.

    Values that are ``none`` or ``undefined`` are skipped; all other
    values are escaped:

    .. sourcecode:: html+jinja

        <ul{{ {'class': 'my_list', 'missing': none,
                'id': 'list-%d'|format(variable)}|xmlattr }}>
        ...
        </ul>

    Results in something like this:

    .. sourcecode:: html

        <ul class="my_list" id="list-42">
        ...
        </ul>

    A space is automatically prepended when the result is non-empty,
    unless ``autospace`` is false.

    :raises ValueError: if a key contains whitespace, ``/``, ``>`` or
        ``=``.  Escaping cannot make such keys safe — they would allow
        injecting arbitrary attributes or closing the tag (see the
        upstream Jinja advisory for CVE-2024-22195).
    """
    # Keys are attribute *names*: escape() protects values, but a
    # malicious key could still break out of the attribute position,
    # so reject unsafe characters outright.
    unsafe_key_re = re.compile(r"[\s/>=]")
    items = []
    for key, value in d.items():
        if value is None or isinstance(value, Undefined):
            continue
        if unsafe_key_re.search(key) is not None:
            raise ValueError(f"Invalid character in attribute name: {key!r}")
        items.append(f'{escape(key)}="{escape(value)}"')
    rv = " ".join(items)
    if autospace and rv:
        rv = " " + rv
    if eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
def do_capitalize(s: str) -> str:
    """Capitalize a value: uppercase the first character, lowercase the
    rest."""
    text = soft_str(s)
    return text.capitalize()
# Split points that start a new word: whitespace and common opening
# punctuation (hyphen, parens, brackets, braces, angle bracket).
_word_beginning_split_re = re.compile(r"([-\s({\[<]+)")


def do_title(s: str) -> str:
    """Return a titlecased version of the value: every word starts with
    an uppercase letter, all remaining characters are lowercase."""
    pieces = _word_beginning_split_re.split(soft_str(s))
    return "".join(piece[0].upper() + piece[1:].lower() for piece in pieces if piece)
def do_dictsort(
    value: t.Mapping[K, V],
    case_sensitive: bool = False,
    by: 'te.Literal["key", "value"]' = "key",
    reverse: bool = False,
) -> t.List[t.Tuple[K, V]]:
    """Sort a dict and return its ``(key, value)`` pairs as a list.
    Python dicts may not be in the order you want to display them in,
    so sort them first.

    .. sourcecode:: jinja

        {% for key, value in mydict|dictsort %}
            sort the dict by key, case insensitive

        {% for key, value in mydict|dictsort(reverse=true) %}
            sort the dict by key, case insensitive, reverse order

        {% for key, value in mydict|dictsort(true) %}
            sort the dict by key, case sensitive

        {% for key, value in mydict|dictsort(false, 'value') %}
            sort the dict by value, case insensitive
    """
    if by == "key":
        pos = 0
    elif by == "value":
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either "key" or "value"')

    def sort_key(pair: t.Tuple[t.Any, t.Any]) -> t.Any:
        field = pair[pos]
        return field if case_sensitive else ignore_case(field)

    return sorted(value.items(), key=sort_key, reverse=reverse)
@pass_environment
def do_sort(
    environment: "Environment",
    value: "t.Iterable[V]",
    reverse: bool = False,
    case_sensitive: bool = False,
    attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.List[V]":
    """Sort an iterable using Python's :func:`sorted`.

    .. sourcecode:: jinja

        {% for city in cities|sort %}
            ...
        {% endfor %}

    :param reverse: Sort descending instead of ascending.
    :param case_sensitive: When sorting strings, sort upper and lower
        case separately.
    :param attribute: When sorting objects or dicts, an attribute or
        key to sort by. Supports dot notation like ``"address.city"``
        and a comma separated list of attributes like ``"age,name"``.

    The sort is stable: items that compare equal keep their relative
    order, so sorts on different attributes can be chained.

    .. sourcecode:: jinja

        {% for user in users|sort(attribute="name")
            |sort(reverse=true, attribute="age") %}
            ...
        {% endfor %}

    .. versionchanged:: 2.11.0
        The ``attribute`` parameter can be a comma separated list of
        attributes, e.g. ``"age,name"``.

    .. versionchanged:: 2.6
       The ``attribute`` parameter was added.
    """
    sort_key = make_multi_attrgetter(
        environment, attribute, postprocess=None if case_sensitive else ignore_case
    )
    return sorted(value, key=sort_key, reverse=reverse)
@pass_environment
def do_unique(
    environment: "Environment",
    value: "t.Iterable[V]",
    case_sensitive: bool = False,
    attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.Iterator[V]":
    """Yield the unique items of the given iterable, in the order of
    their first occurrence.

    .. sourcecode:: jinja

        {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
            -> ['foo', 'bar', 'foobar']

    :param case_sensitive: Treat upper and lower case strings as distinct.
    :param attribute: Filter objects with unique values for this attribute.
    """
    key_of = make_attrgetter(
        environment, attribute, postprocess=None if case_sensitive else ignore_case
    )
    seen: t.Set[t.Any] = set()
    for item in value:
        marker = key_of(item)
        if marker in seen:
            continue
        seen.add(marker)
        yield item
def _min_or_max(
    environment: "Environment",
    value: "t.Iterable[V]",
    func: "t.Callable[..., V]",
    case_sensitive: bool,
    attribute: t.Optional[t.Union[str, int]],
) -> "t.Union[V, Undefined]":
    """Shared implementation for the ``min`` and ``max`` filters.
    Returns ``undefined`` for an empty sequence."""
    iterator = iter(value)
    try:
        head = next(iterator)
    except StopIteration:
        return environment.undefined("No aggregated item, sequence was empty.")

    key_of = make_attrgetter(
        environment, attribute, postprocess=None if case_sensitive else ignore_case
    )
    # Re-attach the consumed first element before aggregating.
    return func(chain([head], iterator), key=key_of)
@pass_environment
def do_min(
    environment: "Environment",
    value: "t.Iterable[V]",
    case_sensitive: bool = False,
    attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.Union[V, Undefined]":
    """Return the smallest item from the sequence, or ``undefined``
    when it is empty.

    .. sourcecode:: jinja

        {{ [1, 2, 3]|min }}
            -> 1

    :param case_sensitive: Treat upper and lower case strings as distinct.
    :param attribute: Get the object with the min value of this attribute.
    """
    return _min_or_max(
        environment, value, min, case_sensitive=case_sensitive, attribute=attribute
    )
@pass_environment
def do_max(
    environment: "Environment",
    value: "t.Iterable[V]",
    case_sensitive: bool = False,
    attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.Union[V, Undefined]":
    """Return the largest item from the sequence, or ``undefined``
    when it is empty.

    .. sourcecode:: jinja

        {{ [1, 2, 3]|max }}
            -> 3

    :param case_sensitive: Treat upper and lower case strings as distinct.
    :param attribute: Get the object with the max value of this attribute.
    """
    return _min_or_max(
        environment, value, max, case_sensitive=case_sensitive, attribute=attribute
    )
def do_default(
    value: V,
    default_value: V = "",  # type: ignore
    boolean: bool = False,
) -> V:
    """Return ``default_value`` when ``value`` is undefined, otherwise
    the value itself.

    .. sourcecode:: jinja

        {{ my_variable|default('my_variable is not defined') }}

    To also substitute values that evaluate to false (empty string,
    ``0``, ...), pass ``true`` as the second parameter:

    .. sourcecode:: jinja

        {{ ''|default('the string was empty', true) }}

    .. versionchanged:: 2.11
       It's now possible to configure the :class:`~jinja2.Environment` with
       :class:`~jinja2.ChainableUndefined` to make the `default` filter work
       on nested elements and attributes that may contain undefined values
       in the chain without getting an :exc:`~jinja2.UndefinedError`.
    """
    # The isinstance test must run first: strict undefined objects can
    # raise when evaluated for truth, so short-circuiting matters here.
    is_undefined = isinstance(value, Undefined)
    if is_undefined or (boolean and not value):
        return default_value
    return value
@pass_eval_context
def sync_do_join(
    eval_ctx: "EvalContext",
    value: t.Iterable,
    d: str = "",
    attribute: t.Optional[t.Union[str, int]] = None,
) -> str:
    """Return a string which is the concatenation of the strings in the
    sequence. The separator between elements is an empty string per
    default, you can define it with the optional parameter:

    .. sourcecode:: jinja

        {{ [1, 2, 3]|join('|') }}
            -> 1|2|3

        {{ [1, 2, 3]|join }}
            -> 123

    It is also possible to join certain attributes of an object:

    .. sourcecode:: jinja

        {{ users|join(', ', attribute='username') }}

    .. versionadded:: 2.6
       The `attribute` parameter was added.
    """
    if attribute is not None:
        value = map(make_attrgetter(eval_ctx.environment, attribute), value)
    # no automatic escaping?  joining is a lot easier then
    if not eval_ctx.autoescape:
        return str(d).join(map(str, value))
    # if the delimiter doesn't have an html representation we check
    # if any of the items has.  If yes we do a coercion to Markup
    if not hasattr(d, "__html__"):
        value = list(value)
        do_escape = False
        # One pass: detect markup-aware items and stringify the rest,
        # so a later join never mixes str and Markup semantics.
        for idx, item in enumerate(value):
            if hasattr(item, "__html__"):
                do_escape = True
            else:
                value[idx] = str(item)
        if do_escape:
            # At least one safe item: the delimiter must be escaped so
            # its .join() keeps the result safe.
            d = escape(d)
        else:
            d = str(d)
        return d.join(value)
    # no html involved, to normal joining
    return soft_str(d).join(map(soft_str, value))
@async_variant(sync_do_join)  # type: ignore
async def do_join(
    eval_ctx: "EvalContext",
    value: t.Union[t.AsyncIterable, t.Iterable],
    d: str = "",
    attribute: t.Optional[t.Union[str, int]] = None,
) -> str:
    # Async variant: drain the (possibly async) iterable into a list,
    # then delegate to the synchronous implementation.
    return sync_do_join(eval_ctx, await auto_to_list(value), d, attribute)
def do_center(value: str, width: int = 80) -> str:
    """Center the value in a field of the given width."""
    text = soft_str(value)
    return text.center(width)
@pass_environment
def sync_do_first(
    environment: "Environment", seq: "t.Iterable[V]"
) -> "t.Union[V, Undefined]":
    """Return the first item of a sequence, or ``undefined`` when the
    sequence is empty."""
    for item in seq:
        return item
    return environment.undefined("No first item, sequence was empty.")
@async_variant(sync_do_first)  # type: ignore
async def do_first(
    environment: "Environment", seq: "t.Union[t.AsyncIterable[V], t.Iterable[V]]"
) -> "t.Union[V, Undefined]":
    # Async variant: pull one element from the (possibly async) iterator;
    # an immediately-exhausted iterator yields ``undefined``.
    try:
        return await auto_aiter(seq).__anext__()
    except StopAsyncIteration:
        return environment.undefined("No first item, sequence was empty.")
@pass_environment
def do_last(
    environment: "Environment", seq: "t.Reversible[V]"
) -> "t.Union[V, Undefined]":
    """Return the last item of a sequence, or ``undefined`` when it is
    empty.

    Note: does not work with generators. Convert explicitly first:

    .. sourcecode:: jinja

        {{ data | selectattr('name', '==', 'Jinja') | list | last }}
    """
    for item in reversed(seq):
        return item
    return environment.undefined("No last item, sequence was empty.")
# No async do_last, it may not be safe in async mode.
@pass_context
def do_random(context: "Context", seq: "t.Sequence[V]") -> "t.Union[V, Undefined]":
    """Return a random item from the sequence."""
    try:
        return random.choice(seq)
    except IndexError:
        # random.choice raises IndexError on an empty sequence.
        return context.environment.undefined("No random item, sequence was empty.")
def do_filesizeformat(value: t.Union[str, float, int], binary: bool = False) -> str:
    """Format the value like a 'human-readable' file size (i.e. 13 kB,
    4.1 MB, 102 Bytes, etc).  Decimal prefixes (kB, MB, ...) are used by
    default; pass ``binary=True`` for binary prefixes (KiB, MiB, ...).
    """
    size = float(value)
    base = 1024 if binary else 1000
    if binary:
        suffixes = ["KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]
    else:
        suffixes = ["kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]

    if size == 1:
        return "1 Byte"
    if size < base:
        return f"{int(size)} Bytes"

    # Find the first unit the size fits under; if none matches, the loop
    # falls through with the largest unit/suffix still bound.
    for i, suffix in enumerate(suffixes):
        unit = base ** (i + 2)
        if size < unit:
            break
    return f"{base * size / unit:.1f} {suffix}"
def do_pprint(value: t.Any) -> str:
    """Pretty print a variable. Useful for debugging."""
    return pformat(value)
# A valid extra-scheme prefix, e.g. "ftp:" or "irc://": two or more word
# chars followed by a colon and up to two slashes.
_uri_scheme_re = re.compile(r"^([\w.+-]{2,}:(/){0,2})$")


@pass_eval_context
def do_urlize(
    eval_ctx: "EvalContext",
    value: str,
    trim_url_limit: t.Optional[int] = None,
    nofollow: bool = False,
    target: t.Optional[str] = None,
    rel: t.Optional[str] = None,
    extra_schemes: t.Optional[t.Iterable[str]] = None,
) -> str:
    """Convert URLs in text into clickable links.

    This may not recognize links in some situations. Usually, a more
    comprehensive formatter, such as a Markdown library, is a better
    choice.

    Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
    addresses. Links with trailing punctuation (periods, commas, closing
    parentheses) and leading punctuation (opening parentheses) are
    recognized excluding the punctuation. Email addresses that include
    header fields are not recognized (for example,
    ``mailto:address@example.com?cc=copy@example.com``).

    :param value: Original text containing URLs to link.
    :param trim_url_limit: Shorten displayed URL values to this length.
    :param nofollow: Add the ``rel=nofollow`` attribute to links.
    :param target: Add the ``target`` attribute to links.
    :param rel: Add the ``rel`` attribute to links.
    :param extra_schemes: Recognize URLs that start with these schemes
        in addition to the default behavior. Defaults to
        ``env.policies["urlize.extra_schemes"]``, which defaults to no
        extra schemes.

    .. versionchanged:: 3.0
        The ``extra_schemes`` parameter was added.

    .. versionchanged:: 3.0
        Generate ``https://`` links for URLs without a scheme.

    .. versionchanged:: 3.0
        The parsing rules were updated. Recognize email addresses with
        or without the ``mailto:`` scheme. Validate IP addresses. Ignore
        parentheses and brackets in more cases.

    .. versionchanged:: 2.8
       The ``target`` parameter was added.
    """
    policies = eval_ctx.environment.policies
    # Merge explicit rel values, the nofollow flag and the environment
    # policy into one deduplicated, sorted rel attribute.
    rel_parts = set((rel or "").split())
    if nofollow:
        rel_parts.add("nofollow")
    rel_parts.update((policies["urlize.rel"] or "").split())
    rel = " ".join(sorted(rel_parts)) or None
    if target is None:
        target = policies["urlize.target"]
    if extra_schemes is None:
        extra_schemes = policies["urlize.extra_schemes"] or ()
    # Validate caller-supplied schemes early with a clear error message.
    for scheme in extra_schemes:
        if _uri_scheme_re.fullmatch(scheme) is None:
            raise FilterArgumentError(f"{scheme!r} is not a valid URI scheme prefix.")
    rv = urlize(
        value,
        trim_url_limit=trim_url_limit,
        rel=rel,
        target=target,
        extra_schemes=extra_schemes,
    )
    if eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
def do_indent(
    s: str, width: t.Union[int, str] = 4, first: bool = False, blank: bool = False
) -> str:
    """Return a copy of the string with each line indented by 4 spaces. The
    first line and blank lines are not indented by default.

    :param width: Number of spaces, or a string, to indent by.
    :param first: Don't skip indenting the first line.
    :param blank: Don't skip indenting empty lines.

    .. versionchanged:: 3.0
        ``width`` can be a string.

    .. versionchanged:: 2.10
        Blank lines are not indented by default.

        Rename the ``indentfirst`` argument to ``first``.
    """
    indent = width if isinstance(width, str) else " " * width
    newline = "\n"

    if isinstance(s, Markup):
        indent = Markup(indent)
        newline = Markup(newline)

    # The trailing newline keeps splitlines() from swallowing a final
    # blank line of the input.
    s += newline

    if blank:
        rv = (newline + indent).join(s.splitlines())
    else:
        head, *rest = s.splitlines()
        rv = head
        if rest:
            rv += newline + newline.join(
                indent + line if line else line for line in rest
            )

    if first:
        rv = indent + rv

    return rv
@pass_environment
def do_truncate(
    env: "Environment",
    s: str,
    length: int = 255,
    killwords: bool = False,
    end: str = "...",
    leeway: t.Optional[int] = None,
) -> str:
    """Return a truncated copy of the string.

    :param length: Maximum length including the ellipsis (default 255).
    :param killwords: Cut exactly at ``length``; otherwise discard the
        last partial word.
    :param end: Ellipsis appended when truncation happens.
    :param leeway: Strings exceeding ``length`` by at most this margin
        are left untouched; defaults to the environment policy.

    .. sourcecode:: jinja

        {{ "foo bar baz qux"|truncate(9) }}
            -> "foo..."
        {{ "foo bar baz qux"|truncate(9, True) }}
            -> "foo ba..."
        {{ "foo bar baz qux"|truncate(11) }}
            -> "foo bar baz qux"
        {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
            -> "foo bar..."
    """
    if leeway is None:
        leeway = env.policies["truncate.leeway"]

    assert length >= len(end), f"expected length >= {len(end)}, got {length}"
    assert leeway >= 0, f"expected leeway >= 0, got {leeway}"

    if len(s) <= length + leeway:
        return s

    cut = length - len(end)
    if killwords:
        return s[:cut] + end
    # Drop the trailing partial word before appending the ellipsis.
    return s[:cut].rsplit(" ", 1)[0] + end
@pass_environment
def do_wordwrap(
    environment: "Environment",
    s: str,
    width: int = 79,
    break_long_words: bool = True,
    wrapstring: t.Optional[str] = None,
    break_on_hyphens: bool = True,
) -> str:
    """Wrap a string to the given width. Existing newlines are treated
    as paragraphs to be wrapped separately.

    :param s: Original text to wrap.
    :param width: Maximum length of wrapped lines.
    :param break_long_words: If a word is longer than ``width``, break
        it across lines.
    :param break_on_hyphens: If a word contains hyphens, it may be split
        across lines.
    :param wrapstring: String to join each wrapped line. Defaults to
        :attr:`Environment.newline_sequence`.

    .. versionchanged:: 2.11
        Existing newlines are treated as paragraphs wrapped separately.

    .. versionchanged:: 2.11
        Added the ``break_on_hyphens`` parameter.

    .. versionchanged:: 2.7
        Added the ``wrapstring`` parameter.
    """
    import textwrap

    if wrapstring is None:
        wrapstring = environment.newline_sequence

    # textwrap.wrap ignores existing newlines, which would produce short
    # lines; wrap each original line (paragraph) separately instead.
    wrapped_paragraphs = [
        wrapstring.join(
            textwrap.wrap(
                line,
                width=width,
                expand_tabs=False,
                replace_whitespace=False,
                break_long_words=break_long_words,
                break_on_hyphens=break_on_hyphens,
            )
        )
        for line in s.splitlines()
    ]
    return wrapstring.join(wrapped_paragraphs)
# A "word" for the wordcount filter: any run of word characters.
_word_re = re.compile(r"\w+")


def do_wordcount(s: str) -> int:
    """Count the words in that string."""
    return sum(1 for _ in _word_re.finditer(soft_str(s)))
def do_int(value: t.Any, default: int = 0, base: int = 10) -> int:
    """Convert the value into an integer, returning ``default`` (``0``
    unless overridden) when conversion fails.  ``base`` applies to
    string input only and also handles ``0b``/``0o``/``0x`` prefixes
    for bases 2, 8 and 16.
    """
    try:
        return int(value, base) if isinstance(value, str) else int(value)
    except (TypeError, ValueError):
        # Second attempt through float so that "42.23"|int yields 42.
        try:
            return int(float(value))
        except (TypeError, ValueError):
            return default
def do_float(value: t.Any, default: float = 0.0) -> float:
    """Convert the value into a floating point number, returning
    ``default`` (``0.0`` unless overridden) when conversion fails."""
    try:
        result = float(value)
    except (TypeError, ValueError):
        result = default
    return result
def do_format(value: str, *args: t.Any, **kwargs: t.Any) -> str:
    """Apply the given values to a `printf-style`_ format string, like
    ``string % values``.

    .. sourcecode:: jinja

        {{ "%s, %s!"|format(greeting, name) }}
            Hello, World!

    In most cases it should be more convenient and efficient to use the
    ``%`` operator or :meth:`str.format`.

    .. _printf-style: https://docs.python.org/library/stdtypes.html
        #printf-style-string-formatting
    """
    if args and kwargs:
        raise FilterArgumentError(
            "can't handle positional and keyword arguments at the same time"
        )

    fmt_values: t.Any = kwargs or args
    return soft_str(value) % fmt_values
def do_trim(value: str, chars: t.Optional[str] = None) -> str:
    """Strip leading and trailing characters, by default whitespace."""
    text = soft_str(value)
    return text.strip(chars)
def do_striptags(value: "t.Union[str, HasHTML]") -> str:
    """Strip SGML/XML tags and replace adjacent whitespace by one space."""
    html = getattr(value, "__html__", None)
    if html is not None:
        value = html()
    return Markup(str(value)).striptags()
def sync_do_slice(
    value: "t.Collection[V]", slices: int, fill_with: "t.Optional[V]" = None
) -> "t.Iterator[t.List[V]]":
    """Slice an iterable into ``slices`` columns and yield them as
    lists.  Useful for rendering, e.g., three ``<ul>`` columns:

    .. sourcecode:: html+jinja

        <div class="columnwrapper">
        {%- for column in items|slice(3) %}
            <ul class="column-{{ loop.index }}">
            {%- for item in column %}
                <li>{{ item }}</li>
            {%- endfor %}
            </ul>
        {%- endfor %}
        </div>

    A second argument pads the shorter trailing columns with that value.
    """
    seq = list(value)
    per_slice, extra = divmod(len(seq), slices)
    # ``offset`` accumulates one extra slot for each of the first
    # ``extra`` columns so leftover items spread over the leading columns.
    offset = 0

    for index in range(slices):
        start = offset + index * per_slice

        if index < extra:
            offset += 1

        end = offset + (index + 1) * per_slice
        column = seq[start:end]

        if fill_with is not None and index >= extra:
            column.append(fill_with)

        yield column
@async_variant(sync_do_slice)  # type: ignore
async def do_slice(
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    slices: int,
    fill_with: t.Optional[t.Any] = None,
) -> "t.Iterator[t.List[V]]":
    # Async variant: materialize the (possibly async) iterable, then
    # delegate to the synchronous implementation.
    return sync_do_slice(await auto_to_list(value), slices, fill_with)
def do_batch(
    value: "t.Iterable[V]", linecount: int, fill_with: "t.Optional[V]" = None
) -> "t.Iterator[t.List[V]]":
    """Batch items into lists of at most ``linecount`` elements — the
    inverse of ``slice``.  An optional second parameter pads the last
    incomplete batch:

    .. sourcecode:: html+jinja

        <table>
        {%- for row in items|batch(3, '&nbsp;') %}
        <tr>
        {%- for column in row %}
            <td>{{ column }}</td>
        {%- endfor %}
        </tr>
        {%- endfor %}
        </table>
    """
    batch: "t.List[V]" = []

    for item in value:
        if len(batch) == linecount:
            yield batch
            batch = []
        batch.append(item)

    if batch:
        if fill_with is not None and len(batch) < linecount:
            batch.extend([fill_with] * (linecount - len(batch)))
        yield batch
def do_round(
    value: float,
    precision: int = 0,
    method: 'te.Literal["common", "ceil", "floor"]' = "common",
) -> float:
    """Round the number to a given precision.

    :param precision: digits after the decimal point (default ``0``).
    :param method: ``'common'`` rounds up or down, ``'ceil'`` always
        rounds up, ``'floor'`` always rounds down.

    .. sourcecode:: jinja

        {{ 42.55|round }}
            -> 43.0
        {{ 42.55|round(1, 'floor') }}
            -> 42.5

    A float is always returned; pipe through ``int`` for an integer:

    .. sourcecode:: jinja

        {{ 42.55|round|int }}
            -> 43
    """
    if method not in {"common", "ceil", "floor"}:
        raise FilterArgumentError("method must be common, ceil or floor")

    if method == "common":
        return round(value, precision)

    # ceil/floor: scale, apply math.ceil/math.floor, scale back.
    scale = 10**precision
    rounder = getattr(math, method)
    return t.cast(float, rounder(value * scale) / scale)
class _GroupTuple(t.NamedTuple):
    """``(grouper, list)`` pair yielded by the ``groupby`` filter."""

    grouper: t.Any
    list: t.List

    # Use the regular tuple repr to hide this subclass if users print
    # out the value during debugging.
    def __repr__(self) -> str:
        return tuple.__repr__(self)

    def __str__(self) -> str:
        return tuple.__str__(self)
@pass_environment
def sync_do_groupby(
    environment: "Environment",
    value: "t.Iterable[V]",
    attribute: t.Union[str, int],
    default: t.Optional[t.Any] = None,
    case_sensitive: bool = False,
) -> "t.List[_GroupTuple]":
    """Group a sequence of objects by an attribute using Python's
    :func:`itertools.groupby`. The attribute can use dot notation for
    nested access, like ``"address.city"``. Unlike Python's ``groupby``,
    the values are sorted first so only one group is returned for each
    unique value.

    ``groupby`` yields namedtuples of ``(grouper, list)``: ``grouper``
    is the attribute value, ``list`` the items sharing it.

    .. sourcecode:: html+jinja

        <ul>{% for city, items in users|groupby("city") %}
          <li>{{ city }}
            <ul>{% for user in items %}
              <li>{{ user.name }}
            {% endfor %}</ul>
          </li>
        {% endfor %}</ul>

    ``default`` substitutes for objects missing the attribute. Like the
    :func:`~jinja-filters.sort` filter, sorting and grouping is
    case-insensitive unless ``case_sensitive=True``; the key of each
    group keeps the case of that group's first item.

    .. versionchanged:: 3.1
        Added the ``case_sensitive`` parameter.

    .. versionchanged:: 3.0
        Added the ``default`` parameter.

    .. versionchanged:: 2.6
        The attribute supports dot notation for nested access.
    """
    expr = make_attrgetter(
        environment,
        attribute,
        postprocess=ignore_case if not case_sensitive else None,
        default=default,
    )
    # itertools.groupby only merges *adjacent* equal keys, so the input
    # must be sorted by the same key function first.
    out = [
        _GroupTuple(key, list(values))
        for key, values in groupby(sorted(value, key=expr), expr)
    ]

    if not case_sensitive:
        # Return the real key from the first value instead of the lowercase key.
        output_expr = make_attrgetter(environment, attribute, default=default)
        out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]

    return out
@async_variant(sync_do_groupby)  # type: ignore
async def do_groupby(
    environment: "Environment",
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    attribute: t.Union[str, int],
    default: t.Optional[t.Any] = None,
    case_sensitive: bool = False,
) -> "t.List[_GroupTuple]":
    # Async variant of sync_do_groupby: the input iterable (and each
    # group) is drained with auto_to_list; the grouping logic mirrors
    # the synchronous implementation exactly.
    expr = make_attrgetter(
        environment,
        attribute,
        postprocess=ignore_case if not case_sensitive else None,
        default=default,
    )
    out = [
        _GroupTuple(key, await auto_to_list(values))
        for key, values in groupby(sorted(await auto_to_list(value), key=expr), expr)
    ]

    if not case_sensitive:
        # Return the real key from the first value instead of the lowercase key.
        output_expr = make_attrgetter(environment, attribute, default=default)
        out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]

    return out
@pass_environment
def sync_do_sum(
    environment: "Environment",
    iterable: "t.Iterable[V]",
    attribute: t.Optional[t.Union[str, int]] = None,
    start: V = 0,  # type: ignore
) -> V:
    """Return the sum of a sequence of numbers plus ``start`` (default
    ``0``).  For an empty sequence, ``start`` is returned.  A specific
    attribute of each item can be summed instead:

    .. sourcecode:: jinja

        Total: {{ items|sum(attribute='price') }}

    .. versionchanged:: 2.6
       The ``attribute`` parameter was added to allow summing up over
       attributes.  Also the ``start`` parameter was moved on to the right.
    """
    values: t.Iterable[t.Any] = iterable
    if attribute is not None:
        getter = make_attrgetter(environment, attribute)
        values = (getter(item) for item in iterable)

    return sum(values, start)  # type: ignore[no-any-return, call-overload]
@async_variant(sync_do_sum)  # type: ignore
async def do_sum(
    environment: "Environment",
    iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    attribute: t.Optional[t.Union[str, int]] = None,
    start: V = 0,  # type: ignore
) -> V:
    """Async counterpart of :func:`sync_do_sum`; accumulates while
    iterating with ``auto_aiter`` so async iterables work too.
    """
    if attribute is None:

        def extract(obj: V) -> V:
            return obj

    else:
        extract = make_attrgetter(environment, attribute)

    total = start

    async for element in auto_aiter(iterable):
        total += extract(element)

    return total
def sync_do_list(value: "t.Iterable[V]") -> "t.List[V]":
    """Materialize the value into a list. A string is expanded into a
    list of its individual characters.
    """
    return [*value]
@async_variant(sync_do_list)  # type: ignore
async def do_list(value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]") -> "t.List[V]":
    """Async counterpart of :func:`sync_do_list`; drains async iterables."""
    result = await auto_to_list(value)
    return result
def do_mark_safe(value: str) -> Markup:
    """Mark the value as safe: with automatic escaping enabled this
    variable will be inserted into the output without being escaped.
    """
    safe = Markup(value)
    return safe
def do_mark_unsafe(value: str) -> str:
    """Coerce the value to a plain (unsafe) string; the reverse
    operation of :func:`safe`.
    """
    text = str(value)
    return text
@typing.overload
def do_reverse(value: str) -> str:
    ...


@typing.overload
def do_reverse(value: "t.Iterable[V]") -> "t.Iterable[V]":
    ...


def do_reverse(value: t.Union[str, t.Iterable[V]]) -> t.Union[str, t.Iterable[V]]:
    """Reverse the object, or return an iterator that iterates over it
    the other way round.

    :raises FilterArgumentError: if the value is not iterable.
    """
    if isinstance(value, str):
        return value[::-1]

    try:
        # Sequences with a known length reverse lazily.
        return reversed(value)  # type: ignore
    except TypeError:
        # Plain iterables must be materialized first.
        try:
            rv = list(value)
        except TypeError as e:
            raise FilterArgumentError("argument must be iterable") from e

        rv.reverse()
        return rv
@pass_environment
def do_attr(
    environment: "Environment", obj: t.Any, name: str
) -> t.Union[Undefined, t.Any]:
    """Get an attribute of an object. ``foo|attr("bar")`` works like
    ``foo.bar``, except that only attribute access is attempted — items
    (subscripts) are never looked up.

    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
    """
    try:
        name = str(name)
    except UnicodeError:
        return environment.undefined(obj=obj, name=name)

    try:
        value = getattr(obj, name)
    except AttributeError:
        return environment.undefined(obj=obj, name=name)

    if environment.sandboxed:
        environment = t.cast("SandboxedEnvironment", environment)

        if not environment.is_safe_attribute(obj, name, value):
            return environment.unsafe_undefined(obj, name)

    return value
@typing.overload
def sync_do_map(
    context: "Context", value: t.Iterable, name: str, *args: t.Any, **kwargs: t.Any
) -> t.Iterable:
    ...


@typing.overload
def sync_do_map(
    context: "Context",
    value: t.Iterable,
    *,
    attribute: str = ...,
    default: t.Optional[t.Any] = None,
) -> t.Iterable:
    ...


@pass_context
def sync_do_map(
    context: "Context", value: t.Iterable, *args: t.Any, **kwargs: t.Any
) -> t.Iterable:
    """Apply a filter on a sequence of objects, or look up an attribute
    of each object. Useful when you only care about one value of a list
    of objects.

    The basic usage is mapping on an attribute, for example to get a
    list of usernames from a list of users:

    .. sourcecode:: jinja

        Users on this page: {{ users|map(attribute='username')|join(', ') }}

    A ``default`` value can be supplied for objects that are missing
    the attribute:

    .. sourcecode:: jinja

        {{ users|map(attribute="username", default="Anonymous")|join(", ") }}

    Alternatively, pass a filter name (and its arguments) to invoke a
    filter on each item, e.g. a text conversion filter:

    .. sourcecode:: jinja

        Users on this page: {{ titles|map('lower')|join(', ') }}

    Similar to a generator comprehension such as:

    .. code-block:: python

        (u.username for u in users)
        (getattr(u, "username", "Anonymous") for u in users)
        (do_lower(x) for x in titles)

    .. versionchanged:: 2.11.0
        Added the ``default`` parameter.

    .. versionadded:: 2.7
    """
    if value:
        yield from map(prepare_map(context, args, kwargs), value)
@typing.overload
def do_map(
    context: "Context",
    value: t.Union[t.AsyncIterable, t.Iterable],
    name: str,
    *args: t.Any,
    **kwargs: t.Any,
) -> t.Iterable:
    ...


@typing.overload
def do_map(
    context: "Context",
    value: t.Union[t.AsyncIterable, t.Iterable],
    *,
    attribute: str = ...,
    default: t.Optional[t.Any] = None,
) -> t.Iterable:
    ...


@async_variant(sync_do_map)  # type: ignore
async def do_map(
    context: "Context",
    value: t.Union[t.AsyncIterable, t.Iterable],
    *args: t.Any,
    **kwargs: t.Any,
) -> t.AsyncIterable:
    """Async counterpart of :func:`sync_do_map`; awaits each mapped result."""
    if value:
        mapper = prepare_map(context, args, kwargs)

        async for element in auto_aiter(value):
            yield await auto_await(mapper(element))
@pass_context
def sync_do_select(
    context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
    """Filter a sequence of objects by applying a test to each object,
    keeping only the objects for which the test succeeds.

    If no test is specified, each object will be evaluated as a boolean.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|select("odd") }}
        {{ numbers|select("divisibleby", 3) }}
        {{ numbers|select("lessthan", 42) }}
        {{ strings|select("equalto", "mystring") }}

    Similar to a generator comprehension such as:

    .. code-block:: python

        (n for n in numbers if test_odd(n))
        (n for n in numbers if test_divisibleby(n, 3))

    .. versionadded:: 2.7
    """
    return select_or_reject(context, value, args, kwargs, lambda result: result, False)
@async_variant(sync_do_select)  # type: ignore
async def do_select(
    context: "Context",
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    *args: t.Any,
    **kwargs: t.Any,
) -> "t.AsyncIterator[V]":
    """Async counterpart of :func:`sync_do_select`."""
    return async_select_or_reject(
        context, value, args, kwargs, lambda result: result, False
    )
@pass_context
def sync_do_reject(
    context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
    """Filter a sequence of objects by applying a test to each object,
    dropping the objects for which the test succeeds.

    If no test is specified, each object will be evaluated as a boolean.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|reject("odd") }}

    Similar to a generator comprehension such as:

    .. code-block:: python

        (n for n in numbers if not test_odd(n))

    .. versionadded:: 2.7
    """
    return select_or_reject(
        context, value, args, kwargs, lambda result: not result, False
    )
@async_variant(sync_do_reject)  # type: ignore
async def do_reject(
    context: "Context",
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    *args: t.Any,
    **kwargs: t.Any,
) -> "t.AsyncIterator[V]":
    """Async counterpart of :func:`sync_do_reject`."""
    return async_select_or_reject(
        context, value, args, kwargs, lambda result: not result, False
    )
@pass_context
def sync_do_selectattr(
    context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
    """Filter a sequence of objects by applying a test to the specified
    attribute of each object, keeping only the objects for which the
    test succeeds.

    If no test is specified, the attribute's value will be evaluated as
    a boolean.

    Example usage:

    .. sourcecode:: jinja

        {{ users|selectattr("is_active") }}
        {{ users|selectattr("email", "none") }}

    Similar to a generator comprehension such as:

    .. code-block:: python

        (u for user in users if user.is_active)
        (u for user in users if test_none(user.email))

    .. versionadded:: 2.7
    """
    return select_or_reject(context, value, args, kwargs, lambda result: result, True)
@async_variant(sync_do_selectattr)  # type: ignore
async def do_selectattr(
    context: "Context",
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    *args: t.Any,
    **kwargs: t.Any,
) -> "t.AsyncIterator[V]":
    """Async counterpart of :func:`sync_do_selectattr`."""
    return async_select_or_reject(
        context, value, args, kwargs, lambda result: result, True
    )
@pass_context
def sync_do_rejectattr(
    context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
    """Filter a sequence of objects by applying a test to the specified
    attribute of each object, dropping the objects for which the test
    succeeds.

    If no test is specified, the attribute's value will be evaluated as
    a boolean.

    .. sourcecode:: jinja

        {{ users|rejectattr("is_active") }}
        {{ users|rejectattr("email", "none") }}

    Similar to a generator comprehension such as:

    .. code-block:: python

        (u for user in users if not user.is_active)
        (u for user in users if not test_none(user.email))

    .. versionadded:: 2.7
    """
    return select_or_reject(
        context, value, args, kwargs, lambda result: not result, True
    )
@async_variant(sync_do_rejectattr)  # type: ignore
async def do_rejectattr(
    context: "Context",
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    *args: t.Any,
    **kwargs: t.Any,
) -> "t.AsyncIterator[V]":
    """Async counterpart of :func:`sync_do_rejectattr`."""
    return async_select_or_reject(
        context, value, args, kwargs, lambda result: not result, True
    )
@pass_eval_context
def do_tojson(
    eval_ctx: "EvalContext", value: t.Any, indent: t.Optional[int] = None
) -> Markup:
    """Serialize an object to a string of JSON, and mark it safe to
    render in HTML. This filter is only for use in HTML documents.

    The returned string is safe to render in HTML documents and
    ``<script>`` tags. The exception is in HTML attributes that are
    double quoted; either use single quotes or the ``|forceescape``
    filter.

    :param value: The object to serialize to JSON.
    :param indent: The ``indent`` parameter passed to ``dumps``, for
        pretty-printing the value.

    .. versionadded:: 2.9
    """
    # Both the dump function and its keyword arguments come from the
    # environment's policies so applications can customize them.
    policies = eval_ctx.environment.policies
    dump_func = policies["json.dumps_function"]
    dump_kwargs = policies["json.dumps_kwargs"]

    if indent is not None:
        # Copy before mutating: the policies dict is shared.
        dump_kwargs = dict(dump_kwargs, indent=indent)

    return htmlsafe_json_dumps(value, dumps=dump_func, **dump_kwargs)
def prepare_map(
    context: "Context", args: t.Tuple, kwargs: t.Dict[str, t.Any]
) -> t.Callable[[t.Any], t.Any]:
    """Build the per-item callable used by the ``map`` filter.

    With ``attribute=`` (and no positional args) this is an attribute
    getter; otherwise the first positional argument names a filter to
    call on each item.

    :raises FilterArgumentError: on unexpected keyword arguments or when
        neither an attribute nor a filter name is given.
    """
    if not args and "attribute" in kwargs:
        attribute = kwargs.pop("attribute")
        default = kwargs.pop("default", None)

        if kwargs:
            raise FilterArgumentError(
                f"Unexpected keyword argument {next(iter(kwargs))!r}"
            )

        return make_attrgetter(context.environment, attribute, default=default)

    if not args:
        raise FilterArgumentError("map requires a filter argument")

    name, args = args[0], args[1:]

    def apply_filter(item: t.Any) -> t.Any:
        return context.environment.call_filter(
            name, item, args, kwargs, context=context
        )

    return apply_filter
def prepare_select_or_reject(
    context: "Context",
    args: t.Tuple,
    kwargs: t.Dict[str, t.Any],
    modfunc: t.Callable[[t.Any], t.Any],
    lookup_attr: bool,
) -> t.Callable[[t.Any], t.Any]:
    """Build the predicate shared by select/reject/selectattr/rejectattr.

    The returned callable computes ``modfunc(test(transform(item)))``:

    * ``transform`` is an attribute getter when ``lookup_attr`` is true
      (the attribute name is ``args[0]``), otherwise the identity.
    * ``test`` is the environment test named by the next positional
      argument, or plain truthiness (``bool``) when none is given.
    * ``modfunc`` negates the result for the reject variants.

    :raises FilterArgumentError: if ``lookup_attr`` is set but no
        attribute name was supplied.
    """
    if lookup_attr:
        try:
            attr = args[0]
        except LookupError:
            raise FilterArgumentError("Missing parameter for attribute name") from None

        transfunc = make_attrgetter(context.environment, attr)
        # The attribute name consumed one positional argument.
        off = 1
    else:
        off = 0

        def transfunc(x: V) -> V:
            return x

    try:
        name = args[off]
        # NOTE: rebinding happens before ``func`` is defined, so the
        # closure captures the remaining test arguments only.
        args = args[1 + off :]

        def func(item: t.Any) -> t.Any:
            return context.environment.call_test(name, item, args, kwargs)

    except LookupError:
        # No test name given: evaluate the (transformed) item as a boolean.
        func = bool  # type: ignore

    return lambda item: modfunc(func(transfunc(item)))
def select_or_reject(
    context: "Context",
    value: "t.Iterable[V]",
    args: t.Tuple,
    kwargs: t.Dict[str, t.Any],
    modfunc: t.Callable[[t.Any], t.Any],
    lookup_attr: bool,
) -> "t.Iterator[V]":
    """Yield the items of *value* accepted by the prepared predicate.

    Shared implementation behind the sync select/reject(attr) filters.
    """
    if not value:
        return

    keep = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
    yield from filter(keep, value)
async def async_select_or_reject(
    context: "Context",
    value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    args: t.Tuple,
    kwargs: t.Dict[str, t.Any],
    modfunc: t.Callable[[t.Any], t.Any],
    lookup_attr: bool,
) -> "t.AsyncIterator[V]":
    """Async counterpart of :func:`select_or_reject`."""
    if not value:
        return

    keep = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)

    async for candidate in auto_aiter(value):
        if keep(candidate):
            yield candidate
# Mapping of filter names as seen by templates -> implementing callables.
# Filters with separate sync/async implementations register the
# async_variant wrapper (do_*) here. "d"/"e" are short aliases for
# "default"/"escape".
FILTERS = {
    "abs": abs,
    "attr": do_attr,
    "batch": do_batch,
    "capitalize": do_capitalize,
    "center": do_center,
    "count": len,
    "d": do_default,
    "default": do_default,
    "dictsort": do_dictsort,
    "e": escape,
    "escape": escape,
    "filesizeformat": do_filesizeformat,
    "first": do_first,
    "float": do_float,
    "forceescape": do_forceescape,
    "format": do_format,
    "groupby": do_groupby,
    "indent": do_indent,
    "int": do_int,
    "join": do_join,
    "last": do_last,
    "length": len,
    "list": do_list,
    "lower": do_lower,
    "items": do_items,
    "map": do_map,
    "min": do_min,
    "max": do_max,
    "pprint": do_pprint,
    "random": do_random,
    "reject": do_reject,
    "rejectattr": do_rejectattr,
    "replace": do_replace,
    "reverse": do_reverse,
    "round": do_round,
    "safe": do_mark_safe,
    "select": do_select,
    "selectattr": do_selectattr,
    "slice": do_slice,
    "sort": do_sort,
    "string": soft_str,
    "striptags": do_striptags,
    "sum": do_sum,
    "title": do_title,
    "trim": do_trim,
    "truncate": do_truncate,
    "unique": do_unique,
    "upper": do_upper,
    "urlencode": do_urlencode,
    "urlize": do_urlize,
    "wordcount": do_wordcount,
    "wordwrap": do_wordwrap,
    "xmlattr": do_xmlattr,
    "tojson": do_tojson,
}
| bsd-3-clause | b0a432d36e8068d68e0e50dc112122d2 | 28.080978 | 87 | 0.611467 | 3.761088 | false | false | false | false |
wal-e/wal-e | wal_e/worker/wabs/wabs_deleter.py | 1 | 1258 | from wal_e import retries
from wal_e import log_help
from wal_e.worker.base import _Deleter
try:
    # New class name in the Azure SDK sometime after v1.0.
    #
    # See
    # https://github.com/Azure/azure-sdk-for-python/blob/master/ChangeLog.txt
    from azure.common import AzureMissingResourceHttpError
except ImportError:
    # Backwards compatibility for older Azure drivers.
    from azure import WindowsAzureMissingResourceError \
        as AzureMissingResourceHttpError

# Module-level logger shared by the deleter below.
logger = log_help.WalELogger(__name__)
class Deleter(_Deleter):
    """Background deleter for Windows Azure Blob Service objects."""

    def __init__(self, wabs_conn, container):
        super(Deleter, self).__init__()
        self.wabs_conn = wabs_conn
        self.container = container

    @retries.retry()
    def _delete_batch(self, page):
        """Delete each blob in *page*, tolerating already-missing blobs."""
        # Azure Blob Service offers no bulk delete, so the blobs must be
        # removed one at a time.
        for item in page:
            try:
                self.wabs_conn.delete_blob(self.container, item.name)
            except AzureMissingResourceHttpError:
                # The blob is already gone; log and keep going.
                logger.warning(
                    msg='failed while deleting resource',
                    detail='Blob {0} does not exist in container {1}.'.format(
                        item.name, self.container))
| bsd-3-clause | 51bc29a565e25c9cc31296cb56ac56f0 | 32.105263 | 78 | 0.636725 | 4.032051 | false | false | false | false |
ultrabug/py3status | py3status/modules/external_script.py | 2 | 3844 | """
Display output of a given script.
Display output of any executable script set by `script_path`. Only the first
two lines of output will be used. The first line is used as the displayed
text. If the output has two or more lines, the second line is set as the text
color (and should hence be a valid hex color code such as #FF0000 for red).
The script should not have any parameters, but it could work.
Configuration parameters:
button_show_notification: button to show notification with full output
(default None)
cache_timeout: how often we refresh this module in seconds
(default 15)
convert_numbers: convert decimal numbers to a numeric type
(default True)
format: see placeholders below (default '{output}')
localize: should script output be localized (if available)
(default True)
script_path: script you want to show output of (compulsory)
(default None)
strip_output: shall we strip leading and trailing spaces from output
(default False)
Format placeholders:
{lines} number of lines in the output
{output} output of script given by "script_path"
Examples:
```
external_script {
format = "my name is {output}"
script_path = "/usr/bin/whoami"
}
```
@author frimdo ztracenastopa@centrum.cz
SAMPLE OUTPUT
{'full_text': 'script output'}
example
{'full_text': 'It is now: Wed Feb 22 22:24:13'}
"""
import re
# error raised when the compulsory script_path option is not configured
STRING_ERROR = "missing script_path"
class Py3status:
    """ """

    # available configuration parameters
    button_show_notification = None
    cache_timeout = 15
    convert_numbers = True
    format = "{output}"
    localize = True
    script_path = None
    strip_output = False

    def post_config_hook(self):
        """Fail fast when the compulsory script_path is not configured."""
        if not self.script_path:
            raise Exception(STRING_ERROR)

    def external_script(self):
        """Run the configured script and build the bar response.

        The first output line becomes the displayed text; if a second
        line looks like a ``#RRGGBB`` value it becomes the text color.
        """
        response = {"cached_until": self.py3.time_in(self.cache_timeout)}
        output_lines = None

        try:
            self.output = self.py3.command_output(
                self.script_path, shell=True, localized=self.localize
            )
            output_lines = self.output.splitlines()
            if len(output_lines) > 1:
                color_line = output_lines[1]
                if re.search(r"^#[0-9a-fA-F]{6}$", color_line):
                    response["color"] = color_line
        except self.py3.CommandError as e:
            # something went wrong, surface the error to the user
            self.py3.error(e.output or e.error)

        if output_lines:
            output = output_lines[0]
            if self.strip_output:
                output = output.strip()
            # Numeric-looking output is converted to a numeric type so
            # formats can compare it, for example:
            #
            #   external_script {
            #       format = "file is [\?if=output>10 big|small]"
            #       script_path = "cat /tmp/my_file | wc -l"
            #   }
            if self.convert_numbers is True:
                for caster in (int, float):
                    try:
                        output = caster(output)
                    except ValueError:
                        continue
                    break
        else:
            output = ""

        response["full_text"] = self.py3.safe_format(
            self.format, {"output": output, "lines": len(output_lines)}
        )
        return response

    def on_click(self, event):
        """Show a notification with the full output on the configured button."""
        if event["button"] == self.button_show_notification:
            self.py3.notify_user(self.output)
        self.py3.prevent_refresh()
# Standalone entry point: render this module's output in py3status test mode.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| bsd-3-clause | ccb39f933450d12db28e55bd5c927557 | 29.752 | 77 | 0.593132 | 4.115632 | false | false | false | false |
ultrabug/py3status | py3status/modules/usbguard.py | 2 | 5919 | r"""
Allow or Reject newly plugged USB devices using USBGuard.
Configuration parameters:
format: display format for this module
(default '{format_device}')
format_button_allow: display format for allow button filter
(default '\[Allow\]')
format_button_reject: display format for reject button filter
(default '\[Reject\]')
format_device: display format for USB devices
(default '{format_button_reject} [{name}|{usb_id}] {format_button_allow}')
format_device_separator: show separator if more than one (default ' ')
Format placeholders:
{device} number of USB devices
{format_device} format for USB devices
format_device:
{format_button_allow} button to allow the device
{format_button_reject} button to reject the device
{id} eg 1, 2, 5, 6, 7, 22, 23, 33
{policy} eg allow, block, reject
{usb_id} eg 054c:0268
{name} eg Poker II, PLAYSTATION(R)3 Controller
{serial} eg 0000:00:00.0
{port} eg usb1, usb2, usb3, 1-1, 4-1.2.1
{interface} eg 00:00:00:00 00:00:00 00:00:00
{hash} eg ihYz60+8pxZBi/cm+Q/4Ibrsyyzq/iZ9xtMDAh53sng
{parent_hash} eg npSDT1xuEIOSLNt2RT2EbFrE8XRZoV29t1n7kg6GxXg
Requires:
python-gobject: Python Bindings for GLib/GObject/GIO/GTK+
usbguard: USB device authorization policy framework
@author @cyrinux, @maximbaz
@license BSD
SAMPLE OUTPUT
[
{'full_text': '[Reject] ', 'urgent': True},
{'full_text': 'USB Flash Drive ', 'urgent': True},
{'full_text': '[Allow]', 'urgent': True}
]
"""
from threading import Thread
from gi.repository import GLib, Gio
import re
# hint shown when the usbguard D-Bus service cannot be reached
STRING_USBGUARD_DBUS = "start usbguard-dbus.service"
class Py3status:
    """ """

    # available configuration parameters
    format = "{format_device}"
    format_button_allow = r"\[Allow\]"
    format_button_reject = r"\[Reject\]"
    format_device = "{format_button_reject} [{name}|{usb_id}] {format_button_allow}"
    format_device_separator = " "

    def post_config_hook(self):
        """Prepare button placeholders, policy codes and device-rule parsers."""
        self.init = {
            # format_button_* placeholders actually used in format_device
            "format_button": self.py3.get_placeholders_list(
                self.format_device, "format_button_*"
            ),
            # numeric policy codes passed to applyDevicePolicy
            # NOTE(review): presumably 0=allow, 2=reject per usbguard's
            # Rule.Target enum — confirm against the usbguard D-Bus API.
            "target": {"allow": 0, "reject": 2},
        }
        # Regexes extracting fields from a usbguard device-rule string,
        # e.g.: block id 054c:0268 serial "..." name "..." hash "..." ...
        self.keys = [
            ("serial", re.compile(r"\S*serial \"(\S+)\"\S*")),
            ("policy", re.compile(r"^(\S+)")),
            ("usb_id", re.compile(r"id (\S+)")),
            ("name", re.compile(r"name \"(.*)\" hash")),
            ("hash", re.compile(r"hash \"(.*)\" parent-hash")),
            ("parent_hash", re.compile(r"parent-hash \"(.*)\" via-port")),
            ("port", re.compile(r"via-port \"(.*)\" with-interface")),
            ("interface", re.compile(r"with-interface { (.*) }$")),
        ]
        self._init_dbus()

    def _init_dbus(self):
        """Connect to usbguard over D-Bus and refresh on device signals."""
        self.bus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
        self.proxy = Gio.DBusProxy.new_sync(
            self.bus,
            Gio.DBusProxyFlags.NONE,
            None,
            "org.usbguard1",
            "/org/usbguard1/Devices",
            "org.usbguard.Devices1",
            None,
        )
        # Re-render the module whenever a device appears/disappears or
        # its policy changes.
        for signal in ["DevicePolicyChanged", "DevicePresenceChanged"]:
            self.bus.signal_subscribe(
                None,
                "org.usbguard.Devices1",
                signal,
                None,
                None,
                0,
                lambda *args: self.py3.update(),
            )
        # A GLib main loop is required for the subscribed signals to be
        # delivered; run it on a daemon thread.
        thread = Thread(target=lambda: GLib.MainLoop().run())
        thread.daemon = True
        thread.start()

    def _get_devices(self):
        """Return currently blocked devices as dicts of parsed rule fields."""
        try:
            raw_devices = self.proxy.listDevices("(s)", "block")
        except Exception:
            raise Exception(STRING_USBGUARD_DBUS)
        devices = []
        for device_id, string in raw_devices:
            device = {"id": device_id}
            # Undo backslash escaping in the rule string, then reinterpret
            # the raw bytes as UTF-8 (device names may contain escaped
            # multi-byte characters).
            string = string.encode("latin-1").decode("unicode_escape")
            string = string.encode("latin-1").decode("utf-8")
            for name, regex in self.keys:
                value = regex.findall(string) or None
                if value:
                    value = value[0]
                device[name] = value
            devices.append(device)
        return devices

    def _format_device(self, devices):
        """Render each device line with clickable allow/reject buttons."""
        device_info = []
        for device in devices:
            for btn in self.init["format_button"]:
                composite = self.py3.safe_format(getattr(self, btn), device)
                # The index "id/allow" or "id/reject" is decoded in on_click().
                device[btn] = self.py3.composite_update(
                    composite,
                    {"index": "{}/{}".format(device["id"], btn.split("_")[-1])},
                )
            device_info.append(self.py3.safe_format(self.format_device, device))
        format_device_separator = self.py3.safe_format(self.format_device_separator)
        format_device = self.py3.composite_join(format_device_separator, device_info)
        return format_device

    def usbguard(self):
        """py3status entry point: list blocked devices, urgent while any exist."""
        devices = self._get_devices()
        usbguard_data = {
            "device": len(devices),
            "format_device": self._format_device(devices),
        }
        return {
            # Updates are driven by the D-Bus signals, not by polling.
            "cached_until": self.py3.CACHE_FOREVER,
            "full_text": self.py3.safe_format(self.format, usbguard_data),
            "urgent": True,
        }

    def on_click(self, event):
        """Apply the allow/reject policy encoded in the clicked button's index."""
        if isinstance(event["index"], int):
            # Click on plain text (auto-numbered index), not on a button.
            return
        device_id, policy_name = event["index"].split("/")
        policy = self.init["target"][policy_name]
        self.proxy.applyDevicePolicy("(uub)", int(device_id), policy, False)
# Standalone entry point: render this module's output in py3status test mode.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| bsd-3-clause | ca43987b05b706ee4e47bc7a008fb569 | 32.440678 | 85 | 0.546545 | 3.68785 | false | false | false | false |
ultrabug/py3status | py3status/modules/conky.py | 2 | 13937 | r"""
Display Conky objects/variables on the bar.
Configuration parameters:
config: specify configuration settings for conky (default {})
format: display format for this module (default None)
thresholds: specify color thresholds to use (default [])
Format placeholders:
According to man page, Conky has more than 250 built-in objects/variables.
See `man -P 'less -p OBJECTS/VARIABLES' conky` for a full list of Conky
objects/variables to use. Not all of Conky objects/variables will be
supported or usable.
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Replace spaces with periods.
Examples:
```
# add conky config options
# See `man -P "less -p 'CONFIGURATION SETTINGS'" conky` for a full list
# of Conky configuration options. Not all of Conky configuration options
# will be supported or usable.
conky {
config = {
'update_interval': 10 # update interval for conky
'update_interval_on_battery': 60 # update interval when on battery
'format_human_readable': True, # if False, print in bytes
'short_units': True, # shortens units, eg kiB->k, GiB->G
'uppercase': True, # upper placeholders
}
}
# display ip address
order += "conky addr"
conky addr {
format = 'IP [\?color=orange {addr eno1}]'
}
# display load averages
order += "conky loadavg"
conky loadavg {
format = 'Loadavg '
format += '[\?color=lightgreen {loadavg 1} ]'
format += '[\?color=lightgreen {loadavg 2} ]'
format += '[\?color=lightgreen {loadavg 3}]'
}
# exec commands at different intervals, eg 5s, 60s, and 3600s
order += "conky date"
conky date {
format = 'Exec '
format += '[\?color=good {execi 5 "date"}] '
format += '[\?color=degraded {execi 60 "uptime -p"}] '
format += '[\?color=bad {execi 3600 "uptime -s"}]'
}
# display diskio read, write, etc
order += "conky diskio"
conky diskio {
format = 'Disk IO [\?color=darkgray&show sda] '
format += '[\?color=lightskyblue '
format += '{diskio_read sda}/{diskio_write sda} '
format += '({diskio sda})]'
# format += ' '
# format += '[\?color=darkgray&show sdb] '
# format += '[\?color=lightskyblue '
# format += '{diskio_read sdb}/{diskio_write sdb} '
# format += '({diskio sdb})]'
config = {'short_units': True}
}
# display total number of processes and running processes
order += "conky proc"
conky proc {
format = 'Processes [\?color=cyan {processes}/{running_processes}]'
}
# display top 3 cpu (+mem_res) processes
order += "conky top_cpu" {
conky top_cpu {
format = 'Top [\?color=darkgray '
format += '{top name 1} '
format += '[\?color=deepskyblue {top mem_res 1}] '
format += '[\?color=lightskyblue {top cpu 1}%] '
format += '{top name 2} '
format += '[\?color=deepskyblue {top mem_res 2}] '
format += '[\?color=lightskyblue {top cpu 2}%] '
format += '{top name 3} '
format += '[\?color=deepskyblue {top mem_res 3}] '
format += '[\?color=lightskyblue {top cpu 3}%]]'
config = {'short_units': True}
}
# display top 3 memory processes
order += "conky top_mem"
conky top_mem {
format = 'Top Mem [\?color=darkgray '
format += '{top_mem name 1} '
format += '[\?color=yellowgreen {top_mem mem_res 1}] '
format += '[\?color=lightgreen {top_mem mem 1}%] '
format += '{top_mem name 2} '
format += '[\?color=yellowgreen {top_mem mem_res 2}] '
format += '[\?color=lightgreen {top_mem mem 2}%] '
format += '{top_mem name 3} '
format += '[\?color=yellowgreen {top_mem mem_res 3}] '
format += '[\?color=lightgreen {top_mem mem 3}%]]'
config = {'short_units': True}
}
# display memory, memperc, membar + thresholds
order += "conky memory"
conky memory {
format = 'Memory [\?color=lightskyblue {mem}/{memmax}] '
format += '[\?color=memperc {memperc}% \[{membar}\]]'
thresholds = [
(0, 'darkgray'), (0.001, 'good'), (50, 'degraded'),
(75, 'orange'), (85, 'bad')
]
}
# display swap, swapperc, swapbar + thresholds
order += "conky swap"
conky swap {
format = 'Swap [\?color=lightcoral {swap}/{swapmax}] '
format += '[\?color=swapperc {swapperc}% \[{swapbar}\]]'
thresholds = [
(0, 'darkgray'), (0.001, 'good'), (50, 'degraded'),
(75, 'orange'), (85, 'bad')
]
}
# display up/down speed and up/down total
order += "conky network"
conky network {
format = 'Speed [\?color=title {upspeed eno1}/{downspeed eno1}] '
format += 'Total [\?color=title {totalup eno1}/{totaldown eno1}]'
color_title = '#ff6699'
}
# display file systems + thresholds
order += "conky filesystem"
conky filesystem {
# home filesystem
format = 'Home [\?color=violet {fs_used /home}/{fs_size /home} '
format += '[\?color=fs_used_perc./home '
format += '{fs_used_perc /home}% \[{fs_bar /home}\]]]'
# hdd filesystem
# format += ' HDD [\?color=violet {fs_used '
# format += '/run/media/user/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
# format += '}/{fs_size '
# format += '/run/media/user/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
# format += '}[\?color=fs_used_perc.'
# format += '/run/media/user/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
# format += ' {fs_used_perc '
# format += '/run/media/user/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
# format += '}% \[{fs_bar '
# format += '/run/media/user/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
# format += '}\]]]'
thresholds = [
(0, 'darkgray'), (0.001, 'good'), (50, 'degraded'),
(75, 'orange'), (85, 'bad')
]
}
# show cpu percents/bars + thresholds
order += "conky cpu"
conky cpu {
format = 'CPU '
format += '[\?color=cpu.cpu0 {cpu cpu0}% {cpubar cpu0}] '
format += '[\?color=cpu.cpu1 {cpu cpu1}% {cpubar cpu1}] '
format += '[\?color=cpu.cpu2 {cpu cpu2}% {cpubar cpu2}] '
format += '[\?color=cpu.cpu3 {cpu cpu3}% {cpubar cpu3}]'
thresholds = [
(0, 'darkgray'), (0.001, 'good'), (50, 'degraded'),
(75, 'orange'), (85, 'bad')
]
}
# show more examples, many outputs
order += "conky info"
conky info {
format = '[\?color=title&show OS] [\?color=output {distribution}] '
format += '[\?color=title&show CPU] [\?color=output {cpu cpu0}%] '
format += '[\?color=title&show MEM] '
format += '[\?color=output {mem}/{memmax} ({memperc}%)] '
format += '[\?color=title&show HDD] [\?color=output {fs_used_perc}%] '
format += '[\?color=title&show Kernel] [\?color=output {kernel}] '
format += '[\?color=title&show Loadavg] [\?color=output {loadavg 1}] '
format += '[\?color=title&show Uptime] [\?color=output {uptime}] '
format += '[\?color=title&show Freq GHZ] [\?color=output {freq_g}]'
color_title = '#ffffff'
color_output = '#00bfff'
}
# change console bars - shoutout to su8 for adding this
conky {
config = {
'console_bar_fill': "'#'",
'console_bar_unfill': "'_'",
'default_bar_width': 10,
}
}
# display nvidia stats - shoutout to brndnmtthws for fixing this
# See `man -P 'less -p nvidia\ argument' conky` for more nvidia variables.
order += "conky nvidia"
conky nvidia {
format = 'GPU Temp [\?color=greenyellow {nvidia temp}] '
format += 'GPU Freq [\?color=greenyellow {nvidia gpufreq}] '
format += 'Mem Freq [\?color=greenyellow {nvidia memfreq}] '
format += 'MTR Freq [\?color=greenyellow {nvidia mtrfreq}] '
format += 'Perf [\?color=greenyellow {nvidia perflevel}] '
format += 'Mem Perc [\?color=greenyellow {nvidia memperc}]'
config = {
'nvidia_display': "':0'"
}
}
```
@author lasers
SAMPLE OUTPUT
[{'full_text': 'IP '}, {'full_text': u'192.168.1.113', 'color': '#ffa500'}]
diskio
[
{'full_text': 'Disk IO '},
{'full_text': 'sda ', 'color': '#a9a9a9'},
{'full_text': '0B/285K (285K) ', 'color': '#87cefa'},
{'full_text': 'sdb ', 'color': '#a9a9a9'},
{'full_text': '40K/116K (156K)', 'color': '#87cefa'},
]
processes
[
{'full_text': 'Processes '}, {'full_text': u'342/0', 'color': '#00ffff'}
]
top
[
{'full_text': 'Top '},
{'full_text': 'firefox-esr ', 'color': '#a9a9a9'},
{'full_text': '512M ', 'color': '#00bfff'},
{'full_text': '0.25% ', 'color': '#87cefa'},
{'full_text': 'htop ', 'color': '#a9a9a9'},
{'full_text': '2.93M ', 'color': '#00bfff'},
{'full_text': '0.17%', 'color': '#87cefa'},
]
top_mem
[
{'full_text': 'Top Mem '},
{'full_text': 'chrome ', 'color': '#a9a9a9'},
{'full_text': '607M ', 'color': '#006400'},
{'full_text': '7.86% ', 'color': '#90ee90'},
{'full_text': 'thunderbird ', 'color': '#a9a9a9'},
{'full_text': '449M ', 'color': '#006400'},
{'full_text': '5.82%', 'color': '#90ee90'},
]
network
[
{'full_text': 'Speed '},
{'color': '#ff6699', 'full_text': '15B/84B '},
{'full_text': 'Total '},
{'color': '#ff6699', 'full_text': '249MiB/4.27GiB'},
]
memory
[
{'full_text': 'Memory '},
{'full_text': '2.68G/7.72G ', 'color': '#87cefa'},
{'full_text': '34% [###.......]', 'color': '#8ae234'}
]
swap
[
{'full_text': 'Swap '},
{'full_text': '4.5MiB/7.72GiB ', 'color': '#f08080'},
{'full_text': '0% [..........]', 'color': '#a9a9a9'}
]
disk
[
{'full_text': 'Home '},
{'full_text': '167G/431G ', 'color': '#ee82ee'},
{'full_text': '38% [####......]', 'color': '#8ae234'},
]
nvidia
[
{'full_text': 'GPU Temp '}, {'full_text': '64 ', 'color': '#adff2f'},
{'full_text': 'GPU Freq '}, {'full_text': '460 ', 'color': '#adff2f'},
{'full_text': 'Mem Freq '}, {'full_text': '695', 'color': '#adff2f'},
]
nvidia
[
{'full_text': 'MTR Freq '}, {'full_text': '1390 ', 'color': '#adff2f'},
{'full_text': 'Perf '}, {'full_text': '1 ', 'color': '#adff2f'},
{'full_text': 'Mem Perc '}, {'full_text': '61', 'color': '#adff2f'},
]
bar
[
{'full_text': '#.... ', 'color': '#ffffff'},
{'full_text': '##... ', 'color': '#00FF00'},
{'full_text': '###.. ', 'color': '#FFA500'},
{'full_text': '####. ', 'color': '#FFFF00'},
{'full_text': '#####', 'color': '#FF0000'},
]
"""
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from tempfile import NamedTemporaryFile
from json import dumps
from pathlib import Path
# user-facing error strings raised from post_config_hook
STRING_NOT_INSTALLED = "not installed"
STRING_MISSING_FORMAT = "missing format"
class Py3status:
    """ """

    # available configuration parameters
    config = {}
    format = None
    thresholds = []

    def post_config_hook(self):
        """Validate the config, write a conky config file and start the reader.

        Raises:
            Exception: if conky is not installed or `format` is not set.
        """
        if not self.py3.check_commands("conky"):
            raise Exception(STRING_NOT_INSTALLED)
        elif not self.format:
            raise Exception(STRING_MISSING_FORMAT)
        # placeholders: py3status uses dots (eg "{cpu.cpu0}") where conky
        # objects use spaces (eg "${cpu cpu0}")
        placeholders = self.py3.get_placeholders_list(self.format)
        _placeholders = [x.replace(".", " ") for x in placeholders]
        colors = self.py3.get_color_names_list(self.format)
        _colors = []
        for color in colors:
            # skip colors the user pinned with an explicit color_* setting
            if not getattr(self, f"color_{color}", None):
                _colors.append(color.replace(".", " "))
        self.placeholders = placeholders + colors
        conky_placeholders = _placeholders + _colors
        # init
        self.cache_names = {}
        self.thresholds_init = colors
        self.config.update({"out_to_x": False, "out_to_console": True})
        self.separator = "|SEPARATOR|"  # must be upper
        # make an output: turn the config dict into conky's "key=value" syntax
        config = dumps(self.config, separators=(",", "=")).replace('"', "")
        text = self.separator.join([f"${{{x}}}" for x in conky_placeholders])
        tmp = f"conky.config = {config}\nconky.text = [[{text}]]"
        # write tmp output to '/tmp/py3status-conky_*', make a command
        self.tmpfile = NamedTemporaryFile(
            prefix="py3status_conky-", suffix=".conf", delete=False
        )
        self.tmpfile.write(str.encode(tmp))
        self.tmpfile.close()
        self.conky_command = f"conky -c {self.tmpfile.name}".split()
        # reader thread, daemonized so it never blocks py3status shutdown
        self.line = ""
        self.error = None
        self.process = None
        self.t = Thread(target=self._start_loop)
        self.t.daemon = True
        self.t.start()

    def _cleanup(self):
        """Kill conky (if started), remove the temp config, repaint the module."""
        # self.process stays None when Popen itself failed in _start_loop
        if self.process is not None:
            self.process.kill()
        try:
            # unlink the file *name*: Path() cannot take the
            # NamedTemporaryFile wrapper object itself
            Path(self.tmpfile.name).unlink()
        except OSError:
            # already removed, eg kill() running after _start_loop's cleanup
            pass
        self.py3.update()

    def _start_loop(self):
        # Reader thread: push every new line of conky output to py3status.
        try:
            self.process = Popen(self.conky_command, stdout=PIPE, stderr=STDOUT)
            while True:
                line = self.process.stdout.readline().decode()
                # conky prefixes its own error messages with "conky:"
                if self.process.poll() is not None or "conky:" in line:
                    raise Exception(line)
                if self.line != line:
                    self.line = line
                    self.py3.update()
        except Exception as err:
            # drop the leading token (eg "conky:") and keep the message
            self.error = " ".join(format(err).split()[1:])
        finally:
            self._cleanup()

    def conky(self):
        if self.error:
            self.py3.error(self.error, self.py3.CACHE_FOREVER)
        # map the raw conky values back onto the py3status placeholder names
        conky_data = map(str.strip, self.line.split(self.separator))
        conky_data = dict(zip(self.placeholders, conky_data))
        if self.thresholds_init:
            # also expose space-named keys under their dotted aliases so
            # threshold color names resolve; alias names are cached
            for k in list(conky_data):
                try:
                    conky_data[self.cache_names[k]] = conky_data[k]
                except KeyError:
                    self.cache_names[k] = k.replace(" ", ".")
                    conky_data[self.cache_names[k]] = conky_data[k]
            for x in self.thresholds_init:
                if x in conky_data:
                    self.py3.threshold_get_color(conky_data[x], x)
        return {
            "cached_until": self.py3.CACHE_FOREVER,
            "full_text": self.py3.safe_format(self.format, conky_data),
        }

    def kill(self):
        # called by py3status on exit; _cleanup tolerates being run twice
        self._cleanup()
# Allow running this module standalone through py3status's test harness.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
| bsd-3-clause | 982409ee056b0d03014caefe2a535ab5 | 30.603175 | 80 | 0.567841 | 3.153167 | false | false | false | false |
ultrabug/py3status | py3status/modules/net_iplist.py | 2 | 5194 | """
Display list of network interfaces and IP addresses.
This module supports both IPv4 and IPv6. There is the possibility to blacklist
interfaces and IPs, as well as to show interfaces with no IP address. It will
show an alternate text if no IPs are available.
Configuration parameters:
cache_timeout: refresh interval for this module in seconds.
(default 30)
format: format of the output.
(default 'Network: {format_iface}')
format_iface: format string for the list of IPs of each interface.
(default '{iface}:[ {ip4}][ {ip6}]')
format_no_ip: string to show if there are no IPs to display.
(default 'no connection')
iface_blacklist: list of interfaces to ignore. Accepts shell-style wildcards.
(default ['lo'])
iface_sep: string to write between interfaces.
(default ' ')
ip_blacklist: list of IPs to ignore. Accepts shell-style wildcards.
(default [])
ip_sep: string to write between IP addresses.
(default ',')
remove_empty: do not show interfaces with no IP.
(default True)
Format placeholders:
{format_iface} the format_iface string.
Format placeholders for format_iface:
{iface} name of the interface.
{ip4} list of IPv4 of the interface.
{ip6} list of IPv6 of the interface.
Color options:
color_bad: no IPs to show
color_good: IPs to show
Requires:
ip: utility found in iproute2 package
Examples:
```
net_iplist {
iface_blacklist = []
ip_blacklist = ['127.*', '::1']
}
```
@author guiniol
SAMPLE OUTPUT
{'color': '#00FF00',
'full_text': u'Network: wls1: 192.168.1.3 fe80::f861:44bd:694a:b99c'}
"""
import re
from fnmatch import fnmatch
class Py3status:
    """ """

    # available configuration parameters
    cache_timeout = 30
    format = "Network: {format_iface}"
    format_iface = "{iface}:[ {ip4}][ {ip6}]"
    format_no_ip = "no connection"
    iface_blacklist = ["lo"]
    iface_sep = " "
    ip_blacklist = []
    ip_sep = ","
    remove_empty = True

    def post_config_hook(self):
        """Compile the regexes used to parse `ip address show` output."""
        # "2: wls1: <BROADCAST,...>"  ->  interface name
        self.iface_re = re.compile(r"\d+: (?P<iface>[\w\-@]+):")
        # "    inet 192.168.1.3/24 ..."  ->  IPv4 address
        self.ip_re = re.compile(r"\s+inet (?P<ip4>[\d.]+)(?:/| )")
        # only global dynamic IPv6 addresses are reported
        self.ip6_re = re.compile(
            r"\s+inet6 (?P<ip6>[\da-f:]+)(?:/\d{1,3}| ) scope global dynamic"
        )

    def net_iplist(self):
        """Build the module response listing interfaces and their IPs."""
        connected = False
        segments = []
        for name, addresses in self._get_data().items():
            if not self._check_blacklist(name, self.iface_blacklist):
                continue
            kept_ip4 = [
                ip
                for ip in addresses.get("ip4", [])
                if self._check_blacklist(ip, self.ip_blacklist)
            ]
            kept_ip6 = [
                ip
                for ip in addresses.get("ip6", [])
                if self._check_blacklist(ip, self.ip_blacklist)
            ]
            if kept_ip4 or kept_ip6:
                connected = True
            segments.append(
                self.py3.safe_format(
                    self.format_iface,
                    {
                        "iface": name,
                        "ip4": self.ip_sep.join(kept_ip4),
                        "ip6": self.ip_sep.join(kept_ip6),
                    },
                )
            )
        iface_composite = self.py3.composite_join(self.iface_sep, segments)
        if connected:
            full_text = self.py3.safe_format(
                self.format, {"format_iface": iface_composite}
            )
            color = self.py3.COLOR_GOOD
        else:
            full_text = self.py3.safe_format(
                self.format_no_ip, {"format_iface": iface_composite}
            )
            color = self.py3.COLOR_BAD
        return {
            "cached_until": self.py3.time_in(seconds=self.cache_timeout),
            "full_text": full_text,
            "color": color,
        }

    def _get_data(self):
        """Parse `ip address show` into {iface: {"ip4": [...], "ip6": [...]}}."""
        data = {}
        for line in self.py3.command_output(["ip", "address", "show"]).splitlines():
            found = self.iface_re.match(line)
            if found:
                cur_iface = found.group("iface")
                # record the interface even without addresses when requested
                if not self.remove_empty:
                    data[cur_iface] = {}
                continue
            for key, regex in (("ip4", self.ip_re), ("ip6", self.ip6_re)):
                found = regex.match(line)
                if found:
                    data.setdefault(cur_iface, {}).setdefault(key, []).append(
                        found.group(key)
                    )
                    break
        return data

    def _check_blacklist(self, string, blacklist):
        """Return True when `string` matches no pattern in `blacklist`."""
        return not any(fnmatch(string, pattern) for pattern in blacklist)
# Allow running this module standalone through py3status's test harness.
if __name__ == "__main__":
    """
    Test this module by calling it directly.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
| bsd-3-clause | bfb50e1a4217124763b4de197dd96065 | 29.023121 | 86 | 0.535425 | 3.858841 | false | false | false | false |
ultrabug/py3status | py3status/modules/nvidia_smi.py | 2 | 7208 | r"""
Display NVIDIA properties currently exhibiting in the NVIDIA GPUs.
nvidia-smi, short for NVIDIA System Management Interface program, is a cross
platform tool that supports all standard NVIDIA driver-supported Linux distros.
Configuration parameters:
cache_timeout: refresh interval for this module (default 10)
format: display format for this module (default '{format_gpu}')
format_gpu: display format for NVIDIA GPUs
*(default '{gpu_name} [\?color=temperature.gpu {temperature.gpu}°C] '
'[\?color=memory.used_percent {memory.used_percent}%]')*
format_gpu_separator: show separator if more than one (default ' ')
memory_unit: specify memory unit, eg 'KiB', 'MiB', 'GiB', otherwise auto
(default None)
thresholds: specify color thresholds to use
(default [(0, 'good'), (65, 'degraded'), (75, 'orange'), (85, 'bad')])
Format placeholders:
{format_gpu} format for NVIDIA GPUs
format_gpu placeholders:
{index} Zero based index of the GPU.
{count} The number of NVIDIA GPUs in the system
{driver_version} The version of the installed NVIDIA display driver
{gpu_name} The official product name of the GPU
{gpu_uuid} Globally unique immutable identifier of the GPU
{memory.free} Total free memory
{memory.free_unit} Total free memory unit
{memory.total} Total installed GPU memory
{memory.total_unit} Total installed GPU memory unit
{memory.used} Total memory allocated by active contexts
{memory.used_percent} Total memory allocated by active contexts percentage
{memory.used_unit} Total memory unit
{temperature.gpu} Core GPU temperature in degrees C
Use `python /path/to/nvidia_smi.py --list-properties` for a full list of
supported NVIDIA properties to use. Not all supported NVIDIA properties
will be usable. See `nvidia-smi --help-query-gpu` for more information.
Color thresholds:
format_gpu:
`xxx`: print a color based on the value of NVIDIA `xxx` property
Requires:
nvidia-smi: command line interface to query NVIDIA devices
Examples:
```
# display nvidia properties
nvidia_smi {
format_gpu = '{gpu_name} [\?color=temperature.gpu {temperature.gpu}°C] '
format_gpu += '[\?color=memory.used_percent {memory.used} {memory.used_unit}'
format_gpu += '[\?color=darkgray&show \|]{memory.used_percent:.1f}%]'
}
```
@author lasers
SAMPLE OUTPUT
[
{'full_text': 'Quadro NVS 295 '},
{'color': '#00ff00', 'full_text': '51°C '},
{'color': '#00ff00', 'full_text': '60.8%'},
]
percent
[
{'full_text': 'GPU '},
{'full_text': '73°C ', 'color': '#ffff00'},
{'full_text': '192 MiB', 'color': '#ffa500'},
{'full_text': '|', 'color': '#a9a9a9'},
{'full_text': '75.3%', 'color': '#ffa500'}
]
"""
# error raised when the nvidia-smi binary cannot be found
STRING_NOT_INSTALLED = "not installed"
class Py3status:
    """ """

    # available configuration parameters
    cache_timeout = 10
    format = "{format_gpu}"
    format_gpu = (
        r"{gpu_name} [\?color=temperature.gpu {temperature.gpu}°C] "
        r"[\?color=memory.used_percent {memory.used_percent}%]"
    )
    format_gpu_separator = " "
    memory_unit = None
    thresholds = [(0, "good"), (65, "degraded"), (75, "orange"), (85, "bad")]

    def post_config_hook(self):
        """Verify nvidia-smi exists and precompute the query command.

        Raises:
            Exception: if the nvidia-smi binary is not available.
        """
        base_command = "nvidia-smi --format=csv,noheader,nounits --query-gpu="
        if not self.py3.check_commands(base_command.split()[0]):
            raise Exception(STRING_NOT_INSTALLED)
        requested = self.py3.get_placeholders_list(self.format_gpu)
        # force one decimal place on any used_percent placeholder
        percent_formats = {x: ":.1f" for x in requested if "used_percent" in x}
        self.format_gpu = self.py3.update_placeholder_formats(
            self.format_gpu, percent_formats
        )
        # memory.used/memory.total are always queried: used_percent needs them
        memory_names = set()
        query_names = {"memory.used", "memory.total"}
        for name in requested:
            if "used_percent" in name:
                continue  # derived locally, not a real nvidia-smi property
            if name.startswith("memory"):
                if name.endswith("_unit"):
                    name = name[:-5]  # strip "_unit" -> real property name
                memory_names.add(name)
            query_names.add(name)
        self.properties = list(query_names)
        self.memory_properties = list(memory_names)
        self.memory_unit = self.memory_unit or "B"
        self.nvidia_command = base_command + ",".join(self.properties)
        self.thresholds_init = self.py3.get_color_names_list(self.format_gpu)

    def _get_nvidia_data(self):
        # one CSV line per GPU, columns in self.properties order
        return self.py3.command_output(self.nvidia_command)

    def nvidia_smi(self):
        """Render one formatted segment per detected GPU."""
        segments = []
        for line in self._get_nvidia_data().splitlines():
            gpu = dict(zip(self.properties, line.split(", ")))
            gpu["memory.used_percent"] = (
                float(gpu["memory.used"]) / float(gpu["memory.total"]) * 100
            )
            # convert memory figures (reported in MiB) to the requested unit
            for name in self.memory_properties:
                raw_bytes = float(gpu[name]) * 1024**2
                value, unit = self.py3.format_units(raw_bytes, self.memory_unit)
                gpu[name] = value
                gpu[name + "_unit"] = unit
            for threshold in self.thresholds_init:
                if threshold in gpu:
                    self.py3.threshold_get_color(gpu[threshold], threshold)
            segments.append(self.py3.safe_format(self.format_gpu, gpu))
        separator = self.py3.safe_format(self.format_gpu_separator)
        format_gpu = self.py3.composite_join(separator, segments)
        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(self.format, {"format_gpu": format_gpu}),
        }
# Standalone entry point: `--list-properties` dumps every nvidia-smi property
# the local GPU supports; otherwise the module runs in test mode.
if __name__ == "__main__":
    from sys import argv
    if "--list-properties" in argv:
        from sys import exit
        from json import dumps
        from subprocess import check_output
        help_cmd = "nvidia-smi --help-query-gpu"
        help_data = check_output(help_cmd.split()).decode()
        new_properties = []
        # pseudo-values from the help text that are not queryable properties
        e = ["Default", "Exclusive_Thread", "Exclusive_Process", "Prohibited"]
        for line in help_data.splitlines():
            # property names are printed double-quoted in the help output
            if line.startswith('"'):
                properties = line.split('"')[1::2]
                for name in properties:
                    if name not in e:
                        new_properties.append(name)
        properties = ",".join(new_properties)
        gpu_cmd = "nvidia-smi --format=csv,noheader,nounits --query-gpu="
        gpu_data = check_output((gpu_cmd + properties).split()).decode()
        new_gpus = []
        msg = "This GPU contains {} supported properties."
        for line in gpu_data.splitlines():
            gpu = dict(zip(new_properties, line.split(", ")))
            # drop properties this GPU does not actually support
            gpu = {k: v for k, v in gpu.items() if "[Not Supported]" not in v}
            gpu["= " + msg.format(len(gpu))] = ""
            gpu["=" * (len(msg) + 2)] = ""
            new_gpus.append(gpu)
        print(dumps(new_gpus, sort_keys=True, indent=4))
        exit()
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
| bsd-3-clause | eb972d59c7d106c7e9563bbbe20ba534 | 35.75 | 87 | 0.600861 | 3.592519 | false | false | false | false |
ultrabug/py3status | py3status/modules/spotify.py | 2 | 7535 | """
Display song currently playing in Spotify.
Configuration parameters:
button_next: button to switch to next song (default None)
button_play_pause: button to toggle play/pause (default None)
button_previous: button to switch to previous song (default None)
cache_timeout: how often to update the bar (default 5)
dbus_client: Used to override which app is used as a client for
spotify. If you use spotifyd as a client, set this to
'org.mpris.MediaPlayer2.spotifyd'
(default 'org.mpris.MediaPlayer2.spotify')
format: see placeholders below (default '{artist} : {title}')
format_down: define output if spotify is not running
(default 'Spotify not running')
format_stopped: define output if spotify is not playing
(default 'Spotify stopped')
sanitize_titles: whether to remove meta data from album/track title
(default True)
sanitize_words: which meta data to remove
*(default ['bonus', 'demo', 'edit', 'explicit', 'extended',
'feat', 'mono', 'remaster', 'stereo', 'version'])*
Format placeholders:
{album} album name
    {artist} artist name (first one)
{playback} state of the playback: Playing, Paused
{time} time duration of the song
{title} name of the song
Color options:
color_offline: Spotify is not running, defaults to color_bad
color_paused: Song is stopped or paused, defaults to color_degraded
color_playing: Song is playing, defaults to color_good
Requires:
python-dbus: to access dbus in python
spotify: a proprietary music streaming service
Examples:
```
spotify {
button_next = 4
button_play_pause = 1
button_previous = 5
format = "{title} by {artist} -> {time}"
format_down = "no Spotify"
}
```
@author Pierre Guilbert, Jimmy Garpehäll, sondrele, Andrwe
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': 'Rick Astley : Never Gonna Give You Up'}
paused
{'color': '#FFFF00', 'full_text': 'Rick Astley : Never Gonna Give You Up'}
stopped
{'color': '#FF0000', 'full_text': 'Spotify stopped'}
"""
import re
from datetime import timedelta
from time import sleep
import dbus
# dbus-send command template used to drive the MPRIS player interface
SPOTIFY_CMD = """dbus-send --print-reply --dest={dbus_client}
/org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.{cmd}"""
class Py3status:
    """ """

    # available configuration parameters
    button_next = None
    button_play_pause = None
    button_previous = None
    cache_timeout = 5
    dbus_client = "org.mpris.MediaPlayer2.spotify"
    format = "{artist} : {title}"
    format_down = "Spotify not running"
    format_stopped = "Spotify stopped"
    sanitize_titles = True
    sanitize_words = [
        "bonus",
        "demo",
        "edit",
        "explicit",
        "extended",
        "feat",
        "mono",
        "remaster",
        "stereo",
        "version",
    ]

    def _spotify_cmd(self, action):
        """Return the dbus-send command line for the given player action."""
        return SPOTIFY_CMD.format(dbus_client=self.dbus_client, cmd=action)

    def post_config_hook(self):
        """Compile the title sanitizing regexes from `sanitize_words`."""
        # Strip everything following a hyphen, comma, semicolon or slash
        # when it contains a metadata word, eg:
        #   - Remastered 2012
        #   / Radio Edit
        #   ; Remastered
        self.after_delimiter = self._compile_re(
            r"([\-,;/])([^\-,;/])*(META_WORDS_HERE).*"
        )
        # Strip bracketed chunks containing a metadata word, eg:
        #   (Remastered 2017)
        #   [Single]
        #   (Bonus Track)
        self.inside_brackets = self._compile_re(
            r"([\(\[][^)\]]*?(META_WORDS_HERE)[^)\]]*?[\)\]])"
        )

    def _compile_re(self, expression):
        """
        Expand META_WORDS_HERE with the configured words and compile a
        case-insensitive regex.
        """
        pattern = expression.replace("META_WORDS_HERE", "|".join(self.sanitize_words))
        return re.compile(pattern, re.IGNORECASE)

    def _get_playback_status(self):
        """Return the playback state: "Playing", "Paused" or "Stopped"."""
        return self.player.Get("org.mpris.MediaPlayer2.Player", "PlaybackStatus")

    def _get_text(self):
        """Return a (text, color) tuple describing the current track."""
        bus = dbus.SessionBus()
        try:
            self.__bus = bus.get_object(self.dbus_client, "/org/mpris/MediaPlayer2")
            self.player = dbus.Interface(self.__bus, "org.freedesktop.DBus.Properties")
            try:
                meta = self.player.Get("org.mpris.MediaPlayer2.Player", "Metadata")
                album = meta.get("xesam:album")
                artist = meta.get("xesam:artist")[0]
                length_us = meta.get("mpris:length")
                duration = str(timedelta(seconds=length_us // 1_000_000))
                title = meta.get("xesam:title")
                if self.sanitize_titles:
                    album = self._sanitize_title(album)
                    title = self._sanitize_title(title)
                playback = self._get_playback_status()
                if playback == "Playing":
                    color = self.py3.COLOR_PLAYING or self.py3.COLOR_GOOD
                else:
                    color = self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED
            except Exception:
                # spotify is on the bus but nothing is playing
                return (
                    self.format_stopped,
                    self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED,
                )
            return (
                self.py3.safe_format(
                    self.format,
                    dict(
                        title=title,
                        artist=artist,
                        album=album,
                        time=duration,
                        playback=playback,
                    ),
                ),
                color,
            )
        except Exception:
            # no spotify client reachable on the session bus
            return (self.format_down, self.py3.COLOR_OFFLINE or self.py3.COLOR_BAD)

    def _sanitize_title(self, title):
        """Strip redundant release metadata from an album/track title."""
        cleaned = self.inside_brackets.sub("", title)
        cleaned = self.after_delimiter.sub("", cleaned)
        return cleaned.strip()

    def spotify(self):
        """Return the module response with the current track text."""
        text, color = self._get_text()
        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "color": color,
            "full_text": text,
        }

    def on_click(self, event):
        """Handle mouse clicks: play/pause, next and previous track."""
        button = event["button"]
        if button == self.button_play_pause:
            # we do not use the 'PlayPause' command because of a bug
            # in spotifyd: https://github.com/Spotifyd/spotifyd/issues/890
            if self._get_playback_status() == "Playing":
                self.py3.command_run(self._spotify_cmd("Pause"))
            else:
                self.py3.command_run(self._spotify_cmd("Play"))
            sleep(0.1)
        elif button == self.button_next:
            self.py3.command_run(self._spotify_cmd("Next"))
            sleep(0.1)
        elif button == self.button_previous:
            self.py3.command_run(self._spotify_cmd("Previous"))
            sleep(0.1)
# Allow running this module standalone through py3status's test harness.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
| bsd-3-clause | 4c7dfeefad3d0ebbd35313a02290b0d1 | 31.899563 | 91 | 0.569684 | 3.861609 | false | false | false | false |
ultrabug/py3status | py3status/modules/github.py | 2 | 9743 | """
Display Github notifications and issue/pull requests for a repo.
To check notifications a Github `username` and `personal access token` are
required. You can create a personal access token at
https://github.com/settings/tokens/new?scopes=notifications&description=py3status
The only `scope` needed is `notifications`; it is selected automatically for
you and provides readonly access to notifications.
The Github API is rate limited so setting `cache_timeout` too small may cause
issues see https://developer.github.com/v3/#rate-limiting for details
Configuration parameters:
auth_token: Github personal access token, needed to check notifications
see above.
(default None)
button_action: Button that when clicked opens the Github notification page
if notifications, else the project page for the repository if there is
one (otherwise the github home page). Setting to `None` disables.
(default 3)
button_refresh: Button that when clicked refreshes module.
Setting to `None` disables.
(default 2)
cache_timeout: How often we refresh this module in seconds
(default 60)
format: display format for this module, see Examples below (default None)
format_notifications: Format of `{notification}` status placeholder.
(default ' N{notifications_count}')
notifications: Type of notifications can be `all` for all notifications or
`repo` to only get notifications for the repo specified. If repo is
not provided then all notifications will be checked.
(default 'all')
repo: Github repo to check
(default 'ultrabug/py3status')
url_api: Change only if using Enterprise Github, example https://github.domain.com/api/v3.
(default 'https://api.github.com')
url_base: Change only if using Enterprise Github, example https://github.domain.com.
(default 'https://github.com')
username: Github username, needed to check notifications.
(default None)
Format placeholders:
{issues} Number of open issues.
{notifications} Notifications. If no notifications this will be empty.
{notifications_count} Number of notifications. This is also the __Only__
placeholder available to `format_notifications`.
{pull_requests} Number of open pull requests
{repo} short name of the repository being checked. eg py3status
{repo_full} full name of the repository being checked. eg ultrabug/py3status
Examples:
```
# default formats
github {
# with username and auth_token, this will be used
format = '{repo} {issues}/{pull_requests}{notifications}'
# otherwise, this will be used
format '{repo} {issues}/{pull_requests}'
}
# set github access credentials
github {
auth_token = '40_char_hex_access_token'
username = 'my_username'
}
# just check for any notifications
github {
auth_token = '40_char_hex_access_token'
username = 'my_username'
format = 'Github {notifications_count}'
}
```
@author tobes
SAMPLE OUTPUT
{'full_text': 'py3status 34/24'}
notification
{'full_text': 'py3status 34/24 N3', 'urgent': True}
"""
import urllib.parse as urlparse
class Py3status:
    """ """

    # available configuration parameters
    auth_token = None
    button_action = 3
    button_refresh = 2
    cache_timeout = 60
    format = None
    format_notifications = " N{notifications_count}"
    notifications = "all"
    repo = "ultrabug/py3status"
    url_api = "https://api.github.com"
    url_base = "https://github.com"
    username = None

    def post_config_hook(self):
        """Initialize warning flags, cached counts and the default format."""
        self.notification_warning = False
        self.repo_warning = False
        # last known values, shown while a refresh is failing
        self._issues = "?"
        self._pulls = "?"
        self._notify = "?"
        # remove a trailing slash in the urls
        self.url_api = self.url_api.strip("/")
        self.url_base = self.url_base.strip("/")
        # Set format if user has not configured it.
        if not self.format:
            if self.username and self.auth_token:
                # include notifications
                self.format = "{repo} {issues}/{pull_requests}{notifications}"
            else:
                self.format = "{repo} {issues}/{pull_requests}"

    @staticmethod
    def _merge_count(new, previous):
        """Return `new` unless the lookup failed (None), else `previous`.

        A count of 0 is a valid result and must be kept; a plain
        `new or previous` would silently discard it and keep showing the
        stale value forever.
        """
        return previous if new is None else new

    def _github_count(self, url):
        """
        Get counts for requests that return 'total_count' in the json response.

        Returns an int on success, "?" when the repo cannot be queried and
        None when the request itself failed (network error).
        """
        url = self.url_api + url + "&per_page=1"
        # if we have authentication details use them as we get better
        # rate-limiting.
        if self.username and self.auth_token:
            auth = (self.username, self.auth_token)
        else:
            auth = None
        try:
            info = self.py3.request(url, auth=auth)
        except self.py3.RequestException:
            return None
        if info and info.status_code == 200:
            return int(info.json()["total_count"])
        # guard `info` before dereferencing status_code (it may be falsy)
        if info and info.status_code == 422 and not self.repo_warning:
            self.py3.notify_user("Github repo cannot be found.")
            self.repo_warning = True
        return "?"

    def _notifications(self):
        """
        Get the number of unread notifications.

        Returns an int, "?" when credentials are missing, or None when the
        request failed or the repo was not found.
        """
        if not self.username or not self.auth_token:
            if not self.notification_warning:
                self.py3.notify_user(
                    "Github module needs username and "
                    "auth_token to check notifications."
                )
                self.notification_warning = True
            return "?"
        if self.notifications == "all" or not self.repo:
            url = self.url_api + "/notifications"
        else:
            url = self.url_api + "/repos/" + self.repo + "/notifications"
        url += "?per_page=100"
        try:
            info = self.py3.request(url, auth=(self.username, self.auth_token))
        except self.py3.RequestException:
            return None
        if info.status_code == 200:
            links = info.headers.get("Link")
            if not links:
                return len(info.json())
            # the response is paginated: read the last page number from the
            # Link header, then count (full pages * page size) + last page
            last_page = 1
            for link in links.split(","):
                if 'rel="last"' in link:
                    last_url = link[link.find("<") + 1 : link.find(">")]
                    parsed = urlparse.urlparse(last_url)
                    last_page = int(urlparse.parse_qs(parsed.query)["page"][0])
            if last_page == 1:
                return len(info.json())
            try:
                last_page_info = self.py3.request(
                    last_url, auth=(self.username, self.auth_token)
                )
            except self.py3.RequestException:
                return None
            return len(info.json()) * (last_page - 1) + len(last_page_info.json())
        if info.status_code == 404:
            if not self.repo_warning:
                self.py3.notify_user("Github repo cannot be found.")
                self.repo_warning = True
        return None

    def github(self):
        """Assemble the module output from issue/PR counts and notifications."""
        status = {}
        urgent = False
        # issues
        if self.repo and self.py3.format_contains(self.format, "issues"):
            url = "/search/issues?q=state:open+type:issue+repo:" + self.repo
            # keep the previous value only when the request failed; a count
            # of 0 is a legitimate result (fixes the old `... or self._issues`
            # expression swallowing zero counts)
            self._issues = self._merge_count(self._github_count(url), self._issues)
            status["issues"] = self._issues
        # pull requests
        if self.repo and self.py3.format_contains(self.format, "pull_requests"):
            url = "/search/issues?q=state:open+type:pr+repo:" + self.repo
            self._pulls = self._merge_count(self._github_count(url), self._pulls)
            status["pull_requests"] = self._pulls
        # notifications
        if self.py3.format_contains(self.format, "notifications*"):
            count = self._notifications()
            # if we don't have a notification count, then use the last value
            # that we did have.
            if count is None:
                count = self._notify
            self._notify = count
            if count and count != "?":
                notify = self.py3.safe_format(
                    self.format_notifications, {"notifications_count": count}
                )
                urgent = True
            else:
                notify = ""
            status["notifications"] = notify
            status["notifications_count"] = count
        # repo
        try:
            status["repo"] = self.repo.split("/")[1]
        except IndexError:
            status["repo"] = "Error"
        status["repo_full"] = self.repo
        cached_until = self.py3.time_in(self.cache_timeout)
        return {
            "full_text": self.py3.safe_format(self.format, status),
            "cached_until": cached_until,
            "urgent": urgent,
        }

    def on_click(self, event):
        """Open Github in a browser (button_action) or refresh (button_refresh)."""
        button = event["button"]
        if button == self.button_action:
            # open github in browser
            if self._notify and self._notify != "?":
                # open github notifications page
                url = self.url_base + "/notifications"
            else:
                if self.notifications == "all" and not self.repo:
                    # open github.com if there are no unread notifications and no repo
                    url = self.url_base
                else:
                    # open repo page if there are no unread notifications
                    url = self.url_base + "/" + self.repo
            # open url in default browser
            self.py3.command_run(f"xdg-open {url}")
            self.py3.prevent_refresh()
        elif button != self.button_refresh:
            # only refresh the module if needed
            self.py3.prevent_refresh()
# Allow running this module standalone through py3status's test harness.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
| bsd-3-clause | d0b25b6e9501d8b573e05f303c7687e2 | 35.354478 | 94 | 0.587601 | 4.2141 | false | false | false | false |
ultrabug/py3status | py3status/modules/glpi.py | 2 | 2483 | """
Display number of open tickets from GLPI.
It features thresholds to colorize the output and forces a low timeout to
limit the impact of a server connectivity problem on your i3bar freshness.
Configuration parameters:
cache_timeout: how often we refresh this module in seconds (default 300)
critical: set bad color above this threshold (default 20)
db: database to use (default '')
format: format of the module output (default '{tickets_open} tickets')
host: database host to connect to (default '')
password: login password (default '')
timeout: timeout for database connection (default 5)
user: login user (default '')
warning: set degraded color above this threshold (default 15)
Format placeholders:
{tickets_open} The number of open tickets
Color options:
color_bad: Open ticket above critical threshold
color_degraded: Open ticket above warning threshold
Requires:
MySQL-python: https://pypi.org/project/MySQL-python/
@author ultrabug
SAMPLE OUTPUT
{'full_text': '53 tickets'}
"""
import MySQLdb
class Py3status:
    """ """

    # available configuration parameters
    cache_timeout = 300
    critical = 20
    db = ""
    format = "{tickets_open} tickets"
    host = ""
    password = ""
    timeout = 5
    user = ""
    warning = 15

    def glpi(self):
        """Query GLPI for open tickets and build the module response.

        The cursor and connection are closed in `finally` blocks so a
        failing query no longer leaks database connections.
        """
        response = {"full_text": ""}
        mydb = MySQLdb.connect(
            host=self.host,
            user=self.user,
            passwd=self.password,
            db=self.db,
            connect_timeout=self.timeout,
        )
        try:
            mycr = mydb.cursor()
            try:
                # open tickets: neither closed nor solved
                mycr.execute(
                    """select count(*)
                    from glpi_tickets
                    where closedate is NULL and solvedate is NULL;"""
                )
                row = mycr.fetchone()
            finally:
                mycr.close()
        finally:
            mydb.close()
        if row:
            open_tickets = int(row[0])
            # colorize according to the warning/critical thresholds
            if open_tickets > self.critical:
                response.update({"color": self.py3.COLOR_BAD})
            elif open_tickets > self.warning:
                response.update({"color": self.py3.COLOR_DEGRADED})
            response["full_text"] = self.py3.safe_format(
                self.format, {"tickets_open": open_tickets}
            )
        response["cached_until"] = self.py3.time_in(self.cache_timeout)
        return response
# Allow running this module standalone through py3status's test harness.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
| bsd-3-clause | c0b47548bda75f4420a035583a712ad0 | 26.898876 | 76 | 0.608538 | 4.05719 | false | false | false | false |
ultrabug/py3status | py3status/modules/timewarrior.py | 1 | 10181 | r"""
Track your time with Timewarrior.
Timewarrior is a time tracking utility that offers simple stopwatch features
as well as sophisticated calendar-based backfill, along with flexible reporting.
See https://taskwarrior.org/docs/timewarrior for more information.
Configuration parameters:
cache_timeout: refresh interval for this module, otherwise auto
(default None)
filter: specify interval and/or tag to filter (default '1day')
format: display format for this module
(default '[Timew {format_time}]|No Timew')
format_datetime: specify strftime characters to format (default {})
format_duration: display format for time duration
(default '\?not_zero [{days}d ][{hours}:]{minutes}:{seconds}')
format_tag: display format for tags (default '\?color=state_tag {name}')
format_tag_separator: show separator if more than one (default ' ')
format_time: display format for tracked times
(default '[\?color=state_time [{format_tag} ]{format_duration}]')
format_time_separator: show separator if more than one (default ' ')
thresholds: specify color thresholds to use
*(default {'state_tag': [(0, 'darkgray'), (1, 'darkgray')],
'state_time': [(0, 'darkgray'), (1, 'degraded')]})*
Format placeholders:
{format_time} format for tracked times
{tracking} time tracking state, eg False, True
format_time placeholders:
{state} time tracking state, eg False, True
{format_tag} format for tags
{format_duration} format for time duration
{start} start date, eg 20171021T010203Z
{end} end date, eg 20171021T010203Z
format_tag placeholders:
{name} tag name, eg gaming, studying, gardening
format_datetime placeholders:
key: start, end
value: strftime characters, eg '%b %d' ----> 'Oct 06'
format_duration placeholders:
{days} days
{hours} hours
{minutes} minutes
{seconds} seconds
Color thresholds:
format_time:
state_time: print color based on the state of time tracking
format_tag:
state_tag: print color based on the state of time tracking
Requires:
timew: feature-rich time tracking utility
Recommendations:
We can refresh a module using `py3-cmd` command.
An excellent example of using this command in a function.
```
~/.{bash,zsh}{rc,_profile}
---------------------------
function timew () {
command timew "$@" && py3-cmd refresh timewarrior
}
```
With this, you can consider giving `cache_timeout` a much larger number,
eg 3600 (an hour), so the module does not need to be updated that often.
Examples:
```
# show times matching the filter, see documentation for more filters
timewarrior {
filter = ':day' # filter times not in 24 hours of current day
filter = '12hours' # filter times not in 12 hours of current time
filter = '5min' # filter times not in 5 minutes of current time
filter = '1sec' # filter times not in 1 second of current time
filter = '5pm to 11:59pm' # filter times not in 5pm to 11:59pm range
}
# intervals
timewarrior {
# if you are printing other intervals too with '1day' filter or so,
# then you may want to add this too for better bar readability
format_time_separator = ', '
# you also can change the thresholds with different colors
thresholds = {
'state_tag': [(0, 'darkgray'), (1, 'degraded')],
'state_time': [(0, 'darkgray'), (1, 'degraded')],
}
}
# cache_timeout
timewarrior {
# auto refresh every 10 seconds when there is no active time tracking
# auto refresh every second when there is active time tracking
cache_timeout = None
# refresh every minute when there is no active time tracking
# refresh every second when there is active time tracking
cache_timeout = 60
# explicit refresh every 20 seconds when there is no active time tracking
# explicit refresh every 5 seconds when there is active time tracking
cache_timeout = (20, 5)
}
# add your snippets here
timewarrior {
format = "..."
}
```
@author lasers
SAMPLE OUTPUT
[
{'full_text': 'Timew '},
{'full_text': 'gaming ', 'color': '#a9a9a9'},
{'full_text': '15:02 ', 'color': '#a9a9a9'},
{'full_text': 'studying ', 'color': '#a9a9a9'},
{'full_text': '03:42', 'color': '#ffff00'}
]
no_tag
[
{'full_text': 'Timew '},
{'full_text': 'gardening ', 'color': '#a9a9a9'},
{'full_text': '20:37', 'color': '#ffff00'}
]
no_timew
{'full_text': 'No Timew'}
"""
from json import loads as json_loads
import datetime as dt
# error raised in post_config_hook when the `timew` binary is missing
STRING_NOT_INSTALLED = "not installed"
# timestamp format used by `timew export` (UTC, eg 20171021T010203Z)
DATETIME = "%Y%m%dT%H%M%SZ"
# error raised when cache_timeout is a tuple of the wrong length
STRING_INVALID_TIMEOUT = "invalid cache_timeout"
class Py3status:
    """
    Track and display time intervals reported by timewarrior (`timew export`).
    """

    # available configuration parameters
    cache_timeout = None
    filter = "1day"
    format = "[Timew {format_time}]|No Timew"
    format_datetime = {}
    format_duration = r"\?not_zero [{days}d ][{hours}:]{minutes}:{seconds}"
    format_tag = r"\?color=state_tag {name}"
    format_tag_separator = " "
    format_time = r"[\?color=state_time [{format_tag} ]{format_duration}]"
    format_time_separator = " "
    thresholds = {
        "state_tag": [(0, "darkgray"), (1, "darkgray")],
        "state_time": [(0, "darkgray"), (1, "degraded")],
    }

    class Meta:
        # zero-pad minutes/seconds in format_duration via the config updater
        update_config = {
            "update_placeholder_format": [
                {
                    "placeholder_formats": {"minutes": ":02d", "seconds": ":02d"},
                    "format_strings": ["format_duration"],
                }
            ]
        }

    def post_config_hook(self):
        """
        Validate dependencies and normalize `cache_timeout` into the pair
        (sleep_timeout, cache_timeout): the former is used while idle, the
        latter while a time tracking is active.
        """
        if not self.py3.check_commands("timew"):
            raise Exception(STRING_NOT_INSTALLED)
        if self.cache_timeout is None:
            # auto mode: 10s while idle, refresh every second while tracking
            self.sleep_timeout = 10
            self.cache_timeout = 0
        elif isinstance(self.cache_timeout, tuple):
            # explicit (idle, tracking) pair
            if len(self.cache_timeout) != 2:
                raise Exception(STRING_INVALID_TIMEOUT)
            self.sleep_timeout = self.cache_timeout[0]
            self.cache_timeout = self.cache_timeout[1]
        elif isinstance(self.cache_timeout, int):
            # single int applies while idle; refresh every second while tracking
            self.sleep_timeout = self.cache_timeout
            self.cache_timeout = 0
        self.timewarrior_command = "timew export"
        if self.filter:
            self.timewarrior_command += f" {self.filter}"
        # remember which of start/end need strftime re-formatting
        self.init = {"datetimes": []}
        for word in ["start", "end"]:
            if (self.py3.format_contains(self.format_time, word)) and (
                word in self.format_datetime
            ):
                self.init["datetimes"].append(word)
        self.tracking = None
        # pre-compute the color names used by each format string
        self.thresholds_init = {}
        for name in ("format", "format_tag", "format_time"):
            self.thresholds_init[name] = self.py3.get_color_names_list(
                getattr(self, name)
            )

    def _get_timewarrior_data(self):
        """Run `timew export` and return the parsed JSON interval list."""
        return json_loads(self.py3.command_output(self.timewarrior_command))

    def _manipulate(self, data):
        """
        Convert exported intervals into a composite for display, setting
        self.tracking to True when any interval is still open (no "end").
        """
        new_time = []
        self.tracking = False
        for i, time in enumerate(data):
            time["index"] = len(data) - i
            # an interval without an "end" key is the active tracking
            time["state_time"] = "end" not in time
            # tags
            new_tag = []
            time["tags"] = time.get("tags", [])
            for tag_name in time["tags"]:
                tag_data = {"name": tag_name, "state_tag": time["state_time"]}
                for x in self.thresholds_init["format_tag"]:
                    if x in tag_data:
                        self.py3.threshold_get_color(tag_data[x], x)
                new_tag.append(self.py3.safe_format(self.format_tag, tag_data))
            format_tag_separator = self.py3.safe_format(self.format_tag_separator)
            format_tag = self.py3.composite_join(format_tag_separator, new_tag)
            time["format_tag"] = format_tag
            del time["tags"]
            # duration
            if time["state_time"]:
                self.tracking = True
                # open interval: measure up to "now" (timestamps are UTC)
                end = dt.datetime.utcnow()
            else:
                end = dt.datetime.strptime(time["end"], DATETIME)
            start = dt.datetime.strptime(time["start"], DATETIME)
            duration = end - start
            time["format_duration"] = self.py3.safe_format(
                self.format_duration,
                {
                    "days": duration.days,
                    "hours": duration.seconds // (60 * 60),
                    "minutes": (duration.seconds // 60) % 60,
                    "seconds": duration.seconds % 60,
                },
            )
            # datetime: re-render start/end with user strftime formats
            for word in self.init["datetimes"]:
                if word in time:
                    time[word] = self.py3.safe_format(
                        dt.datetime.strftime(
                            dt.datetime.strptime(time[word], DATETIME),
                            self.format_datetime[word],
                        )
                    )
            # time
            for x in self.thresholds_init["format_time"]:
                if x in time:
                    self.py3.threshold_get_color(time[x], x)
            new_time.append(self.py3.safe_format(self.format_time, time))
        format_time_separator = self.py3.safe_format(self.format_time_separator)
        format_time = self.py3.composite_join(format_time_separator, new_time)
        return format_time

    def timewarrior(self):
        """i3bar callback: render tracked times with state-aware refresh."""
        timewarrior_data = self._get_timewarrior_data()
        format_time = self._manipulate(timewarrior_data)
        # refresh faster while a tracking is active
        if self.tracking:
            cached_until = self.cache_timeout
        else:
            cached_until = self.sleep_timeout
        timew_data = {"format_time": format_time, "tracking": self.tracking}
        for x in self.thresholds_init["format"]:
            if x in timew_data:
                self.py3.threshold_get_color(timew_data[x], x)
        return {
            "cached_until": self.py3.time_in(cached_until),
            "full_text": self.py3.safe_format(self.format, timew_data),
        }
# Allow running this module standalone in py3status test mode.
if __name__ == "__main__":
    from py3status.module_test import module_test

    module_test(Py3status)
| bsd-3-clause | 7e267d8d84ddbd433ee4feebef50279f | 32.60066 | 82 | 0.587565 | 3.836096 | false | false | false | false |
ultrabug/py3status | py3status/modules/moc.py | 2 | 5719 | r"""
Display song currently playing in moc.
MOC (music on console) is a console audio player for Linux/Unix designed to be
powerful and easy to use. It consists of two parts, a server (moc) and a
player/interface (mocp). It supports OGG, WAV, MP3 and other formats.
Configuration parameters:
button_next: mouse button to skip next track (default None)
button_pause: mouse button to pause/play the playback (default 1)
button_previous: mouse button to skip previous track (default None)
button_stop: mouse button to stop the playback (default 3)
cache_timeout: refresh interval for this module (default 5)
format: display format for this module
*(default '\?if=is_started [\?if=is_stopped \[\] moc|'
'[\?if=is_paused \|\|][\?if=is_playing >] {title}]')*
sleep_timeout: when moc is not running, this interval will be used to
allow one to refresh constantly with time placeholders and/or
to refresh once every minute rather than every few seconds
(default 20)
Control placeholders:
{is_paused} a boolean based on moc status
{is_playing} a boolean based on moc status
{is_started} a boolean based on moc status
{is_stopped} a boolean based on moc status
Format placeholders:
{album} album name, eg (new output here)
{artist} artist name, eg (new output here)
{avgbitrate} audio average bitrate, eg 230kbps
{bitrate} audio bitrate, eg 230kbps
{currentsec} elapsed time in seconds, eg 32
{currenttime} elapsed time in [HH:]MM:SS, eg 00:32
{file} file location, eg /home/user/Music...
{rate} audio rate, eg 44kHz
{songtitle} song title, eg (new output here)
{state} playback state, eg PLAY, PAUSE, STOP
{timeleft} time left in [HH:]MM:SS, eg 71:30
{title} track title, eg (new output here)
{totalsec} total time in seconds, eg 4322
{totaltime} total time in seconds, eg 72:02
Placeholders are retrieved directly from `mocp --info` command.
The list was harvested once and should not represent a full list.
Color options:
color_paused: Paused, defaults to color_degraded
color_playing: Playing, defaults to color_good
color_stopped: Stopped, defaults to color_bad
Requires:
moc: a console audio player with simple ncurses interface
Examples:
```
# see 'man mocp' for more buttons
moc {
on_click 9 = 'exec mocp --example'
}
```
@author lasers
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': '> Music For Programming - Mindaugaszq'}
paused
{'color': '#FFFF00', 'full_text': '|| Music For Programming - Mindaugaszq'}
stopped
{'color': '#FF0000', 'full_text': '[] moc'}
"""
STRING_NOT_INSTALLED = "not installed"  # raised when the `mocp` binary is missing
class Py3status:
    """
    Display and control the song currently playing in MOC (music on console).
    """

    # available configuration parameters
    button_next = None
    button_pause = 1
    button_previous = None
    button_stop = 3
    cache_timeout = 5
    format = (
        r"\?if=is_started [\?if=is_stopped \[\] moc|"
        r"[\?if=is_paused \|\|][\?if=is_playing >] {title}]"
    )
    sleep_timeout = 20

    def post_config_hook(self):
        """Check that mocp is installed and resolve color defaults."""
        if not self.py3.check_commands("mocp"):
            raise Exception(STRING_NOT_INSTALLED)
        # Start from a known state.  Previously `self.state` was only
        # assigned inside moc() when the server was running, so a mouse
        # click before the first successful status read raised
        # AttributeError in on_click().
        self.state = "STOP"
        self.color_stopped = self.py3.COLOR_STOPPED or self.py3.COLOR_BAD
        self.color_paused = self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED
        self.color_playing = self.py3.COLOR_PLAYING or self.py3.COLOR_GOOD

    def _get_moc_data(self):
        """
        Query `mocp --info`.

        Returns (is_started, raw_output); the command fails when the moc
        server is not running, in which case is_started is False.
        """
        try:
            data = self.py3.command_output("mocp --info")
            is_started = True
        except self.py3.CommandError:
            data = {}
            is_started = False
        return is_started, data

    def moc(self):
        """i3bar callback: render the current playback status."""
        is_paused = is_playing = is_stopped = None
        cached_until = self.sleep_timeout
        color = self.py3.COLOR_BAD
        data = {}

        is_started, moc_data = self._get_moc_data()
        if is_started:
            cached_until = self.cache_timeout
            # mocp --info prints "Key: value" lines, eg "State: PLAY"
            for line in moc_data.splitlines():
                category, value = line.split(": ", 1)
                data[category.lower()] = value

            self.state = data["state"]
            if self.state == "PLAY":
                is_playing = True
                color = self.color_playing
            elif self.state == "PAUSE":
                is_paused = True
                color = self.color_paused
            elif self.state == "STOP":
                is_stopped = True
                color = self.color_stopped

        return {
            "cached_until": self.py3.time_in(cached_until),
            "color": color,
            "full_text": self.py3.safe_format(
                self.format,
                dict(
                    is_paused=is_paused,
                    is_playing=is_playing,
                    is_started=is_started,
                    is_stopped=is_stopped,
                    **data
                ),
            ),
        }

    def on_click(self, event):
        """
        Control moc with mouse clicks (pause/play, stop, next, previous).
        """
        button = event["button"]
        if button == self.button_pause:
            if self.state == "STOP":
                self.py3.command_run("mocp --play")
            else:
                self.py3.command_run("mocp --toggle-pause")
        elif button == self.button_stop:
            self.py3.command_run("mocp --stop")
        elif button == self.button_next:
            self.py3.command_run("mocp --next")
        elif button == self.button_previous:
            self.py3.command_run("mocp --prev")
        else:
            self.py3.prevent_refresh()
# Allow running this module standalone in py3status test mode.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| bsd-3-clause | f9c2217d2e2693779efffd7805f30ba8 | 31.129213 | 78 | 0.595734 | 3.694444 | false | false | false | false |
ultrabug/py3status | py3status/modules/player_control.py | 2 | 5602 | """
Control Audacious or VLC media player.
Provides an icon to control simple functions of audio/video players:
- start (left click)
- stop (left click)
- pause (middle click)
Configuration parameters:
cache_timeout: how often to update in seconds (default 10)
debug: enable verbose logging (bool) (default False)
format: format of the output (default "{icon}")
pause_icon: (default '❚❚')
play_icon: (default '▶')
stop_icon: (default '◼')
supported_players: supported players (str) (comma separated list)
(default 'audacious,vlc')
volume_tick: percentage volume change on mouse wheel (int) (positive number
or None to disable it) (default 1)
Format placeholders:
{icon} an icon to control music/video players
@author Federico Ceratto <federico.ceratto@gmail.com>, rixx
@license BSD
SAMPLE OUTPUT
{'full_text': u'\u25b6'}
stop
{'full_text': u'\u25fc'}
pause
{'full_text': u'\u275a\u275a'}
"""
# Any contributor to this module should add his/her name to the @author
# line, comma separated.
from pathlib import Path
try:
import dbus
dbus_available = True
except: # noqa e722 // (ImportError, ModuleNotFoundError): # (py2, assumed py3)
dbus_available = False
class Py3status:
    """
    Control Audacious or VLC with a small start/stop/pause icon.
    """

    # available configuration parameters
    cache_timeout = 10
    debug = False
    format = "{icon}"
    pause_icon = "❚❚"
    play_icon = "▶"
    stop_icon = "◼"
    supported_players = "audacious,vlc"
    volume_tick = 1

    def post_config_hook(self):
        """Initialize the toggle state and icon."""
        self.status = "stop"
        self.icon = self.play_icon

    def on_click(self, event):
        """
        Dispatch mouse events: left toggles play/stop, middle pauses,
        wheel up/down changes the volume.
        """
        buttons = (None, "left", "middle", "right", "up", "down")
        try:
            button = buttons[event["button"]]
        except IndexError:
            # unknown extra button: ignore
            # NOTE(review): a negative button number would silently index
            # from the end of the tuple — assumed not to occur in practice
            return

        if button in ("up", "down"):
            if self.volume_tick is None:
                return
            self._change_volume(button == "up")
            return

        if self.status == "play":
            if button == "left":
                self._stop()
            elif button == "middle":
                self._pause()
        elif self.status == "stop":
            if button == "left":
                self._play()
        elif self.status == "pause":
            if button in ("left", "right"):
                self._play()

    def _run(self, command):
        """Run an external command (list form, no shell)."""
        if self.debug:
            self.py3.log(f"running {command}")

        self.py3.command_run(command)

    def _play(self):
        """Start playback on whichever supported player is running."""
        self.status = "play"
        self.icon = self.stop_icon
        player_name = self._detect_running_player()
        if player_name == "audacious":
            self._run(["audacious", "-p"])
        elif player_name == "vlc":
            player = self._get_vlc()
            if player:
                player.Play()

    def _stop(self):
        """Stop playback on whichever supported player is running."""
        self.status = "stop"
        self.icon = self.play_icon
        player_name = self._detect_running_player()
        if player_name == "audacious":
            self._run(["audacious", "-s"])
        elif player_name == "vlc":
            player = self._get_vlc()
            if player:
                player.Stop()

    def _pause(self):
        """Pause playback on whichever supported player is running."""
        self.status = "pause"
        self.icon = self.pause_icon
        player_name = self._detect_running_player()
        if player_name == "audacious":
            self._run(["audacious", "-u"])
        elif player_name == "vlc":
            player = self._get_vlc()
            if player:
                player.Pause()

    def _change_volume(self, increase):
        """Change volume using amixer"""
        sign = "+" if increase else "-"
        delta = f"{self.volume_tick}%{sign}"
        self._run(["amixer", "-q", "sset", "Master", delta])

    def _detect_running_player(self):
        """
        Detect a running supported player by scanning /proc/<pid>/comm.

        Returns the first player of self.supported_players found running,
        or None.
        """
        supported_players = self.supported_players.split(",")
        running_players = []
        for pid in Path("/proc").iterdir():
            if not pid.name.isdigit():
                continue
            try:
                player_name = (pid / "comm").read_bytes().decode().rstrip()
            except (OSError, UnicodeDecodeError):
                # the process may have exited between listing and reading,
                # or its comm may not decode cleanly; previously a bare
                # `except:` here also swallowed KeyboardInterrupt etc.
                continue
            if player_name in supported_players:
                running_players.append(player_name)

        # Pick which player to use based on the order in self.supported_players
        for player_name in supported_players:
            if player_name in running_players:
                if self.debug:
                    self.py3.log(f"found player: {player_name}")

                # those players need the dbus module
                if player_name == "vlc" and not dbus_available:
                    self.py3.log(f"{player_name} requires the dbus python module")
                    return None
                return player_name
        return None

    def _get_vlc(self):
        """Return the MPRIS dbus Player interface for VLC."""
        mpris = "org.mpris.MediaPlayer2"
        mpris_slash = "/" + mpris.replace(".", "/")
        bus = dbus.SessionBus()
        proxy = bus.get_object(mpris + ".vlc", mpris_slash)
        return dbus.Interface(proxy, dbus_interface=mpris + ".Player")

    def player_control(self):
        """i3bar callback: display the current control icon."""
        return dict(
            full_text=self.py3.safe_format(self.format, {"icon": self.icon}),
            cached_until=self.py3.time_in(self.cache_timeout),
        )
# Allow running this module standalone in py3status test mode.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| bsd-3-clause | db1fd2792a761f5b0ac8781a3e11e574 | 28.246073 | 82 | 0.551378 | 3.855072 | false | false | false | false |
ultrabug/py3status | py3status/udev_monitor.py | 2 | 3726 | from collections import defaultdict, Counter
from time import sleep
from datetime import datetime
from py3status.constants import ON_TRIGGER_ACTIONS
try:
import pyudev
except ImportError:
pyudev = None
class UdevMonitor:
    """
    This class allows us to react to udev events.

    Modules subscribe with a udev subsystem and a trigger action; when a
    matching udev event fires, subscribed modules are refreshed (throttled
    to avoid refresh storms).
    """

    def __init__(self, py3_wrapper):
        """
        The udev monitoring will be lazy loaded if a module uses it.
        """
        self.py3_wrapper = py3_wrapper
        # pyudev is an optional dependency; without it subscriptions fail
        self.pyudev_available = pyudev is not None
        # per-"subsystem.action" counters used to throttle refreshes
        self.throttle = defaultdict(Counter)
        # subsystem -> list of (module, trigger_action) subscriptions
        self.udev_consumers = defaultdict(list)
        self.udev_observer = None

    def _setup_pyudev_monitoring(self):
        """
        Setup the udev monitor.
        """
        context = pyudev.Context()
        monitor = pyudev.Monitor.from_netlink(context)
        # the observer runs in its own thread and calls _udev_event
        self.udev_observer = pyudev.MonitorObserver(monitor, self._udev_event)
        self.udev_observer.start()
        self.py3_wrapper.log("udev monitoring enabled")

    def _udev_event(self, action, device):
        """
        This is a callback method that will trigger a refresh on subscribers.
        """
        # self.py3_wrapper.log(
        #     f"detected udev action '{action}' on subsystem '{device.subsystem}'"
        # )
        if not self.py3_wrapper.i3bar_running:
            return
        self.trigger_actions(action, device.subsystem)

    def subscribe(self, py3_module, trigger_action, subsystem):
        """
        Subscribe the given module to the given udev subsystem.

        Here we will lazy load the monitor if necessary and return success or
        failure based on the availability of pyudev.
        """
        if self.pyudev_available:
            # lazy load the udev monitor
            if self.udev_observer is None:
                self._setup_pyudev_monitoring()
            if trigger_action not in ON_TRIGGER_ACTIONS:
                self.py3_wrapper.log(
                    f"module {py3_module.module_full_name}: invalid action "
                    f"{trigger_action} on udev events subscription"
                )
                return False
            self.udev_consumers[subsystem].append((py3_module, trigger_action))
            self.py3_wrapper.log(
                f"module {py3_module.module_full_name} subscribed to udev events on {subsystem}"
            )
            return True
        else:
            self.py3_wrapper.log(
                f"pyudev module not installed: module {py3_module.module_full_name} "
                f"not subscribed to events on {subsystem}"
            )
            return False

    def trigger_actions(self, action, subsystem):
        """
        Refresh all modules which subscribed to the given subsystem.
        """
        # tens-of-seconds digit of the current time: events are throttled
        # within each ~10 second window
        resolution = datetime.now().strftime("%S")[0]
        for py3_module, trigger_action in self.udev_consumers[subsystem]:
            if trigger_action in ON_TRIGGER_ACTIONS:
                event_key = f"{subsystem}.{action}"
                occurences = self.throttle[event_key][resolution]
                # we allow at most 5 events per 10 seconds window
                if occurences >= 5:
                    self.py3_wrapper.log(
                        f"udev event {event_key}: throttled after {occurences} occurences",
                        level="warning",
                    )
                    continue
                self.py3_wrapper.log(
                    f"{event_key} udev event: refresh consumer {py3_module.module_full_name}"
                )
                # small delay so the device is settled before the refresh
                sleep(0.1)
                py3_module.force_update()
                # reset counters from older windows, keep only the current one
                self.throttle[event_key].clear()
                self.throttle[event_key][resolution] = occurences + 1
| bsd-3-clause | d8fb58e2c2c0be5dc934954c36356e09 | 36.26 | 96 | 0.574879 | 4.167785 | false | false | false | false |
ultrabug/py3status | py3status/modules/systemd_suspend_inhibitor.py | 2 | 2575 | r"""
Turn on and off systemd suspend inhibitor.
Configuration parameters:
format: display format for this module
(default '[\?color=state SUSPEND [\?if=state OFF|ON]]')
lock_types: specify state to inhibit, comma separated list
https://www.freedesktop.org/wiki/Software/systemd/inhibit/
(default ['handle-lid-switch', 'idle', 'sleep'])
thresholds: specify color thresholds to use
(default [(True, 'bad'), (False, 'good')])
Format placeholders:
{state} systemd suspend inhibitor state, eg True, False
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
@author Cyrinux https://github.com/cyrinux
@license BSD
SAMPLE OUTPUT
[{'full_text': 'SUSPEND ON', 'color': '#00FF00'}]
off
[{'full_text': 'SUSPEND OFF', 'color': '#FF0000'}]
"""
from dbus import SystemBus
from os import close
# raised when the logind dbus object cannot be reached
STRING_DBUS_EXCEPTION = "DBUS error, systemd-logind not started?"
# defined for symmetry; bad lock types surface as dbus errors at Inhibit time
STRING_BAD_LOCK_TYPES = "DBUS error, bad lock types used"
class Py3status:
    """
    Toggle a systemd suspend inhibitor lock via logind on mouse click.
    """

    # available configuration parameters
    format = r"[\?color=state SUSPEND [\?if=state OFF|ON]]"
    lock_types = ["handle-lid-switch", "idle", "sleep"]
    thresholds = [(True, "bad"), (False, "good")]

    def post_config_hook(self):
        """Grab the logind manager object and prepare module state."""
        try:
            self.login1 = SystemBus().get_object(
                "org.freedesktop.login1", "/org/freedesktop/login1"
            )
        except Exception:
            raise Exception(STRING_DBUS_EXCEPTION)
        # no inhibitor held yet; when taken, the lock is a file descriptor
        self.lock = None
        self.lock_types = ":".join(self.lock_types)
        self.thresholds_init = self.py3.get_color_names_list(self.format)

    def systemd_suspend_inhibitor(self):
        """i3bar callback: render the current inhibitor state."""
        data = {"state": bool(self.lock)}
        for name in self.thresholds_init:
            if name in data:
                self.py3.threshold_get_color(data[name], name)
        return {
            "cached_until": self.py3.CACHE_FOREVER,
            "full_text": self.py3.safe_format(self.format, data),
        }

    def on_click(self, event):
        """Toggle the inhibitor lock on any mouse click."""
        if self.lock is not None:
            # closing the fd releases the systemd inhibitor
            close(self.lock)
            self.lock = None
            return
        self.lock = self.login1.Inhibit(
            self.lock_types,
            "Py3Status",
            "Systemd suspend inhibitor module",
            "block",
            dbus_interface="org.freedesktop.login1.Manager",
        ).take()
# Allow running this module standalone in py3status test mode.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| bsd-3-clause | b08d0674bf3580e13199b8e6f7dde7c8 | 28.261364 | 73 | 0.603495 | 3.616573 | false | false | false | false |
ultrabug/py3status | py3status/formatter.py | 2 | 24224 | import re
from html import escape
from math import ceil
from numbers import Number
from urllib.parse import parse_qsl
from py3status.composite import Composite
from py3status.constants import COLOR_NAMES, COLOR_NAMES_EXCLUDED
def expand_color(color, default=None, passthrough=False, block=None):
    """
    Expand various colors to #RRGGBB.

    Hex colors (#RGB, #RGBA, #RRGGBB, #RRGGBBAA) are normalized to their
    upper-case long form; anything else is looked up in COLOR_NAMES.
    `block` is returned for invalid hex input or when a named color is
    given while a block color already exists.
    """
    if color:
        if color.startswith("#"):
            hex_part = color[1:]
            try:
                int(hex_part, 16)
            except ValueError:
                # not valid hex digits: keep whatever the block already had
                return block
            size = len(hex_part)
            if size in (3, 4):
                # short #RGB[A] form: double each digit
                hex_part = "".join(digit * 2 for digit in hex_part)
            elif size not in (6, 8):
                return block
            return "#" + hex_part.upper()
        elif block:
            return block
    return COLOR_NAMES.get(color, color if passthrough else default)
class Formatter:
    """
    Formatter for processing format strings via the format method.
    """

    # token definitions of the format string mini-language; the combined
    # pattern matches [blocks], | switches, \?commands, escapes,
    # {placeholders} and runs of literal text
    TOKENS = [
        r"(?P<block_start>\[)"
        r"|(?P<block_end>\])"
        r"|(?P<switch>\|)"
        r"|(\\\?(?P<command>\S*)\s)"
        r"|(?P<escaped>(\\.|\{\{|\}\}))"
        r"|(?P<placeholder>(\{(?P<key>([^}\\\:\!]|\\.)*)(?P<format>([^}\\]|\\.)*)?\}))"
        r"|(?P<literal>([^\[\]\\\{\}\|])+)"
        r"|(?P<lost_brace>([{}]))"
    ]

    reg_ex = re.compile(TOKENS[0], re.M | re.I)

    # NOTE: these caches are class attributes, shared by every Formatter
    # instance for the lifetime of the process
    block_cache = {}
    format_string_cache = {}

    def __init__(self, py3_wrapper=None):
        self.py3_wrapper = py3_wrapper

    def tokens(self, format_string):
        """
        Get the tokenized format_string.
        Tokenizing is resource intensive so we only do it once and cache it
        """
        if format_string not in self.format_string_cache:
            tokens = list(re.finditer(self.reg_ex, format_string))
            self.format_string_cache[format_string] = tokens
        return self.format_string_cache[format_string]

    def get_color_names(self, format_string):
        """
        Parses the format_string and returns a set of color names.

        Only user-defined names are returned: literal #RRGGBB colors,
        well-known names and excluded values are skipped.
        """
        names = set()
        # Tokenize the format string and process them
        for token in self.tokens(format_string):
            if token.group("command"):
                name = dict(parse_qsl(token.group("command"))).get("color")
                if (
                    not name
                    or name in COLOR_NAMES_EXCLUDED
                    or name in COLOR_NAMES
                    or name[0] == "#"
                ):
                    continue
                names.add(name)
        return names

    def get_placeholders(self, format_string):
        """
        Parses the format_string and returns a set of placeholders.
        """
        placeholders = set()
        # Tokenize the format string and process them
        for token in self.tokens(format_string):
            if token.group("placeholder"):
                placeholders.add(token.group("key"))
            elif token.group("command"):
                # get any placeholders used in commands
                commands = dict(parse_qsl(token.group("command")))
                # placeholders only used in `if`
                if_ = commands.get("if")
                if if_:
                    placeholders.add(Condition(if_).variable)
        return placeholders

    def get_placeholder_formats_list(self, format_string):
        """
        Parses the format_string and returns a list of tuples
        (placeholder, format).
        """
        placeholders = []
        # Tokenize the format string and process them
        for token in self.tokens(format_string):
            if token.group("placeholder"):
                placeholders.append((token.group("key"), token.group("format")))
        return placeholders

    def update_placeholders(self, format_string, placeholders):
        """
        Update a format string renaming placeholders.

        `placeholders` maps old names to new names; occurrences inside
        `\\?if=...` commands are renamed too.
        """
        # Tokenize the format string and process them
        output = []
        for token in self.tokens(format_string):
            if token.group("key") in placeholders:
                # rename the placeholder, keeping its format spec
                output.append(
                    "{{{}{}}}".format(
                        placeholders[token.group("key")], token.group("format")
                    )
                )
                continue
            elif token.group("command"):
                # update any placeholders used in commands
                commands = parse_qsl(token.group("command"), keep_blank_values=True)
                # placeholders only used in `if`
                if "if" in [x[0] for x in commands]:
                    items = []
                    for key, value in commands:
                        if key == "if":
                            # we have to rebuild from the parts we have
                            condition = Condition(value)
                            variable = condition.variable
                            if variable in placeholders:
                                variable = placeholders[variable]
                            # negation via `!`
                            not_ = "!" if not condition.default else ""
                            condition_ = condition.condition or ""
                            # if there is no condition then there is no
                            # value
                            if condition_:
                                value_ = condition.value
                            else:
                                value_ = ""
                            value = "{}{}{}{}".format(
                                not_, variable, condition_, value_
                            )
                        if value:
                            items.append(f"{key}={value}")
                        else:
                            items.append(key)
                    # we cannot use urlencode because it will escape things
                    # like `!`
                    output.append(r"\?{} ".format("&".join(items)))
                    continue
            value = token.group(0)
            output.append(value)
        return "".join(output)

    def update_placeholder_formats(self, format_string, placeholder_formats):
        """
        Update a format string adding formats if they are not already present.
        """
        # Tokenize the format string and process them
        output = []
        for token in self.tokens(format_string):
            # only add a format to placeholders that have none yet
            if (
                token.group("placeholder")
                and (not token.group("format"))
                and token.group("key") in placeholder_formats
            ):
                output.append(
                    f"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}"
                )
                continue
            value = token.group(0)
            output.append(value)
        return "".join(output)

    def build_block(self, format_string):
        """
        Parse the format string into blocks containing Literals, Placeholders
        etc that we can cache and reuse.

        The resulting root Block is stored in the class-level block_cache.
        """
        first_block = Block(None, py3_wrapper=self.py3_wrapper)
        block = first_block

        # Tokenize the format string and process them
        for token in self.tokens(format_string):
            value = token.group(0)
            if token.group("block_start"):
                # Create new block
                block = block.new_block()
            elif token.group("block_end"):
                # Close block setting any valid state as needed
                # and return to parent block to continue
                if not block.parent:
                    raise Exception("Too many `]`")
                block = block.parent
            elif token.group("switch"):
                # a new option has been created
                block = block.switch()
            elif token.group("placeholder"):
                # Found a {placeholder}
                key = token.group("key")
                format = token.group("format")
                block.add(Placeholder(key, format))
            elif token.group("literal"):
                block.add(Literal(value))
            elif token.group("lost_brace"):
                # due to how parsing happens we can get a lonesome }
                # eg in format_string '{{something}' this fixes that issue
                block.add(Literal(value))
            elif token.group("command"):
                # a block command has been found
                block.set_commands(token.group("command"))
            elif token.group("escaped"):
                # escaped characters add unescaped values
                if value[0] in ["\\", "{", "}"]:
                    value = value[1:]
                block.add(Literal(value))

        if block.parent:
            raise Exception("Block not closed")
        # add to the cache
        self.block_cache[format_string] = first_block

    def format(
        self,
        format_string,
        module=None,
        param_dict=None,
        force_composite=False,
        attr_getter=None,
    ):
        """
        Format a string, substituting place holders which can be found in
        param_dict, attributes of the supplied module, or provided via calls to
        the attr_getter function.

        Returns a str, or a Composite when the result contains styling (or
        when force_composite is set).
        """
        if param_dict is None:
            param_dict = {}

        # if the processed format string is not in the cache then create it.
        if format_string not in self.block_cache:
            self.build_block(format_string)

        first_block = self.block_cache[format_string]

        def get_parameter(key):
            """
            function that finds and returns the value for a placeholder.

            Raises on missing/unusable parameters so the caller's block
            can mark itself invalid.
            """
            if key in param_dict:
                # was a supplied parameter
                param = param_dict.get(key)
            elif module and hasattr(module, key):
                param = getattr(module, key)
                if hasattr(param, "__call__"):
                    # we don't allow module methods
                    raise Exception()
            elif attr_getter:
                # get value from attr_getter function
                try:
                    param = attr_getter(key)
                except:  # noqa e722
                    raise Exception()
            else:
                raise Exception()
            if isinstance(param, Composite):
                # copy so the cached original is never mutated
                if param.text():
                    param = param.copy()
                else:
                    param = ""
            return param

        # render our processed format
        valid, output = first_block.render(get_parameter, module)

        # clean things up a little
        if isinstance(output, list):
            output = Composite(output)
        if not output:
            if force_composite:
                output = Composite()
            else:
                output = ""

        return output
class Placeholder:
    """
    A single {placeholder} token of a format string.
    """

    def __init__(self, key, format):
        self.key = key
        self.format = format

    def get(self, get_params, block):
        """
        Resolve the placeholder via get_params and apply any format spec.

        Returns (valid, value, enough): `valid` says the value should be
        shown, `enough` is True when the parameter was unavailable.
        """
        # fall back to the literal placeholder text if the lookup fails
        rendered = f"{{{self.key}}}"
        try:
            rendered = raw = get_params(self.key)
            fmt = self.format
            if fmt.startswith(":"):
                # Numeric format spec: try to coerce the value so that
                # string values that look like numbers can be formatted.
                # {:d} doubles as a shorthand for {:.0f}; {:g} trims
                # insignificant trailing zeroes.  If coercion fails the
                # format spec is silently dropped.
                try:
                    if "escape" in fmt:
                        rendered = escape(rendered)
                    if "ceil" in fmt:
                        rendered = ceil(float(rendered))
                    if "f" in fmt:
                        rendered = float(rendered)
                    if "g" in fmt:
                        rendered = float(rendered)
                    if "d" in fmt:
                        rendered = int(float(rendered))
                    rendered = f"{{[{self.key}]{fmt}}}".format({self.key: rendered})
                    raw = float(rendered)
                except ValueError:
                    pass
            elif fmt.startswith("!"):
                # conversion spec such as {x!r}
                rendered = raw = f"{{{self.key}{fmt}}}".format(**{self.key: rendered})
            if block.commands.not_zero:
                valid = raw not in ["", None, False, "0", "0.0", 0, 0.0]
            else:
                # '', None, and False are ignored;
                # numbers like 0 and 0.0 are not.
                valid = not (raw in ["", None] or raw is False)
            return valid, rendered, False
        except:  # noqa e722
            # Exception raised when we don't have the param
            return False, rendered, True

    def __repr__(self):
        return "<Placeholder {" + self.repr() + "}>"

    def repr(self):
        inner = f"{self.key}{self.format}" if self.format else self.key
        return "{" + inner + "}"
class Literal:
    """
    A plain chunk of text inside a format string.
    """

    def __init__(self, text):
        self.text = text

    def repr(self):
        return self.text

    def __repr__(self):
        return f"<Literal {self.repr()}>"
class Condition:
    """
    The `if` qualifier of a block.

    Compares a placeholder's value against a literal via `=`, `>` or `<`,
    or — with no operator — simply tests the placeholder for truthiness.
    A leading `!` negates the result.
    """

    # shared defaults, overridden per instance as needed
    condition = None
    value = True
    variable = None

    def __init__(self, info):
        # a leading `!` negates the condition
        self.default = info[0] != "!"
        if not self.default:
            info = info[1:]
        # look for a comparison operator; `=` takes precedence over `>`
        # which takes precedence over `<`
        for operator in "=><":
            if operator in info:
                self.variable, self.value = info.split(operator)
                self.condition = operator
                self.check_valid = self._check_valid_condition
                break
        else:
            # bare variable: plain truthiness test
            self.variable = info
            self.check_valid = self._check_valid_basic

    def _check_valid_condition(self, get_params):
        """
        Check if the comparison condition has been met, coercing the
        literal to the placeholder value's type first.
        """
        try:
            actual = get_params(self.variable)
        except:  # noqa e722
            actual = None
        # a missing placeholder can never satisfy the condition
        if actual is None:
            return not self.default
        expected = self.value
        # convert the literal to a matching type
        if isinstance(actual, bool):
            expected = bool(self.value)
        elif isinstance(actual, Number):
            try:
                expected = int(self.value)
            except:  # noqa e722
                try:
                    expected = float(self.value)
                except:  # noqa e722
                    # could not parse as a number
                    return not self.default
        # compare and return the result
        if self.condition == "=":
            return (actual == expected) == self.default
        elif self.condition == ">":
            return (actual > expected) == self.default
        elif self.condition == "<":
            return (actual < expected) == self.default

    def _check_valid_basic(self, get_params):
        """
        Simple check that the variable is set (truthy).
        """
        try:
            truthy = bool(get_params(self.variable))
        except:  # noqa e722
            truthy = False
        return self.default if truthy else not self.default
class BlockConfig:
    r"""
    Block commands eg [\?color=bad ...] are stored in this object.

    An instance is created per Block; inheritable commands are copied
    from the parent block on construction and may then be overridden by
    update_commands().
    """

    REGEX_COLOR = re.compile("#[0-9A-F]{6}")

    # commands that child blocks inherit from their parent
    INHERITABLE = ["color", "not_zero", "show"]

    # defaults
    _if = None
    color = None
    max_length = None
    min_length = 0
    not_zero = False
    show = False
    soft = False

    def __init__(self, parent):
        # inherit any commands from the parent block
        # inheritable commands are in self.INHERITABLE
        if parent:
            parent_commands = parent.commands
            for attr in self.INHERITABLE:
                setattr(self, attr, getattr(parent_commands, attr))

    def update_commands(self, commands_str):
        """
        update with commands from the block

        ``commands_str`` is a query-string style list such as
        ``color=bad&max_length=10``.
        """
        commands = dict(parse_qsl(commands_str, keep_blank_values=True))
        # Only look at an "if" supplied in this command string.  The old
        # code defaulted to the already-wrapped Condition stored on
        # self._if, so a second call without an "if" command re-wrapped
        # that Condition object and crashed inside Condition.__init__.
        _if = commands.get("if")
        if _if:
            self._if = Condition(_if)
        self._set_int(commands, "max_length")
        self._set_int(commands, "min_length")
        self.color = expand_color(
            commands.get("color"), passthrough=True, block=self.color
        )
        # flag commands are sticky: once set they stay set
        self.not_zero = "not_zero" in commands or self.not_zero
        self.show = "show" in commands or self.show
        self.soft = "soft" in commands or self.soft

    def _set_int(self, commands, name):
        """
        set integer value from commands; unparsable values are ignored.
        """
        if name in commands:
            try:
                value = int(commands[name])
                setattr(self, name, value)
            except ValueError:
                pass
class Block:
    """
    class representing a [block] of a format string

    A block holds an ordered list of content items (Placeholder, Literal
    or nested Block instances) plus the commands that apply to it.
    Alternatives split with ``|`` are chained via ``next_block`` and tried
    in turn until one renders as valid.
    """
    def __init__(self, parent, base_block=None, py3_wrapper=None):
        # first block of a "|" chain that this block is an alternative for
        self.base_block = base_block
        # block commands; inheritable ones are copied from the parent
        self.commands = BlockConfig(parent)
        # ordered Placeholder / Literal / Block items
        self.content = []
        # next "|" alternative, if any
        self.next_block = None
        self.parent = parent
        self.py3_wrapper = py3_wrapper
    def set_commands(self, command_str):
        """
        set any commands for this block
        """
        self.commands.update_commands(command_str)
    def add(self, item):
        # append a Placeholder, Literal or child Block to this block
        self.content.append(item)
    def new_block(self):
        """
        create a new sub block to the current block and return it.
        the sub block is added to the current block.
        """
        child = Block(self, py3_wrapper=self.py3_wrapper)
        self.add(child)
        return child
    def switch(self):
        """
        block has been split via | so we need to start a new block for that
        option and return it to the user.
        """
        # all alternatives share the chain's first block as base_block
        base_block = self.base_block or self
        self.next_block = Block(
            self.parent, base_block=base_block, py3_wrapper=self.py3_wrapper
        )
        return self.next_block
    def __repr__(self):
        return f"<Block {self.repr()}>"
    def repr(self):
        # list of the source-form representations of our content,
        # with "|" markers between chained alternatives
        my_repr = [x.repr() for x in self.content]
        if self.next_block:
            my_repr.extend(["|"] + self.next_block.repr())
        return my_repr
    def check_valid(self, get_params):
        """
        see if the if condition for a block is valid

        Returns True/False, or None when the block has no if condition.
        """
        if self.commands._if:
            return self.commands._if.check_valid(get_params)
    def render(self, get_params, module, _if=None):
        """
        render the block and return the output.

        Returns a (valid, output) pair where output is a list of dicts
        with "full_text" (and optionally "color") keys -- or ``self`` for
        a soft block whose rendering is deferred to the parent.
        ``get_params`` maps placeholder names to values; ``module`` is
        expected to expose color_* attributes and a ``py3`` wrapper.
        """
        enough = False
        output = []
        valid = None
        # "show" forces the block to be treated as valid
        if self.commands.show:
            valid = True
        # a nested soft block defers itself: the parent decides later
        # whether to render it (see the is_block branch below)
        if self.parent and self.commands.soft and _if is None:
            return None, self
        if _if:
            valid = True
        elif self.commands._if:
            valid = self.check_valid(get_params)
        if valid is not False:
            # collect raw output from each content item
            for item in self.content:
                if isinstance(item, Placeholder):
                    # Placeholder.get -> (valid, output, enough)
                    sub_valid, sub_output, enough = item.get(get_params, self)
                    output.append(sub_output)
                elif isinstance(item, Literal):
                    sub_valid = None
                    enough = True
                    output.append(item.text)
                elif isinstance(item, Block):
                    sub_valid, sub_output = item.render(get_params, module)
                    if sub_valid is None:
                        # deferred soft block: keep the Block itself
                        output.append(sub_output)
                    else:
                        output.extend(sub_output)
                valid = valid or sub_valid
        if not valid:
            # fall through to the next "|" alternative, if any
            if self.next_block:
                valid, output = self.next_block.render(
                    get_params, module, _if=self.commands._if
                )
            elif self.parent is None and (
                (not self.next_block and enough) or self.base_block
            ):
                # top level block with literal content or an exhausted
                # "|" chain is still shown
                valid = True
            else:
                output = []
        # clean: resolve a named color (eg "bad") via the module or py3
        color = self.commands.color
        if color and color[0] != "#":
            color_name = f"color_{color}"
            threshold_color_name = f"color_threshold_{color}"
            # substitute color
            color = (
                getattr(module, color_name, None)
                or getattr(module, threshold_color_name, None)
                or getattr(module.py3, color_name.upper(), None)
            )
            if color == "hidden":
                return False, []
        text = ""
        out = []
        if isinstance(output, str):
            output = [output]
        # merge as much output as we can.
        first = True
        last_block = None
        for index, item in enumerate(output):
            is_block = isinstance(item, Block)
            if not is_block and item:
                last_block = None
            if isinstance(item, (str, bool, int, float, bytes)) or item is None:
                # accumulate plain values into a text run
                text += str(item)
                continue
            elif text:
                # flush the accumulated text run before a composite item.
                # NOTE(review): text == "" can never be true inside this
                # elif -- the same-color merge test is what matters here.
                if not first and (text == "" or out and out[-1].get("color") == color):
                    out[-1]["full_text"] += text
                else:
                    part = {"full_text": text}
                    if color:
                        part["color"] = color
                    out.append(part)
                text = ""
            if isinstance(item, Composite):
                # soft update: keep any color already set on the parts
                if color:
                    item.composite_update(item, {"color": color}, soft=True)
                out.extend(item.get_content())
            elif is_block:
                # if this is a block then likely it is soft.
                if not out:
                    continue
                # only render the soft block if some real output follows it
                for other in output[index + 1 :]:
                    if other and not isinstance(other, Block):
                        valid, _output = item.render(get_params, module, _if=True)
                        if _output and _output != last_block:
                            # avoid duplicating the same soft output twice
                            last_block = _output
                            out.extend(_output)
                        break
            else:
                if item:
                    out.append(item)
            first = False
        # add any left over text
        if text:
            part = {"full_text": text}
            if color:
                part["color"] = color
            out.append(part)
        # process any min/max length commands
        max_length = self.commands.max_length
        min_length = self.commands.min_length
        if max_length or min_length:
            for item in out:
                if max_length is not None:
                    # truncate, then spend the remaining budget on
                    # subsequent parts
                    item["full_text"] = item["full_text"][:max_length]
                    max_length -= len(item["full_text"])
                if min_length:
                    min_length -= len(item["full_text"])
            if min_length > 0:
                # left-pad the first part to reach the minimum length
                out[0]["full_text"] = " " * min_length + out[0]["full_text"]
                min_length = 0
        return valid, out
| bsd-3-clause | e15ae8b50b1294c6914a5a59d55fedd4 | 33.409091 | 88 | 0.495376 | 4.619375 | false | false | false | false |
class Composite:
    """
    Helper class to identify a composite and store its content
    A Composite is essentially a wrapped list containing response items.

    Each item is a dict holding at least a "full_text" key, plus optional
    extras such as "color" or "separator".
    """

    def __init__(self, content=None):
        """
        Accept and normalise several input types:
        None -> empty, Composite -> shallow copy of its item list,
        dict -> single-item list, str -> [{"full_text": content}].
        """
        # try and create a composite from various input types
        if content is None:
            content = []
        elif isinstance(content, Composite):
            content = content.get_content()[:]
        elif isinstance(content, dict):
            content = [content]
        elif isinstance(content, str):
            content = [{"full_text": content}]
        assert isinstance(content, list)
        self._content = content

    def __repr__(self):
        return f"<Composite {self._content}>"

    def __len__(self):
        return len(self._content)

    def __getitem__(self, key):
        # slicing returns a new Composite; a single index returns the dict
        if isinstance(key, slice):
            return Composite(self._content[key])
        return self._content[key]

    def __setitem__(self, key, value):
        self._content[key] = value

    def __delitem__(self, key):
        del self._content[key]

    def __iter__(self):
        return iter(self._content)

    def __iadd__(self, other):
        # "composite += x" delegates to append() so all input types work
        self.append(other)
        return self

    def copy(self):
        """
        Return a shallow copy of the Composite
        (item dicts are copied, but their values are shared).
        """
        return Composite([x.copy() for x in self._content])

    def append(self, item):
        """
        Add an item to the Composite. Item can be a Composite, list etc

        Raises Exception for unsupported item types.
        """
        if isinstance(item, Composite):
            self._content += item.get_content()
        elif isinstance(item, list):
            self._content += item
        elif isinstance(item, dict):
            self._content.append(item)
        elif isinstance(item, str):
            self._content.append({"full_text": item})
        else:
            msg = "{!r} not suitable to append to Composite"
            raise Exception(msg.format(item))

    def get_content(self):
        """
        Retrieve the contained list
        """
        return self._content

    def text(self):
        """
        Return the text only component of the composite.
        """
        return "".join(x.get("full_text", "") for x in self._content)

    def simplify(self):
        """
        Simplify the content of a Composite merging any parts that can be
        and returning the new Composite as well as updating itself internally
        """
        final_output = []
        diff_last = None
        item_last = None
        for item in self._content:
            # remove any undefined colors
            if hasattr(item.get("color"), "none_setting"):
                del item["color"]
            # ignore empty items
            if not item.get("full_text") and not item.get("separator"):
                continue
            # merge items if we can: identical attributes apart from
            # full_text, or whitespace-only text, joins the previous item.
            # NOTE(review): a separator-only item (no "full_text") would
            # raise KeyError below -- confirm such items cannot occur here.
            diff = item.copy()
            del diff["full_text"]
            if diff == diff_last or (item["full_text"].strip() == "" and item_last):
                item_last["full_text"] += item["full_text"]
            else:
                diff_last = diff
                item_last = item.copy()  # copy item as we may change it
                final_output.append(item_last)
        self._content = final_output
        return self

    @staticmethod
    def composite_join(separator, items):
        """
        Join a list of items with a separator.
        This is used in joining strings, responses and Composites.
        The output will be a Composite.
        """
        output = Composite()
        first_item = True
        for item in items:
            # skip empty items
            if not item:
                continue
            # skip separator on first item
            if first_item:
                first_item = False
            else:
                output.append(separator)
            output.append(item)
        return output

    @staticmethod
    def composite_update(item, update_dict, soft=False):
        """
        Takes a Composite (item) and updates all entries with values from
        update_dict. Updates can be soft in which case existing values are not
        overwritten.
        If item is of type string it is first converted to a Composite
        """
        item = Composite(item)
        for part in item.get_content():
            if soft:
                for key, value in update_dict.items():
                    if key not in part:
                        part[key] = value
            else:
                part.update(update_dict)
        return item
| bsd-3-clause | dbf19dd51714d984af90923ab92c3ac5 | 30.136986 | 84 | 0.540255 | 4.629328 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.