repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ultrabug/py3status | py3status/modules/mail.py | 2 | 13567 | r"""
Display number of messages in various mailbox formats.
This module supports Maildir, mbox, MH, Babyl, MMDF, and IMAP.
Configuration parameters:
accounts: specify a dict consisting of mailbox types and a list of dicts
consisting of mailbox settings and/or paths to use (default {})
cache_timeout: refresh interval for this module (default 60)
format: display format for this module
(default '\?not_zero Mail {mail}|No Mail')
thresholds: specify color thresholds to use (default [])
Format placeholders:
{mail} number of messages
{maildir} number of Maildir messages
{mbox} number of mbox messages
{mh} number of MH messages
{babyl} number of Babyl messages
{mmdf} number of MMDF messages
{imap} number of IMAP messages
We can divide mailbox, eg `{maildir}`, into numbered placeholders based
on number of mailbox accounts, eg `{maildir_1}`, and if we add `name` to
a mailbox account, we can use `{name}` placeholder instead, eg `{home}`.
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
IMAP Subscriptions:
You can specify a list of filters to decide which folders to search.
By default, we search only the INBOX folder (ie: `['^INBOX$']`). We
can use regular expressions, so if you use more than one, it would
be joined by a logical operator `or`.
`'.*'` will match all folders.
`'pattern'` will match folders containing `pattern`.
`'^pattern'` will match folders beginning with `pattern`.
`'pattern$'` will match folders ending with `pattern`.
`'^((?![Ss][Pp][Aa][Mm]).)*$'` will match all folders
except for every possible case of `spam` folders.
For more documentation, see https://docs.python.org/3/library/re.html
and/or any regex builder on the web. Don't forget to escape characters.
Examples:
```
# add multiple accounts
mail { #
accounts = { # {mail}
'maildir': [ # βββ {maildir}
{'path': '~/.mutt'}, # β βββ {maildir_1}
{'path': '~/Mail'}, # β βββ {maildir_2}
], # β
'mbox': [ # βββ {mbox}
{'path': '~/home.mbox'}, # β βββ {mbox_1}
{ # β βββ {mbox_2}
'name': 'local', # <----β----β----βββ {local}
'path': '~/mbox' # β β
}, # β β
{ # β βββ {mbox_3}
'name': 'debian', # <----β---------βββ {debian}
            'path': '/var/mail/$USER', # β
'urgent': False, # <----β---- disable urgent
}, # β
], # β
'mh': [ # βββ {mh}
{'path': '~/mh_mail'}, # β βββ {mh_1}
], # β
'babyl': [ # βββ {babyl}
{'path': '~/babyl_mail'}, # β βββ {babyl_1}
], # β
'mmdf': [ # βββ {mmdf}
{'path': '~/mmdf_mail'}, # β βββ {mmdf_1}
            ], # β
'imap': [ # βββ {imap}
{ # β βββ {imap_1}
'name': 'home', # <----β----β----βββ {home}
'user': 'lasers', # β β
'password': 'kiss_my_butt!', # β β
'server': 'imap.gmail.com', # β βΒ
# <---β----β no filters to
'port': 993, # β β search folders, use
# β β filters ['^INBOX$']
}, # β β
{ # β βββ {imap_2}
'name': 'work', # <----β---------βββ {work}
'user': 'tobes', # β
'password': 'i_love_python', #
'server': 'imap.yahoo.com', #
# <---- no port, use port 993
'urgent': False, # <---- disable urgent
# for this account
            'filters': ['^INBOX$'], # <---- specify a list of filters
# to search folders
'log': True, # <---- print a list of folders
} # to filter in the log
]
}
allow_urgent = False <---- disable urgent for all accounts
}
# add colors, disable urgent
mail {
format = '[\?color=mail&show Mail] {mail}'
thresholds = [(1, 'good'), (5, 'degraded'), (15, 'bad')]
allow_urgent = False
}
# identify the mailboxes, remove what you don't need
mail {
format = '[\?color=mail '
format += '[\?if=imap&color=#00ff00 IMAP ]'
format += '[\?if=maildir&color=#ffff00 MAILDIR ]'
format += '[\?if=mbox&color=#ff0000 MBOX ]'
format += '[\?if=babyl&color=#ffa500 BABYL ]'
format += '[\?if=mmdf&color=#00bfff MMDF ]'
format += '[\?if=mh&color=#ee82ee MH ]'
format += ']'
format += '[\?not_zero&color Mail {mail}|No Mail]'
}
# individual colorized mailboxes, remove what you don't need
mail {
format = '[\?if=imap&color=#00ff00 IMAP] {imap} '
format += '[\?if=maildir&color=#ffff00 MAILDIR] {maildir} '
format += '[\?if=mbox&color=#ff0000 MBOX] {mbox} '
format += '[\?if=babyl&color=#ffa500 BABYL] {babyl} '
format += '[\?if=mmdf&color=#00bfff MMDF] {mmdf} '
format += '[\?if=mh&color=#ee82ee MH] {mh}'
allow_urgent = False
}
```
@author lasers
SAMPLE OUTPUT
{'full_text': 'Mail 15', 'urgent': True}
identified
[
{'full_text': 'IMAP ', 'color': '#00ff00'},
{'full_text': 'MAILDIR ', 'color': '#ffff00'},
{'full_text': 'MBOX ', 'color': '#ff0000'},
{'full_text': 'Mail 15'},
]
individualized
[
{'full_text': 'IMAP ', 'color': '#00ff00'}, {'full_text': 'Mail 10 '},
{'full_text': 'MAILDIR ', 'color': '#ffff00'}, {'full_text': 'Mail 2 '},
{'full_text': 'MBOX ', 'color': '#ff0000'}, {'full_text': 'Mail 3'},
]
no_mail
{'full_text': 'No Mail'}
"""
import mailbox
import os
from csv import reader
from imaplib import IMAP4_SSL, IMAP4
from pathlib import Path
STRING_MISSING = "missing {} {}"
STRING_INVALID_NAME = "invalid name `{}`"
STRING_INVALID_BOX = "invalid mailbox `{}`"
STRING_INVALID_FILTER = "invalid imap filters `{}`"
class Py3status:
""""""
# available configuration parameters
accounts = {}
cache_timeout = 60
format = r"\?not_zero Mail {mail}|No Mail"
thresholds = []
def post_config_hook(self):
if not self.accounts:
raise Exception("missing accounts")
self.first_run = True
self.mailboxes = {}
mailboxes = ["Maildir", "mbox", "mh", "Babyl", "MMDF", "IMAP"]
lowercased_names = [x.lower() for x in mailboxes]
reserved_names = lowercased_names + ["mail"]
for mail, accounts in self.accounts.items():
if mail not in lowercased_names:
raise Exception(STRING_INVALID_BOX.format(mail))
self.mailboxes[mail] = []
for account in accounts:
if "name" in account:
name = account["name"]
strip = name.rstrip("_0123456789")
if any(x in [name, strip] for x in reserved_names):
raise Exception(STRING_INVALID_NAME.format(name))
reserved_names.append(name)
account.setdefault("urgent", True)
if mail == "imap":
for v in ["user", "password", "server"]:
if v not in account:
raise Exception(STRING_MISSING.format(mail, v))
account.setdefault("port", 993)
if "filters" in account:
filters = account["filters"]
if not isinstance(filters, list):
raise Exception(STRING_INVALID_FILTER.format(filters))
else:
account["filters"] = ["^INBOX$"]
account["folders"] = []
self.mailboxes[mail].append(account)
else:
for box in mailboxes[:-1]:
if mail == box.lower():
if "path" not in account:
raise Exception(STRING_MISSING.format(mail, "path"))
path = Path(
os.path.expandvars(account["path"])
).expanduser()
if not path.exists():
path = f"path: {path}"
raise Exception(STRING_MISSING.format(mail, path))
account["box"] = box
account["path"] = path
self.mailboxes[mail].append(account)
break
self.thresholds_init = self.py3.get_color_names_list(self.format)
def mail(self):
mail_data = {"mail": 0, "urgent": False}
for k, v in self.mailboxes.items():
mail_data[k] = 0
for i, account in enumerate(v, 1):
if k == "imap":
inbox = IMAP4_SSL(account["server"], account["port"])
inbox.login(account["user"], account["password"])
if self.first_run:
import re
filters = "|".join(account.pop("filters"))
objs = [x.decode() for x in inbox.list()[1]]
folders = [x[-1] for x in reader(objs, delimiter=" ")]
lines = [f"===== IMAP {i} ====="]
for name in folders:
subscribed = " "
try:
if re.search(filters, name):
subscribed = "x"
folder = name.replace("\\", "\\\\")
folder = folder.replace('"', '\\"')
folder = f'"{folder}"'
account["folders"].append(folder)
except re.error:
account["folders"] = []
break
lines.append(f"[{subscribed}] {name}")
if not account["folders"]:
self.py3.error(
STRING_INVALID_FILTER.format(filters),
self.py3.CACHE_FOREVER,
)
if account.get("log") is True:
for line in lines:
self.py3.log(line)
count_mail = 0
for folder in account["folders"]:
if inbox.select(folder, readonly=True)[0] == "OK":
imap_data = inbox.search(None, "(UNSEEN)")
count_mail += len(imap_data[1][0].split())
else:
account["folders"].remove(folder)
try:
inbox.close()
inbox.logout()
except IMAP4.error:
pass
else:
inbox = getattr(mailbox, account["box"])(
account["path"], create=False
)
count_mail = len(inbox)
inbox.close()
if "name" in account:
mail_data[account["name"]] = count_mail
if account["urgent"] and count_mail:
mail_data["urgent"] = True
mail_data[f"{k}_{i}"] = count_mail
mail_data["mail"] += count_mail
mail_data[k] += count_mail
for x in self.thresholds_init:
if x in mail_data:
self.py3.threshold_get_color(mail_data[x], x)
self.first_run = False
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, mail_data),
}
if mail_data["urgent"]:
response["urgent"] = True
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | 5eb341b9dce20808e60280492b5116f7 | 41.278481 | 84 | 0.423278 | 4.134943 | false | false | false | false |
ultrabug/py3status | py3status/constants.py | 2 | 7247 | # This file contains various useful constants for py3status
GENERAL_DEFAULTS = {
"color_bad": "#FF0000",
"color_degraded": "#FFFF00",
"color_good": "#00FF00",
"color_separator": "#333333",
"colors": True,
"interval": 5,
"output_format": "i3bar",
}
MAX_NESTING_LEVELS = 4
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
TZTIME_FORMAT = "%Y-%m-%d %H:%M:%S %Z"
TIME_MODULES = ["time", "tztime"]
I3S_INSTANCE_MODULES = [
"battery",
"cpu_temperature",
"disk",
"ethernet",
"memory",
"path_exists",
"read_file",
"run_watch",
"tztime",
"volume",
"wireless",
]
I3S_SINGLE_NAMES = ["cpu_usage", "ddate", "ipv6", "load", "time"]
I3S_ALLOWED_COLORS = ["color_bad", "color_good", "color_degraded"]
# i3status modules that allow colors to be passed.
# general section also allows colors so is included.
I3S_COLOR_MODULES = ["general", "battery", "cpu_temperature", "disk", "load"]
I3S_MODULE_NAMES = I3S_SINGLE_NAMES + I3S_INSTANCE_MODULES
CONFIG_FILE_SPECIAL_SECTIONS = ["general", "py3status"]
ERROR_CONFIG = """
general {colors = true interval = 60}
order += "static_string py3status"
order += "tztime local"
order += "group error"
static_string py3status {format = "py3status"}
tztime local {format = "%c"}
group error{
button_next = 1
button_prev = 0
fixed_width = False
format = "{output}"
static_string error_min {format = "CONFIG ERROR" color = "#FF0000"}
static_string error {format = "$error" color = "#FF0000"}
}
"""
COLOR_NAMES_EXCLUDED = ["good", "bad", "degraded", "separator", "threshold", "None"]
COLOR_NAMES = {
"aliceblue": "#F0F8FF",
"antiquewhite": "#FAEBD7",
"aqua": "#00FFFF",
"aquamarine": "#7FFFD4",
"azure": "#F0FFFF",
"beige": "#F5F5DC",
"bisque": "#FFE4C4",
"black": "#000000",
"blanchedalmond": "#FFEBCD",
"blue": "#0000FF",
"blueviolet": "#8A2BE2",
"brown": "#A52A2A",
"burlywood": "#DEB887",
"cadetblue": "#5F9EA0",
"chartreuse": "#7FFF00",
"chocolate": "#D2691E",
"coral": "#FF7F50",
"cornflowerblue": "#6495ED",
"cornsilk": "#FFF8DC",
"crimson": "#DC143C",
"cyan": "#00FFFF",
"darkblue": "#00008B",
"darkcyan": "#008B8B",
"darkgoldenrod": "#B8860B",
"darkgray": "#A9A9A9",
"darkgrey": "#A9A9A9",
"darkgreen": "#006400",
"darkkhaki": "#BDB76B",
"darkmagenta": "#8B008B",
"darkolivegreen": "#556B2F",
"darkorange": "#FF8C00",
"darkorchid": "#9932CC",
"darkred": "#8B0000",
"darksalmon": "#E9967A",
"darkseagreen": "#8FBC8F",
"darkslateblue": "#483D8B",
"darkslategray": "#2F4F4F",
"darkslategrey": "#2F4F4F",
"darkturquoise": "#00CED1",
"darkviolet": "#9400D3",
"deeppink": "#FF1493",
"deepskyblue": "#00BFFF",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1E90FF",
"firebrick": "#B22222",
"floralwhite": "#FFFAF0",
"forestgreen": "#228B22",
"fuchsia": "#FF00FF",
"gainsboro": "#DCDCDC",
"ghostwhite": "#F8F8FF",
"gold": "#FFD700",
"goldenrod": "#DAA520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#ADFF2F",
"honeydew": "#F0FFF0",
"hotpink": "#FF69B4",
"indianred": "#CD5C5C",
"indigo": "#4B0082",
"ivory": "#FFFFF0",
"khaki": "#F0E68C",
"lavender": "#E6E6FA",
"lavenderblush": "#FFF0F5",
"lawngreen": "#7CFC00",
"lemonchiffon": "#FFFACD",
"lightblue": "#ADD8E6",
"lightcoral": "#F08080",
"lightcyan": "#E0FFFF",
"lightgoldenrodyellow": "#FAFAD2",
"lightgray": "#D3D3D3",
"lightgrey": "#D3D3D3",
"lightgreen": "#90EE90",
"lightpink": "#FFB6C1",
"lightsalmon": "#FFA07A",
"lightseagreen": "#20B2AA",
"lightskyblue": "#87CEFA",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#B0C4DE",
"lightyellow": "#FFFFE0",
"lime": "#00FF00",
"limegreen": "#32CD32",
"linen": "#FAF0E6",
"magenta": "#FF00FF",
"maroon": "#800000",
"mediumaquamarine": "#66CDAA",
"mediumblue": "#0000CD",
"mediumorchid": "#BA55D3",
"mediumpurple": "#9370DB",
"mediumseagreen": "#3CB371",
"mediumslateblue": "#7B68EE",
"mediumspringgreen": "#00FA9A",
"mediumturquoise": "#48D1CC",
"mediumvioletred": "#C71585",
"midnightblue": "#191970",
"mintcream": "#F5FFFA",
"mistyrose": "#FFE4E1",
"moccasin": "#FFE4B5",
"navajowhite": "#FFDEAD",
"navy": "#000080",
"oldlace": "#FDF5E6",
"olive": "#808000",
"olivedrab": "#6B8E23",
"orange": "#FFA500",
"orangered": "#FF4500",
"orchid": "#DA70D6",
"palegoldenrod": "#EEE8AA",
"palegreen": "#98FB98",
"paleturquoise": "#AFEEEE",
"palevioletred": "#DB7093",
"papayawhip": "#FFEFD5",
"peachpuff": "#FFDAB9",
"peru": "#CD853F",
"pink": "#FFC0CB",
"plum": "#DDA0DD",
"powderblue": "#B0E0E6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#FF0000",
"rosybrown": "#BC8F8F",
"royalblue": "#4169E1",
"saddlebrown": "#8B4513",
"salmon": "#FA8072",
"sandybrown": "#F4A460",
"seagreen": "#2E8B57",
"seashell": "#FFF5EE",
"sienna": "#A0522D",
"silver": "#C0C0C0",
"skyblue": "#87CEEB",
"slateblue": "#6A5ACD",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#FFFAFA",
"springgreen": "#00FF7F",
"steelblue": "#4682B4",
"tan": "#D2B48C",
"teal": "#008080",
"thistle": "#D8BFD8",
"tomato": "#FF6347",
"turquoise": "#40E0D0",
"violet": "#EE82EE",
"wheat": "#F5DEB3",
"white": "#FFFFFF",
"whitesmoke": "#F5F5F5",
"yellow": "#FFFF00",
"yellowgreen": "#9ACD32",
}
ON_TRIGGER_ACTIONS = ["refresh", "refresh_and_freeze"]
POSITIONS = ["left", "center", "right"]
RETIRED_MODULES = {
"bitcoin_price": {
"new": ["coin_market"],
"msg": "Module {old} is no longer available due to unmaintained APIs. You can try a different module {new}.",
},
"nvidia_temp": {
"new": ["nvidia_smi"],
"msg": "Module {old} has been replaced with a module {new}.",
},
"scratchpad_async": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"scratchpad_counter": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title_async": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"weather_yahoo": {
"new": ["weather_owm"],
"msg": "Module {old} is no longer available due to retired Yahoo Weather APIs and new Oath requirements. You can try a different module {new}.",
},
"xkb_layouts": {
"new": ["xkb_input"],
"msg": "Module {old} has been replaced with a module {new} to support sway too.",
},
}
MARKUP_LANGUAGES = ["pango", "none"]
ON_ERROR_VALUES = ["hide", "show"]
| bsd-3-clause | e77ed097d53b03d76748f86a52cb7e21 | 26.873077 | 152 | 0.557058 | 2.722389 | false | false | false | false |
devilry/devilry-django | devilry/project/develop/testhelpers/datebuilder.py | 1 | 1191 | from datetime import datetime
from datetime import timedelta
DJANGO_ISODATETIMEFORMAT = 'Y-m-d H:i'
def isoformat_datetime(datetimeobj):
return datetimeobj.strftime('%Y-%m-%d %H:%M')
class DateTimeBuilder(datetime):
"""
Extends the builtin python :class:`datetime.datetime` with extra utility methods.
Examples::
tomorrow = DateTimeBuilder.now().plus(days=1)
yesterday_startofday = DateTimeBuilder.now().daystart().minus(days=1)
"""
def minus(self, weeks=0, days=0, hours=0, minutes=0, seconds=0):
self -= timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)
return self
def plus(self, weeks=0, days=0, hours=0, minutes=0, seconds=0):
self += timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)
return self
def daystart(self):
"""
Set the time to ``00:00:00``.
"""
self.replace(hour=0, minute=0, second=0, microsecond=0)
return self
def dayend(self):
"""
Set the time to ``23:59:59``.
"""
self.replace(hour=23, minute=59, second=59, microsecond=0)
return self
| bsd-3-clause | a8605eab1987181c6049b5a9d6f5d8c1 | 28.775 | 96 | 0.632242 | 3.620061 | false | false | false | false |
devilry/devilry-django | devilry/devilry_import_v2database/modelimporters/delivery_feedback_importers.py | 1 | 13855 | import os
import pprint
import sys
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import files
from devilry.apps.core.models import AssignmentGroup
from devilry.apps.core.models import Candidate
from devilry.devilry_comment.models import Comment, CommentFile
from devilry.devilry_group.models import GroupComment, FeedbackSet
from devilry.devilry_import_v2database import modelimporter
from devilry.devilry_import_v2database.modelimporters import modelimporter_utils
from devilry.devilry_import_v2database.modelimporters.modelimporter_utils import BulkCreator
from devilry.utils import datetimeutils
class CommentFileFileDoesNotExist(Exception):
def __init__(self, filepath, comment_file):
self.filepath = filepath
self.comment_file = comment_file
def __str__(self):
message = \
'File {filepath!r}, CommentFile.id={commentfile_id}, v2_id={v2_id!r}. ' \
'Not writing file, this means that the CommentFile.file will be blank.'.format(
filepath=self.filepath,
v2_id=self.comment_file.v2_id,
commentfile_id=self.comment_file.id)
return message
class CommentFileIOError(Exception):
pass
class ImporterMixin(object):
def _get_feedback_set_from_id(self, feedback_set_id):
try:
feedback_set = FeedbackSet.objects.get(id=feedback_set_id)
except FeedbackSet.DoesNotExist:
raise modelimporter.ModelImporterException(
'FeedbackSet with id {} does not exist'.format(feedback_set_id))
return feedback_set
def _get_user_from_id(self, user_id):
user_model = get_user_model()
try:
user = user_model.objects.get(id=user_id)
except user_model.DoesNotExist:
raise modelimporter.ModelImporterException(
'User with id {} does not exist'.format(user_id))
return user
def _get_user_from_id_with_fallback(self, user_id, fallback=None):
user_model = get_user_model()
try:
user = user_model.objects.get(id=user_id)
except user_model.DoesNotExist:
return fallback
return user
class DeliveryImporter(ImporterMixin, modelimporter.ModelImporter):
def get_model_class(self):
return GroupComment
def get_model_super_class(self):
"""
Notes::
We use the Comment(which GroupComment inherits from) to be able to set the sequencing number
for Comment objects.
"""
return Comment
def _user_is_candidate_in_group(self, assignment_group, user):
group_queryset = AssignmentGroup.objects.filter(id=assignment_group.id).filter_user_is_candidate(user=user)
if group_queryset.count() == 0:
return False
return True
def _get_user_from_candidate_id(self, candidate_id):
try:
candidate = Candidate.objects.get(id=candidate_id)
except Candidate.DoesNotExist:
return None
else:
return candidate.relatedstudent.user
def _create_group_comment_from_object_dict(self, object_dict):
group_comment = self.get_model_class()()
self.patch_model_from_object_dict(
model_object=group_comment,
object_dict=object_dict,
attributes=[
'pk',
('pk', 'id'),
('time_of_delivery', 'created_datetime'),
('time_of_delivery', 'published_datetime')
]
)
feedback_set_id = object_dict['fields']['deadline']
group_comment.user = self._get_user_from_candidate_id(object_dict['fields']['delivered_by'])
group_comment.feedback_set_id = feedback_set_id
group_comment.text = 'Delivery'
group_comment.comment_type = GroupComment.COMMENT_TYPE_GROUPCOMMENT
group_comment.user_role = GroupComment.USER_ROLE_STUDENT
group_comment.v2_id = modelimporter_utils.make_flat_v2_id(object_dict)
if self.should_clean():
group_comment.full_clean()
group_comment.save()
return group_comment
def import_models(self, fake=False):
directory_parser = self.v2delivery_directoryparser
directory_parser.set_max_id_for_models_with_auto_generated_sequence_numbers(
model_class=self.get_model_super_class())
for object_dict in directory_parser.iterate_object_dicts():
if fake:
print(('Would import: {}'.format(pprint.pformat(object_dict))))
else:
self._create_group_comment_from_object_dict(object_dict=object_dict)
class StaticFeedbackImporter(ImporterMixin, modelimporter.ModelImporter):
def get_model_class(self):
return GroupComment
def _user_is_examiner_on_group(self, assignment_group, user):
group_queryset = AssignmentGroup.objects\
.filter(id=assignment_group.id)\
.filter_user_is_examiner(user=user)
if group_queryset.count() == 0:
return False
return True
def _save_and_publish_feedback_set(self, feedback_set, published_by, grading_points, publish_datetime):
"""
Publish Feedback.
"""
feedback_set.grading_published_by = published_by
feedback_set.grading_points = grading_points
feedback_set.grading_published_datetime = publish_datetime
if self.should_clean():
feedback_set.full_clean()
feedback_set.save()
def _create_feedback_comment_files(self, group_comment, staticfeedback_id, file_infos_dict):
"""
Create and save CommentFiles for each file uploaded by examiners in v2.
"""
if not isinstance(file_infos_dict, dict):
# Handle the slightly older format where the files where
# a list, not a dict - just to avoid crashes until we
# create a new dump. This can be removed later.
sys.stderr.write('x')
return []
commentfiles = []
for file_info_dict in list(file_infos_dict.values()):
mimetype = modelimporter_utils.get_mimetype_from_filename(file_info_dict['filename'])
comment_file = CommentFile(
comment=group_comment,
mimetype=mimetype,
filename=file_info_dict['filename'],
filesize=0,
v2_id=modelimporter_utils.make_staticfeedback_fileattachment_v2_id(
staticfeedback_id=staticfeedback_id,
attachment_id=file_info_dict['id'])
)
commentfiles.append(comment_file)
return commentfiles
def _create_group_comment_from_object_dict(self, object_dict):
group_comment = self.get_model_class()()
self.patch_model_from_object_dict(
model_object=group_comment,
object_dict=object_dict,
attributes=[
('save_timestamp', 'created_datetime'),
('save_timestamp', 'published_datetime')
]
)
feedback_set = self._get_feedback_set_from_id(feedback_set_id=object_dict['fields']['deadline_id'])
examiner_user = self._get_user_from_id_with_fallback(object_dict['fields']['saved_by'])
group_comment.user = examiner_user
self._save_and_publish_feedback_set(
feedback_set=feedback_set,
published_by=examiner_user,
grading_points=object_dict['fields']['points'],
publish_datetime=datetimeutils.from_isoformat(object_dict['fields']['save_timestamp'])
)
group_comment.feedback_set = feedback_set
group_comment.part_of_grading = True
group_comment.text = object_dict['fields']['rendered_view']
group_comment.comment_type = GroupComment.COMMENT_TYPE_GROUPCOMMENT
group_comment.user_role = GroupComment.USER_ROLE_EXAMINER
group_comment.v2_id = modelimporter_utils.make_flat_v2_id(object_dict)
if self.should_clean():
group_comment.full_clean()
group_comment.save()
commentfiles = self._create_feedback_comment_files(
group_comment,
staticfeedback_id=object_dict['pk'],
file_infos_dict=object_dict['fields']['files'])
self.log_create(model_object=group_comment, data=object_dict)
return group_comment, commentfiles
def import_models(self, fake=False):
with BulkCreator(model_class=CommentFile) as commentfile_bulk_creator:
for object_dict in self.v2staticfeedback_directoryparser.iterate_object_dicts():
if fake:
print(('Would import: {}'.format(pprint.pformat(object_dict))))
else:
group_comment, commentfiles = self._create_group_comment_from_object_dict(object_dict=object_dict)
if commentfiles:
commentfile_bulk_creator.add(*commentfiles)
class FileMetaImporter(ImporterMixin, modelimporter.ModelImporter):
def get_model_class(self):
return CommentFile
def _create_comment_file_from_object_id(self, object_dict):
comment_file = self.get_model_class()()
self.patch_model_from_object_dict(
model_object=comment_file,
object_dict=object_dict,
attributes=[
'filename'
]
)
comment_id = object_dict['fields']['delivery']
comment_file.comment_id = comment_id
comment_file.filesize = 0
comment_file.mimetype = modelimporter_utils.get_mimetype_from_filename(
filename=object_dict['fields'].get('filename', None))
comment_file.v2_id = modelimporter_utils.make_flat_v2_id(object_dict)
return comment_file
def import_models(self, fake=False):
with BulkCreator(model_class=CommentFile) as commentfile_bulk_creator:
for object_dict in self.v2filemeta_directoryparser.iterate_object_dicts():
if fake:
print(('Would import: {}'.format(pprint.pformat(object_dict))))
else:
comment_file = self._create_comment_file_from_object_id(object_dict=object_dict)
commentfile_bulk_creator.add(comment_file)
class CommentFileContentImporter(ImporterMixin, modelimporter.ModelImporter):
def get_model_class(self):
return CommentFile
def _write_file_to_commentfile(self, comment_file, filepath):
if not os.path.exists(filepath):
raise CommentFileFileDoesNotExist(filepath, comment_file)
comment_file.filesize = os.stat(filepath).st_size
fp = open(filepath, 'rb')
comment_file.file = files.File(fp, comment_file.filename)
if self.should_clean():
comment_file.full_clean()
try:
comment_file.save()
except IOError as error:
raise CommentFileIOError('Failed to write CommentFile#{commentfile_id}, filepath={filepath}: {error}'.format(
commentfile_id=comment_file.id,
filepath=filepath,
error=error))
fp.close()
def _copy_commentfile_file_from_filemeta(self, comment_file, v2idstring):
v2_delivery_file_root = getattr(settings, 'DEVILRY_V2_DELIVERY_FILE_ROOT', None)
if not v2_delivery_file_root:
return
object_dict = self.v2filemeta_directoryparser.get_object_dict_by_id(id=v2idstring)
filepath = os.path.join(v2_delivery_file_root,
object_dict['fields']['relative_file_path'])
self._write_file_to_commentfile(comment_file=comment_file,
filepath=filepath)
def _copy_commentfile_file_from_staticfeedbackfileattachment(self, comment_file, v2idstring):
v2_media_root = getattr(settings, 'DEVILRY_V2_MEDIA_ROOT', None)
if not v2_media_root:
return
staticfeedback_id, attachment_id = v2idstring.split('__')
object_dict = self.v2staticfeedback_directoryparser.get_object_dict_by_id(id=staticfeedback_id)
feedbackattachments = object_dict['fields'].get('files', None) or {}
attachment = feedbackattachments[attachment_id]
filepath = os.path.join(v2_media_root, attachment['relative_file_path'])
self._write_file_to_commentfile(comment_file=comment_file,
filepath=filepath)
def _copy_commentfile_file(self, comment_file):
v2model, v2idstring = comment_file.v2_id.split('__', 1)
if v2model == 'filemeta':
# Deliveries
self._copy_commentfile_file_from_filemeta(
comment_file=comment_file,
v2idstring=v2idstring)
elif v2model == 'staticfeedbackfileattachment':
# Attachments to feedbacks
self._copy_commentfile_file_from_staticfeedbackfileattachment(
comment_file=comment_file,
v2idstring=v2idstring)
else:
raise ValueError('Invalid v2model: {}'.format(v2model))
def import_models(self, fake=False):
does_not_exist = []
with modelimporter_utils.ProgressDots() as progressdots:
for comment_file in CommentFile.objects.exclude(v2_id='').iterator():
try:
self._copy_commentfile_file(comment_file)
except CommentFileFileDoesNotExist as error:
does_not_exist.append(error)
progressdots.increment_progress()
if does_not_exist:
print('Some of the source files did not exist.', file=sys.stderr)
for error in does_not_exist:
print('- {}'.format(error), file=sys.stderr)
| bsd-3-clause | 6bb88cabd685d48cd5b84c35ece5b4f6 | 41.894737 | 121 | 0.626416 | 3.974469 | false | false | false | false |
devilry/devilry-django | devilry/devilry_superadmin/management/commands/devilry_periodadd.py | 1 | 3417 | from datetime import datetime
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
class RecordSaveModCommand(BaseCommand):
def save_record(self, record, verbosity):
try:
record.full_clean()
except ValidationError as e:
errmsg = []
for key, messages in e.message_dict.items():
errmsg.append('{0}: {1}'.format(key, ' '.join(messages)))
raise CommandError('\n'.join(errmsg))
record.save()
if verbosity > 0:
print('"{0}" saved successfully.'.format(record.__class__.__name__, record))
class Command(RecordSaveModCommand):
# args = '<course short name> <period short_name>'
date_and_time_format = "%Y-%m-%dT%H:%M"
help = 'Create a new period.'
def add_arguments(self, parser):
parser.add_argument(
'subject_short_name',
help='Short name of the subject to add the period within.'
)
parser.add_argument(
'period_short_name',
help='Short name of the period you want to create.'
)
parser.add_argument(
'--long-name',
dest='long_name',
default=None,
required=True,
help='Long name of period (Required)'),
parser.add_argument(
'--start-time',
dest='start_time',
default=None,
required=True,
help='The start time of the period on the format specified in --datetime-format (Required)'),
parser.add_argument(
'--end-time',
dest='end_time',
default=None,
required=True,
help='The end time of the period on the format specified in --datetime-format (Required)'),
parser.add_argument(
'--datetime-format',
dest='datetime_format',
default=self.date_and_time_format,
help='The date format expressed in a format according to '
'strftime http://docs.python.org/library/datetime.html#strftime-strptime-behavior. '
'Defaults to YYYY-MM-DDThh:mm')
def handle(self, *args, **options):
from devilry.apps.core.models import Subject, Period
subject_short_name = options['subject_short_name']
period_short_name = options['period_short_name']
# Get the subject
try:
self.subject = Subject.objects.get(short_name=subject_short_name)
except Subject.DoesNotExist as e:
raise CommandError('Subject with short name %s does not exist.' % subject_short_name)
verbosity = int(options.get('verbosity', '1'))
date_and_time_format = options['datetime_format']
start_time = datetime.strptime(options['start_time'], date_and_time_format)
end_time = datetime.strptime(options['end_time'], date_and_time_format)
if Period.objects.filter(parentnode=self.subject, short_name=period_short_name).count() == 0:
period_long_name = options['long_name']
record = Period(short_name=period_short_name, long_name=period_long_name, parentnode=self.subject, start_time=start_time, end_time=end_time)
self.save_record(record, verbosity)
else:
raise CommandError('Period "{0}" already exists.'.format(period_short_name))
| bsd-3-clause | 11ed6baa02476740e2d77d4338b35e5c | 41.185185 | 152 | 0.599649 | 4.223733 | false | false | false | false |
devilry/devilry-django | devilry/devilry_gradeform/urls.py | 1 | 1110 | from django.urls import re_path
from devilry.devilry_gradeform.views.setup_create_gradeform import CreateGradeForm
urlpatterns = [
re_path(r'^create/(?P<assignment_id>[0-9]+)', CreateGradeForm.as_view(), name='create-setup-gradeform'),
# url(r'^$',
# grade_form.AdvancedGradeForm.render_setup(AdvancedGradeForm(), None),
# name='devilry-gradeform-setup'),
# url(r'^$',
# grade_form.AdvancedGradeForm.render_editable(AdvancedGradeForm(), None, None),
# name='devilry-gradeform-advanced-editable'),
]
# from django.conf.urls import url
# from devilry.devilry_gradeform.views.setup_create_gradeform import CreateGradeForm
# urlpatterns = [
# url(r'^create/(?P<assignment_id>[0-9]+)', CreateGradeForm.as_view(), name='create-setup-gradeform'),
# # url(r'^$',
# # grade_form.AdvancedGradeForm.render_setup(AdvancedGradeForm(), None),
# # name='devilry-gradeform-setup'),
# # url(r'^$',
# # grade_form.AdvancedGradeForm.render_editable(AdvancedGradeForm(), None, None),
# # name='devilry-gradeform-advanced-editable'),
# ]
| bsd-3-clause | ff14c6fa68693183ce74d16a90d2cc53 | 40.111111 | 108 | 0.673874 | 2.991914 | false | false | true | false |
devilry/devilry-django | devilry/utils/delivery_collection.py | 1 | 8118 | from django.utils.formats import date_format
from django.http import HttpResponse
from django.conf import settings
from .stream_archives import StreamableZip, StreamableTar
class ArchiveException(Exception):
    """Raised when a download archive cannot be built or streamed
    (unsupported archive type, or a file too large for chunked streaming)."""
def create_archive_from_assignmentgroups(assignmentgroups, file_name, archive_type):
    """
    Stream all deliveries of every group in ``assignmentgroups`` as one
    downloadable archive named ``<file_name>.<archive_type>``.

    :param assignmentgroups: Iterable of assignment groups whose deliveries
        should be packed into the archive.
    :param file_name: Base name (without extension) of the downloaded file.
    :param archive_type: One of the types accepted by
        :func:`get_archive_from_archive_type`.
    :return: A Django ``HttpResponse`` that streams the archive.
    """
    streamable_archive = get_archive_from_archive_type(archive_type)
    content_iterator = iter_archive_assignmentgroups(streamable_archive, assignmentgroups)
    response = HttpResponse(content_iterator, content_type="application/%s" % archive_type)
    disposition = "attachment; filename=%s.%s" % (file_name, archive_type)
    response["Content-Disposition"] = disposition
    return response
def create_archive_from_delivery(delivery, archive_type):
    """
    Stream a single delivery's files as a downloadable archive named after
    the assignment path (``assignment.get_path()``).

    :param delivery: The delivery whose files are packed.
    :param archive_type: One of the types accepted by
        :func:`get_archive_from_archive_type`.
    :return: A Django ``HttpResponse`` that streams the archive.
    """
    streamable_archive = get_archive_from_archive_type(archive_type)
    group = delivery.assignment_group
    assignment = group.parentnode
    directory_name = _get_assignmentgroup_name(group)
    content_iterator = iter_archive_deliveries(
        streamable_archive, directory_name, assignment.get_path(), [delivery])
    response = HttpResponse(content_iterator, content_type="application/%s" % archive_type)
    response["Content-Disposition"] = "attachment; filename=%s.%s" % \
        (assignment.get_path(), archive_type)
    return response
def iter_archive_deliveries(archive, group_name, directory_prefix, deliveries):
    """
    Add the files of every delivery in ``deliveries`` to ``archive``,
    yielding the archive bytes produced so far after each file is added
    (or after each chunk, for files larger than
    ``DEVILRY_MAX_ARCHIVE_CHUNK_SIZE``). The returned object is a generator.

    :param archive: A streamable archive, see
        :func:`get_archive_from_archive_type`.
    :param group_name: Directory name for the group inside the archive.
    :param directory_prefix: Top-level directory inside the archive
        (normally the assignment path).
    :param deliveries: Deliveries to add. When there is more than one, each
        delivery gets its own numbered subdirectory and a ``Deliveries.txt``
        summary is appended at the end.
    :raises ArchiveException: If a file is larger than
        ``DEVILRY_MAX_ARCHIVE_CHUNK_SIZE`` and the archive format cannot
        stream it in chunks.
    """
    include_delivery_explanation = False
    if len(deliveries) > 1:
        include_delivery_explanation = True
        multiple_deliveries_content = "Delivery-ID File count Total size"\
            " Delivery time \r\n"
    for delivery in deliveries:
        metas = delivery.filemetas.all()
        delivery_size = 0
        for f in metas:
            delivery_size += f.size
            filename = "%s/%s/%s" % (directory_prefix, group_name,
                                     f.filename)
            if include_delivery_explanation:
                # With multiple deliveries, nest each under its number.
                filename = "%s/%s/%d/%s" % (directory_prefix, group_name,
                                            delivery.number, f.filename)
            # File size is greater than DEVILRY_MAX_ARCHIVE_CHUNK_SIZE bytes:
            # write only chunks of size DEVILRY_MAX_ARCHIVE_CHUNK_SIZE to the archive.
            if f.size > settings.DEVILRY_MAX_ARCHIVE_CHUNK_SIZE:
                if not archive.can_write_chunks():
                    # Bugfix: the original message kept a bare '%s'
                    # placeholder that was never substituted.
                    raise ArchiveException("The size of file %s is greater than "
                                           "the maximum allowed size. Download "
                                           "stream aborted." % f.filename)
                chunk_size = settings.DEVILRY_MAX_ARCHIVE_CHUNK_SIZE
                # Open file stream for reading
                file_to_stream = f.read_open()
                # Open a filestream in the archive
                archive.open_filestream(filename, f.size)
                for i in inclusive_range(chunk_size, f.size, chunk_size):
                    bytes = file_to_stream.read(chunk_size)
                    archive.append_file_chunk(bytes, len(bytes))
                    # Read the chunk from the archive and yield the data
                    yield archive.read()
                archive.close_filestream()
            else:
                bytes = f.read_open().read(f.size)
                archive.add_file(filename, bytes)
                # Read the content from the streamable archive and yield the data
                yield archive.read()
        if include_delivery_explanation:
            multiple_deliveries_content += "  %3d        %3d       %5d  "\
                                           "%s\r\n" % \
                                           (delivery.number, len(metas),
                                            delivery_size,
                                            date_format(delivery.time_of_delivery,
                                                        "DATETIME_FORMAT"))
    # Adding file explaining multiple deliveries
    if include_delivery_explanation:
        archive.add_file("%s/%s/%s" %
                         (directory_prefix, group_name,
                          "Deliveries.txt"),
                         multiple_deliveries_content.encode("ascii"))
def iter_archive_assignmentgroups(archive, assignmentgroups):
    """
    Generator: pack the deliveries of each group in ``assignmentgroups``
    into ``archive`` and yield the archive data as it becomes available.
    """
    name_matches = _get_dictionary_with_name_matches(assignmentgroups)
    for group in assignmentgroups:
        directory_name = _get_assignmentgroup_name(group)
        # Multiple groups with the same members would collide on directory
        # name, so postfix those with the assignmentgroup id.
        if name_matches[directory_name] > 1:
            directory_name = "%s+%d" % (directory_name, group.id)
        deliveries = group.deliveries.all()
        for chunk in iter_archive_deliveries(archive, directory_name,
                                             group.parentnode.get_path(),
                                             deliveries):
            yield chunk
    archive.close()
    yield archive.read()
def get_archive_from_archive_type(archive_type):
    """
    Map ``archive_type`` to a streamable archive instance.

    :param archive_type: ``'zip'``, ``'tar'``, ``'tgz'`` or ``'tar.gz'``.
    :raises ArchiveException: For any other archive type.
    """
    if archive_type == 'zip':
        return StreamableZip()
    if archive_type in ('tar', 'tgz', 'tar.gz'):
        return StreamableTar(archive_type)
    raise ArchiveException("archive_type is invalid:%s" % archive_type)
def verify_groups_not_exceeding_max_file_size(assignmentgroups):
    """
    Run :func:`verify_deliveries_not_exceeding_max_file_size` on the
    deliveries of every group in ``assignmentgroups``.

    :raises ArchiveException: If any delivered file exceeds
        ``settings.DEVILRY_MAX_ARCHIVE_CHUNK_SIZE``.
    """
    for group in assignmentgroups:
        verify_deliveries_not_exceeding_max_file_size(group.deliveries.all())
def verify_deliveries_not_exceeding_max_file_size(deliveries):
    """
    Check every file of every delivery in ``deliveries`` against
    ``settings.DEVILRY_MAX_ARCHIVE_CHUNK_SIZE``.

    :raises ArchiveException: If the size of a file exceeds the limit. The
        exception message names the offending file and the limit (the
        original raised a message-less ``ArchiveException()``, which made
        failures impossible to diagnose from logs).
    """
    max_size = settings.DEVILRY_MAX_ARCHIVE_CHUNK_SIZE
    for delivery in deliveries:
        for filemeta in delivery.filemetas.all():
            if filemeta.size > max_size:
                raise ArchiveException(
                    "The size of file %s exceeds the maximum allowed "
                    "size (%d bytes)." % (filemeta.filename, max_size))
def inclusive_range(start, stop, step=1):
    """
    A range() clone that includes the right limit: when the last step does
    not land exactly on ``stop``, ``stop`` itself is appended as the final
    element.

    Bugfix: the original appended ``stop`` even when the sequence already
    ended exactly on it, producing a duplicated final element — e.g.
    ``inclusive_range(4, 8, 4)`` returned ``[4, 8, 8]`` (which made the
    chunked-download loop in :func:`iter_archive_deliveries` do one extra
    empty read per exactly-divisible file).

    :return: A list of values; empty when ``start > stop``.
    """
    values = []
    x = start
    while x <= stop:
        values.append(x)
        x += step
    # Append the right limit only if the loop did not already reach it.
    if values and values[-1] != stop:
        values.append(stop)
    return values
def _get_assignmentgroup_name(assigmentgroup):
"""
Returns a string containing the group members of the
assignmentgroup separated by '-'.
"""
cands = assigmentgroup.get_candidates()
cands = cands.replace(", ", "-")
return cands
def _get_dictionary_with_name_matches(assignmentgroups):
    """
    Map each group name (as produced by :func:`_get_assignmentgroup_name`)
    to the number of groups in ``assignmentgroups`` sharing that name.
    """
    matches = {}
    for group in assignmentgroups:
        name = _get_assignmentgroup_name(group)
        matches[name] = matches.get(name, 0) + 1
    return matches
| bsd-3-clause | 2d8c5751b4b9493ac964fa3eb8b2bd84 | 40 | 88 | 0.598916 | 4.279389 | false | false | false | false |
devilry/devilry-django | devilry/devilry_import_v2database/tests/test_modelimporters/test_periodimporter.py | 1 | 9128 | import unittest
from django import test
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from model_bakery import baker
from devilry.apps.core.models import Period
from devilry.devilry_account import models as account_models
from devilry.devilry_account.models import SubjectPermissionGroup
from devilry.devilry_import_v2database.modelimporters.period_importer import PeriodImporter
from devilry.devilry_import_v2database.models import ImportedModel
from devilry.utils import datetimeutils
from .importer_testcase_mixin import ImporterTestCaseMixin
@unittest.skip('Not relevant anymore, keep for history.')
class TestPeriodImporter(ImporterTestCaseMixin, test.TestCase):
    """
    Tests for :class:`.PeriodImporter` importing ``core.period`` entries
    from a Devilry v2 database dump. Kept for history only (skipped).
    """

    def _create_model_meta(self):
        """Model metadata dict as found in a v2 dump (``max_id`` drives
        the auto-sequence restart, see the last test)."""
        return {
            'model_class_name': 'Period',
            'max_id': 16,
            'app_label': 'core'
        }

    def _create_period_dict(self, subject, test_admin_user=None):
        """Serialized v2 ``core.period`` entry parented on ``subject``,
        optionally with ``test_admin_user`` as admin."""
        return {
            'pk': 1,
            'model': 'core.period',
            'admin_user_ids': [test_admin_user.id] if test_admin_user else [],
            'fields': {
                'short_name': 'testsemester',
                'start_time': '2017-02-14T11:04:46.585',
                'parentnode': subject.id,
                'long_name': 'Testsemester',
                'admins': [test_admin_user.id] if test_admin_user else [],
                'etag': '2017-05-15T11:04:46.585',
                'end_time': '2017-08-13T11:04:46.585'
            }
        }

    def test_import(self):
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        self.assertEqual(Period.objects.count(), 1)

    def test_importer_pk(self):
        # The imported Period must keep the pk from the v2 dump.
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        period = Period.objects.first()
        self.assertEqual(period.id, 1)

    def test_importer_imported_model_with_admins(self):
        # Admins in the dump should result in a SubjectPermissionGroup.
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        period_data_dict = self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user)
        self.create_v2dump(model_name='core.period',
                           data=period_data_dict)
        subjectimporter = PeriodImporter(input_root=self.temp_root_dir)
        subjectimporter.import_models()
        self.assertEqual(SubjectPermissionGroup.objects.count(), 1)

    def test_importer_imported_model_without_admins(self):
        # No admins in the dump: no permission group is created.
        test_subject = baker.make('core.Subject')
        period_data_dict = self._create_period_dict(subject=test_subject)
        self.create_v2dump(model_name='core.period',
                           data=period_data_dict)
        subjectimporter = PeriodImporter(input_root=self.temp_root_dir)
        subjectimporter.import_models()
        self.assertEqual(SubjectPermissionGroup.objects.count(), 0)

    def test_importer_short_name(self):
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        period = Period.objects.first()
        self.assertEqual(period.short_name, 'testsemester')

    def test_importer_long_name(self):
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        period = Period.objects.first()
        self.assertEqual(period.long_name, 'Testsemester')

    def test_importer_start_time(self):
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        period = Period.objects.first()
        start_time = datetimeutils.from_isoformat('2017-02-14T11:04:46.585')
        self.assertEqual(period.start_time, start_time)

    def test_importer_end_time(self):
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        period = Period.objects.first()
        end_time = datetimeutils.from_isoformat('2017-08-13T11:04:46.585')
        self.assertEqual(period.end_time, end_time)

    def test_importer_subject(self):
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        period = Period.objects.first()
        self.assertEqual(period.parentnode, test_subject)

    def test_importer_permissiongroup_is_created(self):
        # The admin from the dump should end up with full admin rights on
        # the imported period (group, group-user and subject-group rows).
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user))
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        self.assertEqual(Period.objects.count(), 1)
        period = Period.objects.first()
        self.assertEqual(account_models.PermissionGroup.objects.count(), 1)
        self.assertEqual(account_models.PermissionGroupUser.objects.count(), 1)
        self.assertEqual(account_models.SubjectPermissionGroup.objects.count(), 1)
        periods_for_admin_list = Period.objects.filter_user_is_admin(test_admin_user)
        self.assertEqual(len(periods_for_admin_list), 1)
        self.assertEqual(periods_for_admin_list[0], period)

    # def test_importer_imported_model_log_created(self):
    #     test_admin_user = baker.make(settings.AUTH_USER_MODEL)
    #     test_subject = baker.make('core.Subject')
    #     period_data_dict = self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user)
    #     self.create_v2dump(model_name='core.period',
    #                        data=period_data_dict)
    #     periodimporter = PeriodImporter(input_root=self.temp_root_dir)
    #     periodimporter.import_models()
    #     period = Period.objects.first()
    #     self.assertEquals(ImportedModel.objects.count(), 1)
    #     imported_model = ImportedModel.objects.get(
    #         content_object_id=period.id,
    #         content_type=ContentType.objects.get_for_model(model=period)
    #     )
    #     self.assertEquals(imported_model.content_object, period)
    #     self.assertEquals(imported_model.data, period_data_dict)

    def test_auto_sequence_numbered_objects_uses_meta_max_id(self):
        # After import, new Periods must get ids above the dump's max_id.
        test_admin_user = baker.make(settings.AUTH_USER_MODEL)
        test_subject = baker.make('core.Subject')
        self.create_v2dump(model_name='core.period',
                           data=self._create_period_dict(subject=test_subject, test_admin_user=test_admin_user),
                           model_meta=self._create_model_meta())
        periodimporter = PeriodImporter(input_root=self.temp_root_dir)
        periodimporter.import_models()
        self.assertEqual(Period.objects.count(), 1)
        period = Period.objects.first()
        self.assertEqual(period.pk, 1)
        self.assertEqual(period.id, 1)
        period_with_auto_id = baker.make('core.Period')
        self.assertEqual(period_with_auto_id.id, self._create_model_meta()['max_id']+1)
        self.assertEqual(period_with_auto_id.pk, self._create_model_meta()['max_id']+1)
| bsd-3-clause | 9847f1e32c6b76f71fa41ef952813b91 | 49.994413 | 112 | 0.656661 | 3.528411 | false | true | false | false |
devilry/devilry-django | devilry/utils/anonymize_database.py | 1 | 6282 | # -*- coding: utf-8 -*-
import random
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import CharField
from django.db.models.functions import Concat
from devilry.devilry_account.models import UserEmail, UserName
from devilry.devilry_comment.models import Comment, CommentEditHistory, CommentFile
# Placeholder text used to overwrite comment bodies during anonymization.
lorem_ipsum = """Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Sed malesuada sagittis ipsum, quis malesuada sem placerat non.
Donec metus urna, consectetur a laoreet vitae, dapibus sit amet odio.
Morbi facilisis, nisl ut pellentesque consectetur, ipsum ante varius lectus, sed commodo felis metus
sagittis neque. Donec vitae tortor magna.
Nullam in massa quis sapien dignissim ullamcorper et quis urna.
Aenean facilisis quis mauris a porttitor.
Integer accumsan dolor sagittis sem sagittis sollicitudin quis bibendum diam.
Suspendisse malesuada, neque quis condimentum elementum, odio arcu pulvinar erat,
vitae imperdiet mauris turpis eu magna."""
class AnonymizeDatabaseException(Exception):
    """
    Raised when database anonymization fails.

    The message is kept on ``self.message`` for backwards compatibility,
    and is now also passed to ``Exception.__init__`` so that ``str(exc)``
    and tracebacks actually show it (the original discarded it, leaving
    ``str(exc)`` empty).
    """
    def __init__(self, message, *args, **kwargs):
        self.message = message
        super(AnonymizeDatabaseException, self).__init__(message, *args, **kwargs)
class AnonymizeDatabase(object):
    """
    Anonymizes:
        - User
        - UserEmail
        - UserNames
        - Comments (comment text)
        - CommentFiles (filename)

    Generated names are randomized character-by-character from the
    unanonymized name, keeping the same length.

    Replaces:
        - Digits
        - lower- and uppercase ASCII letters (case preserved).

    Does not replace:
        - spaces
        - hyphens and underscores
        - special characters (anything outside a-z/0-9)

    :param fast: When ``True`` (default), use bulk UPDATEs that derive
        names/emails from primary keys. When ``False``, randomize every
        string individually (much slower).
    """
    # Value returned for empty/missing input strings.
    FALLBACK = 'empty'
    LETTERS = 'abcdefghijklmnopqrstuvwxyz'
    DIGITS = '0123456789'
    # Characters copied through unchanged by randomize_string().
    NOOP_CHARACTERS = [' ', '_', '@', '-', '"']

    def __init__(self, fast=True):
        self.fast = fast

    def is_uppercase(self, character):
        """
        Return ``True`` if ``character`` is uppercase.
        """
        return character.isupper()

    def get_random_character(self, exclude_character):
        """
        Return a random character of the same class (digit or letter) and
        case as ``exclude_character``, guaranteed to differ from it.

        Characters outside a-z/0-9 (accented letters, punctuation, ...)
        are returned unchanged. (Bugfix: the original unconditionally
        called ``list.remove()``, which raised ``ValueError`` for any such
        character — contradicting the class docstring's promise not to
        touch special characters.)
        """
        if exclude_character.isdigit():
            character_list = list(self.DIGITS)
        else:
            character_list = list(self.LETTERS)
        lowered = exclude_character.lower()
        if lowered not in character_list:
            # Not an ASCII letter/digit we can substitute: keep as-is.
            return exclude_character
        character_list.remove(lowered)
        random_choice = random.choice(character_list)
        if exclude_character.isupper():
            return random_choice.upper()
        return random_choice.lower()

    def randomize_string(self, unanonymized_string):
        """
        Randomize ``unanonymized_string`` character by character, keeping
        length, case, digit-ness and ``NOOP_CHARACTERS`` intact.

        :return: The randomized string, or ``FALLBACK`` when the input is
            ``None`` or empty. (Bugfix: the original evaluated
            ``len(unanonymized_string)`` before the ``None`` check, so
            passing ``None`` raised ``TypeError`` instead of returning the
            fallback.)
        """
        if unanonymized_string is None or len(unanonymized_string) == 0:
            return self.FALLBACK
        anonymized_string = ''
        for character in list(unanonymized_string):
            if character in self.NOOP_CHARACTERS:
                anonymized_string += character
            else:
                anonymized_string += self.get_random_character(
                    exclude_character=character)
        return anonymized_string

    def __anonymize_user_data_fast(self):
        """
        Fast path: bulk UPDATEs that simply set user names/emails from the
        user IDs (no per-row randomization).
        """
        if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
            get_user_model().objects.update(
                fullname='Full Name',
                lastname='Lastname',
                shortname=Concat(models.F('id'), models.Value('@example.com'), output_field=CharField()))
        else:
            get_user_model().objects.update(
                fullname='Full Name',
                lastname='Lastname',
                shortname=Concat(models.F('id'), models.Value(''), output_field=CharField()))
        UserEmail.objects.update(
            email=Concat(models.F('user_id'), models.Value('_'), models.F('id'),
                         models.Value('@example.com'), output_field=CharField()))
        UserName.objects.update(
            username=Concat(models.F('user_id'), models.Value('_'),
                            models.F('id'), output_field=CharField()))

    def __anonymize_user_emails(self, user):
        # Randomize the local part of each address, keep '@example.com'.
        for user_email in UserEmail.objects.filter(user_id=user.id):
            email_prefix = user_email.email.split('@')[0]
            anonymized_email_prefix = self.randomize_string(unanonymized_string=email_prefix)
            user_email.email = '{}@example.com'.format(anonymized_email_prefix)
            user_email.save()

    def __anonymize_user_names(self, user):
        for user_name in UserName.objects.filter(user_id=user.id):
            anonymized_user_name = self.randomize_string(unanonymized_string=user_name.username)
            user_name.username = anonymized_user_name
            user_name.save()

    def __anonymize_user_data(self):
        """
        Slow path: randomize each user's shortname/fullname/lastname and
        all related emails and usernames individually.
        """
        for user in get_user_model().objects.all():
            shortname = user.shortname
            if '@' in shortname:
                shortname = shortname.split('@')[0]
            anonymized_shortname = self.randomize_string(unanonymized_string=shortname)
            if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
                anonymized_shortname += '@example.com'
            user.shortname = anonymized_shortname
            user.fullname = self.randomize_string(unanonymized_string=user.fullname)
            user.lastname = self.randomize_string(unanonymized_string=user.lastname)
            user.save()
            self.__anonymize_user_emails(user=user)
            self.__anonymize_user_names(user=user)

    def anonymize_user(self):
        """Anonymize all user data using the fast or slow path depending
        on ``self.fast``."""
        if self.fast:
            self.__anonymize_user_data_fast()
        else:
            self.__anonymize_user_data()

    def __anonymize_comments_fast(self):
        """
        Anonymize Comment text (and edit history) and CommentFile
        filenames with bulk UPDATEs.
        """
        Comment.objects.update(text=lorem_ipsum)
        CommentEditHistory.objects.update(post_edit_text=lorem_ipsum, pre_edit_text=lorem_ipsum)
        CommentFile.objects.update(filename=Concat(models.F('id'), models.Value(''), output_field=CharField()))

    def anonymize_comments(self):
        """Anonymize all comment text and comment-file names."""
        self.__anonymize_comments_fast()
| bsd-3-clause | f192efa1eea88b435c584eeaaf2315ff | 36.171598 | 111 | 0.630054 | 3.730404 | false | false | false | false |
devilry/devilry-django | devilry/devilry_dbcache/tests/test_groupcomment_triggers.py | 1 | 5750 | # -*- coding: utf-8 -*-
from django import test
from django.conf import settings
from django.core.files.base import ContentFile
from model_bakery import baker
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_comment.models import Comment, CommentEditHistory
from devilry.devilry_group.models import GroupComment, GroupCommentEditHistory
class TestGroupCommentTriggers(test.TestCase):
    """Database triggers around GroupComment deletion."""

    def setUp(self):
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_delete(self):
        comment = baker.make('devilry_group.GroupComment')
        comment_id = comment.id
        comment_file = baker.make('devilry_comment.CommentFile',
                                  comment=comment)
        comment_file.file.save('testfile.txt', ContentFile('test'))
        comment.delete()
        # Deleting the comment must remove it even though a file was attached.
        self.assertFalse(GroupComment.objects.filter(id=comment_id).exists())
class TestGroupCommentEditTrigger(test.TransactionTestCase):
    """
    The database trigger that records a (Group)CommentEditHistory row every
    time an existing GroupComment is UPDATEd (but not on INSERT).
    """

    def setUp(self):
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_group_comment_edit_history_not_created_on_create_sanity(self):
        # INSERT alone must not create any history rows.
        baker.make('devilry_group.GroupComment')
        self.assertEqual(GroupCommentEditHistory.objects.count(), 0)
        self.assertEqual(CommentEditHistory.objects.count(), 0)

    def test_group_comment_edit_history_created_on_update_sanity(self):
        # Any UPDATE (even without field changes) creates one history row.
        group_comment = baker.make('devilry_group.GroupComment')
        group_comment.save()
        self.assertEqual(GroupCommentEditHistory.objects.count(), 1)
        self.assertEqual(CommentEditHistory.objects.count(), 1)

    def test_updated_fields(self):
        user = baker.make(settings.AUTH_USER_MODEL)
        group_comment = baker.make('devilry_group.GroupComment',
                                   text='Comment text', user=user)
        group_comment.text = 'Comment text edited'
        group_comment.save()
        self.assertEqual(GroupCommentEditHistory.objects.count(), 1)
        self.assertEqual(CommentEditHistory.objects.count(), 1)
        group_comment_edit_history = GroupCommentEditHistory.objects.get()
        self.assertEqual(group_comment_edit_history.group_comment, group_comment)
        self.assertEqual(group_comment_edit_history.pre_edit_text, 'Comment text')
        self.assertEqual(group_comment_edit_history.post_edit_text, 'Comment text edited')
        self.assertEqual(group_comment_edit_history.edited_by, user)

    def test_group_comment_history_comment_history_no_duplicates(self):
        # Each edit creates exactly one row in each history table (the
        # subclass row must not duplicate the superclass row).
        user = baker.make(settings.AUTH_USER_MODEL)
        group_comment = baker.make('devilry_group.GroupComment',
                                   text='Comment text 1', user=user)
        group_comment.text = 'Comment text 2'
        group_comment.save()
        self.assertEqual(GroupCommentEditHistory.objects.count(), 1)
        self.assertEqual(CommentEditHistory.objects.count(), 1)

    def test_group_comment_history_points_to_comment_history(self):
        user = baker.make(settings.AUTH_USER_MODEL)
        group_comment = baker.make('devilry_group.GroupComment',
                                   text='Comment text 1', user=user)
        group_comment.text = 'Comment text 2'
        group_comment.save()
        self.assertEqual(GroupCommentEditHistory.objects.count(), 1)
        self.assertEqual(CommentEditHistory.objects.count(), 1)
        groupcommentedit_history = GroupCommentEditHistory.objects.get()
        commentedit_history = CommentEditHistory.objects.get()
        self.assertEqual(groupcommentedit_history.id, commentedit_history.id)
        self.assertEqual(groupcommentedit_history.commentedithistory_ptr_id, commentedit_history.id)
        self.assertEqual(groupcommentedit_history.commentedithistory_ptr, commentedit_history)

    def test_multiple_updates(self):
        user = baker.make(settings.AUTH_USER_MODEL)
        group_comment = baker.make('devilry_group.GroupComment',
                                   text='Comment text 1', user=user)
        group_comment.text = 'Comment text 2'
        group_comment.save()
        group_comment.text = 'Comment text 3'
        group_comment.save()
        self.assertEqual(GroupCommentEditHistory.objects.count(), 2)
        self.assertEqual(CommentEditHistory.objects.count(), 2)
        groupcommentedit_history = GroupCommentEditHistory.objects.order_by('edited_datetime')
        commentedit_history = CommentEditHistory.objects.order_by('edited_datetime')
        self.assertEqual(groupcommentedit_history[0].commentedithistory_ptr_id, commentedit_history[0].id)
        self.assertEqual(groupcommentedit_history[1].commentedithistory_ptr_id, commentedit_history[1].id)
        # Test for CommentEditHistory entries
        self.assertEqual(commentedit_history[0].pre_edit_text, 'Comment text 1')
        self.assertEqual(commentedit_history[0].post_edit_text, 'Comment text 2')
        self.assertEqual(commentedit_history[1].pre_edit_text, 'Comment text 2')
        self.assertEqual(commentedit_history[1].post_edit_text, 'Comment text 3')
        # Test for GroupCommentEditHistory entries. This is basically the same as the checks above
        # but Django makes it seem as GroupCommentEditHistory has the fields pre_edit_text and post_edit_text
        # when Django actually joins the table of the superclass and does a fk related lookup.
        self.assertEqual(groupcommentedit_history[0].pre_edit_text, 'Comment text 1')
        self.assertEqual(groupcommentedit_history[0].post_edit_text, 'Comment text 2')
        self.assertEqual(groupcommentedit_history[1].pre_edit_text, 'Comment text 2')
        self.assertEqual(groupcommentedit_history[1].post_edit_text, 'Comment text 3')
| bsd-3-clause | 582fb11ad4158343f1793465a85b9290 | 52.738318 | 109 | 0.704522 | 3.869448 | false | true | false | false |
devilry/devilry-django | devilry/devilry_email/feedback_email/feedback_email.py | 1 | 6466 | from django.utils.translation import gettext_lazy
import django_rq
from cradmin_legacy.crinstance import reverse_cradmin_url
from devilry.devilry_message.models import Message
from devilry.devilry_message.utils.subject_generator import SubjectTextGenerator
from devilry.devilry_email.utils import get_student_users_in_group
class FeedbackSubjectTextGenerator(SubjectTextGenerator):
    """
    Generates the translated email-subject line for feedback notification
    emails.

    :param assignment: The assignment the feedback belongs to.
    :param feedback_type: ``'feedback_created'`` or ``'feedback_edited'``.
    """
    def __init__(self, assignment, feedback_type=None):
        self.assignment = assignment
        self.feedback_type = feedback_type
        super(FeedbackSubjectTextGenerator, self).__init__()

    def get_subject_text(self):
        """
        Return the subject line for ``self.feedback_type``.

        :raises ValueError: When ``feedback_type`` is missing or unknown.
            (Bugfix: the original silently fell through and returned
            ``None`` for unknown types, producing emails without a
            subject.)
        """
        if not self.feedback_type:
            raise ValueError('Missing mailtype')
        if self.feedback_type == 'feedback_created':
            return gettext_lazy('Feedback for %(assignment_name)s') % {
                'assignment_name': self.assignment.long_name}
        if self.feedback_type == 'feedback_edited':
            return gettext_lazy('Feedback updated for %(assignment_name)s') % {
                'assignment_name': self.assignment.long_name}
        raise ValueError('Unsupported feedback_type: {}'.format(self.feedback_type))
def send_feedback_email(assignment, feedback_sets, domain_url_start, feedback_type):
    """
    Send a feedback mail to all students in an :class:`~.devilry.apps.core.models.AssignmentGroup` for
    a :class:`~.devilry.devilry_group.models.FeedbackSet`.

    Here's what this method does, step-by-step:

        1. Creates a :class:`~.devilry.devilry_message.models.Message` for each
           `FeedbackSet`.
        2. Calls :meth:`~.devilry.devilry_message.models.Message.prepare_and_send` which
           generates and sends an email to each user.

    Groups without any students are skipped (bugfix: the original used
    ``return`` here, which aborted the whole loop and silently dropped the
    notifications for every remaining feedback set).

    Args:
        assignment: An instance of :class:`~.devilry.apps.core.models.Assignment`
            the feedback belongs to.
        feedback_sets: An iterable of :class:`~.devilry.devilry_group.models.FeedbackSet`
            to notify about.
        domain_url_start: The domain address, e.g: "www.example.com".
        feedback_type: The type of feedback: new grading or updated grading.
    """
    from devilry.apps.core.models import Assignment
    template_name = 'devilry_email/feedback_email/assignment_feedback_student.txt'
    if feedback_type == 'feedback_created':
        message_context_type = Message.CONTEXT_TYPE_CHOICES.FEEDBACK.value
    elif feedback_type == 'feedback_edited':
        message_context_type = Message.CONTEXT_TYPE_CHOICES.FEEDBACK_UPDATED.value
    else:
        message_context_type = Message.CONTEXT_TYPE_CHOICES.OTHER.value
    subject_generator = FeedbackSubjectTextGenerator(assignment=assignment, feedback_type=feedback_type)
    # Re-fetch the assignment with the point-to-grade map prefetched.
    assignment = Assignment.objects \
        .prefetch_point_to_grade_map() \
        .filter(id=assignment.id)\
        .get()
    # Normalize the domain once - it is loop-invariant.
    domain_url_start = domain_url_start.rstrip('/')
    for feedback_set in feedback_sets:
        student_users = list(get_student_users_in_group(feedback_set.group))
        if len(student_users) == 0:
            # Bugfix: was ``return`` - skip this group only, not the rest.
            continue
        user_ids = [user.id for user in student_users]
        # Build absolute url to the student feedback feed.
        absolute_url = '{}{}'.format(
            domain_url_start,
            reverse_cradmin_url(
                instanceid='devilry_group_student',
                appname='feedbackfeed',
                roleid=feedback_set.group_id))
        template_dictionary = {
            'assignment': assignment,
            'devilryrole': 'student',
            'points': feedback_set.grading_points,
            'deadline_datetime': feedback_set.deadline_datetime,
            'corrected_datetime': feedback_set.grading_published_datetime,
            'url': absolute_url
        }
        # Create message object and save it.
        message = Message(
            virtual_message_receivers={'user_ids': user_ids},
            context_type=message_context_type,
            metadata={
                'points': feedback_set.grading_points,
                # NOTE(review): key is misspelled ('atetime'). Kept as-is
                # because stored metadata consumers may already read the
                # misspelled key - fix in a coordinated migration.
                'grading_published_atetime': feedback_set.grading_published_datetime.isoformat(),
                'feedbackset_id': feedback_set.id,
                'assignment_group_id': feedback_set.group_id,
                'assignment_id': feedback_set.group.parentnode_id
            },
            message_type=['email']
        )
        message.full_clean()
        message.save()
        # Prepare receivers and send.
        message.prepare_and_send(
            subject_generator=subject_generator,
            template_name=template_name,
            template_context=template_dictionary)
def bulk_feedback_mail(assignment_id, feedbackset_id_list, domain_url_start, feedback_type=None):
    """
    Fetch the assignment and feedback sets, then delegate to
    :func:`send_feedback_email`. Runs as the RQ job body for
    :func:`bulk_send_feedback_created_email` and
    :func:`bulk_send_feedback_edited_email`.

    Args:
        assignment_id: Id of the :class:`~.devilry.apps.core.models.Assignment`
            the feedbacks were given on.
        feedbackset_id_list: Ids of :class:`~.devilry.devilry_group.models.FeedbackSet`
            to notify about.
        domain_url_start: Domain url, e.g. "www.example.com".
        feedback_type: New grading or updated grading.
    """
    from devilry.devilry_group.models import FeedbackSet
    from devilry.apps.core.models import Assignment
    feedbacksets = (
        FeedbackSet.objects
        .select_related('group', 'group__parentnode', 'group__parentnode__parentnode')
        .filter(id__in=feedbackset_id_list))
    send_feedback_email(
        assignment=Assignment.objects.get(id=assignment_id),
        feedback_sets=feedbacksets,
        domain_url_start=domain_url_start,
        feedback_type=feedback_type)
def bulk_send_feedback_created_email(**kwargs):
    """
    Enqueue an RQ job (on the 'email' queue) notifying users that they
    received feedback. See :func:`bulk_feedback_mail`.
    """
    kwargs['feedback_type'] = 'feedback_created'
    email_queue = django_rq.get_queue(name='email')
    email_queue.enqueue(bulk_feedback_mail, **kwargs)
def bulk_send_feedback_edited_email(**kwargs):
    """
    Enqueue an RQ job (on the 'email' queue) notifying users that their
    feedback was edited. See :func:`bulk_feedback_mail`.
    """
    kwargs['feedback_type'] = 'feedback_edited'
    email_queue = django_rq.get_queue(name='email')
    email_queue.enqueue(bulk_feedback_mail, **kwargs)
| bsd-3-clause | 723e82b2a87434de250eeb5de7f42d69 | 39.4125 | 104 | 0.657903 | 3.95716 | false | false | false | false |
devilry/devilry-django | devilry/devilry_admin/views/subject/crinstance_subject.py | 1 | 2615 | from devilry.apps.core.models import Subject
from devilry.devilry_account.models import SubjectPermissionGroup
from devilry.devilry_cradmin import devilry_crinstance
from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin
from devilry.devilry_admin.views.subject import admins
from devilry.devilry_admin.views.subject import createperiod
from devilry.devilry_admin.views.subject import overview
from devilry.devilry_admin.views.subject import edit
class Menu(devilry_crmenu_admin.Menu):
    """Admin menu for the subject-admin cradmin instance."""

    def build_menu(self):
        super(Menu, self).build_menu()
        active_subject = self.request.cradmin_role
        self.add_role_menuitem_object()
        self.add_subject_breadcrumb_item(subject=active_subject, active=True)
class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin):
    """
    Cradmin instance for the subject-admin UI: routes
    ``/devilry_admin/subject/...`` with a :class:`.Subject` as the role
    object, restricted to users that administer the subject.
    """
    menuclass = Menu
    roleclass = Subject
    apps = [
        ('overview', overview.App),
        ('admins', admins.App),
        ('createperiod', createperiod.App),
        ('edit', edit.App),
    ]
    id = 'devilry_admin_subjectadmin'
    rolefrontpage_appname = 'overview'

    def get_rolequeryset(self):
        # Only subjects the requesting user administers are valid roles.
        return Subject.objects.filter_user_is_admin(user=self.request.user)

    def get_titletext_for_role(self, role):
        """
        Get a short title briefly describing the given ``role``.
        Remember that the role is a Subject.
        """
        # NOTE(review): returns the Subject instance itself, relying on
        # its string representation when rendered - confirm intended.
        subject = role
        return subject

    @classmethod
    def matches_urlpath(cls, urlpath):
        # This instance handles everything under /devilry_admin/subject.
        return urlpath.startswith('/devilry_admin/subject')

    def __get_devilryrole_for_requestuser(self):
        subject = self.request.cradmin_role
        devilryrole = SubjectPermissionGroup.objects.get_devilryrole_for_user_on_subject(
            user=self.request.user,
            subject=subject
        )
        if devilryrole is None:
            raise ValueError('Could not find a devilryrole for request.user. This must be a bug in '
                             'get_rolequeryset().')
        return devilryrole

    def get_devilryrole_for_requestuser(self):
        """
        Get the devilryrole for the requesting user on the current
        subject (request.cradmin_instance).

        The return values is the same as for
        :meth:`devilry.devilry_account.models.SubjectPermissionGroupQuerySet.get_devilryrole_for_user_on_subject`,
        exept that this method raises ValueError if it does not find a role.
        """
        # Memoized on the instance: the permission lookup hits the database.
        if not hasattr(self, '_devilryrole_for_requestuser'):
            self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser()
        return self._devilryrole_for_requestuser
| bsd-3-clause | 13590d0e866331afb1f92274295e747d | 36.898551 | 114 | 0.68413 | 3.688293 | false | false | false | false |
devilry/devilry-django | devilry/devilry_group/migrations/0032_auto_20180214_1654.py | 1 | 4638 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-14 15:54
import devilry.apps.core.models.custom_db_fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """
    Auto-generated migration (2018-02-14) for ``FeedbacksetPassedPreviousPeriod``.

    Adds bookkeeping fields (``created_by``, ``created_datetime``,
    ``passed_previous_period_type``) and relaxes the snapshot fields
    (assignment/period/grading metadata) to allow blank/null values.
    Do not edit by hand -- generated by ``makemigrations``.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('devilry_group', '0031_groupcomment_v2_id'),
    ]
    operations = [
        migrations.AddField(
            model_name='feedbacksetpassedpreviousperiod',
            name='created_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passed_previous_period_created_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='feedbacksetpassedpreviousperiod',
            name='created_datetime',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='feedbacksetpassedpreviousperiod',
            name='passed_previous_period_type',
            field=models.CharField(choices=[('manual', 'manual'), ('auto', 'auto')], default='auto', max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='assignment_long_name',
            field=devilry.apps.core.models.custom_db_fields.LongNameField(blank=True, db_index=True, default='', max_length=100, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='assignment_max_points',
            field=models.PositiveIntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='assignment_passing_grade_min_points',
            field=models.PositiveIntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='assignment_short_name',
            field=devilry.apps.core.models.custom_db_fields.ShortNameField(blank=True, default='', help_text='Up to 20 letters of lowercase english letters (a-z), numbers, underscore ("_") and hyphen ("-"). Used when the name takes too much space.', max_length=20, verbose_name='Short name'),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='feedbackset',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='devilry_group.FeedbackSet'),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='grading_points',
            field=models.PositiveIntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='grading_published_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='passed_previous_period_published_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='grading_published_datetime',
            field=models.DateTimeField(default=None, null=True),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='period_end_time',
            field=models.DateTimeField(default=None, null=True),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='period_long_name',
            field=devilry.apps.core.models.custom_db_fields.LongNameField(blank=True, db_index=True, default='', max_length=100, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='period_short_name',
            field=devilry.apps.core.models.custom_db_fields.ShortNameField(blank=True, default='', help_text='Up to 20 letters of lowercase english letters (a-z), numbers, underscore ("_") and hyphen ("-"). Used when the name takes too much space.', max_length=20, verbose_name='Short name'),
        ),
        migrations.AlterField(
            model_name='feedbacksetpassedpreviousperiod',
            name='period_start_time',
            field=models.DateTimeField(default=None, null=True),
        ),
    ]
| bsd-3-clause | cb6f4323ed56057ed7f3191f0fdda404 | 47.3125 | 292 | 0.646615 | 4.239488 | false | false | false | false |
devilry/devilry-django | devilry/devilry_group/admin.py | 1 | 3612 | from django.contrib import admin
from devilry.devilry_group import models
class FeedbackSetAdmin(admin.ModelAdmin):
    """
    Django admin configuration for :class:`devilry_group.FeedbackSet`.
    """
    list_display = [
        'id',
        'group_id',
        'get_students',
        'deadline_datetime',
        'feedbackset_type',
        'grading_published_datetime',
        'grading_points'
    ]
    # FK widgets rendered as raw id inputs -- these relations can have
    # far too many rows for a select dropdown.
    raw_id_fields = [
        'group',
        'created_by',
        'last_updated_by',
        'grading_published_by'
    ]
    readonly_fields = [
        'feedbackset_type',
        'gradeform_data_json'
    ]
    # '=group__id' means exact match on group id; the rest are icontains
    # searches up the subject/period/assignment hierarchy and on candidates.
    search_fields = [
        '=group__id',
        'group__parentnode__short_name',
        'group__parentnode__long_name',
        'group__parentnode__parentnode__short_name',
        'group__parentnode__parentnode__long_name',
        'group__parentnode__parentnode__parentnode__short_name',
        'group__parentnode__parentnode__parentnode__long_name',
        'group__candidates__relatedstudent__user__shortname'
    ]
    list_filter = [
        'created_datetime',
        'deadline_datetime',
        'grading_published_datetime'
    ]
    def get_students(self, obj):
        # Computed list_display column showing the group's students.
        return obj.group.get_unanonymized_short_displayname()
    get_students.short_description = 'Students'
admin.site.register(models.FeedbackSet, FeedbackSetAdmin)
class GroupCommentAdmin(admin.ModelAdmin):
    """
    Django admin configuration for :class:`devilry_group.GroupComment`.
    """
    list_display = [
        'id',
        'user',
        'part_of_grading'
    ]
    raw_id_fields = [
        'feedback_set'
    ]
admin.site.register(models.GroupComment, GroupCommentAdmin)
class FeedbackSetPassedPreviousPeriodAdmin(admin.ModelAdmin):
    """
    Django admin configuration for :class:`devilry_group.FeedbacksetPassedPreviousPeriod`.
    """
    # Everything is read-only: each row is a historical snapshot of a
    # grading carried over from a previous period and must not be edited.
    readonly_fields = [
        'feedbackset',
        'passed_previous_period_type',
        'assignment_short_name',
        'assignment_long_name',
        'assignment_max_points',
        'assignment_passing_grade_min_points',
        'period_short_name',
        'period_long_name',
        'period_start_time',
        'period_end_time',
        'grading_points',
        'grading_published_by',
        'grading_published_datetime',
        'created_datetime',
        'created_by',
    ]
    search_fields = [
        'feedbackset_id',
        'created_by_id',
        'passed_previous_period_type'
    ]
    list_display = [
        'feedbackset'
    ]
admin.site.register(models.FeedbacksetPassedPreviousPeriod, FeedbackSetPassedPreviousPeriodAdmin)
class FeedbackSetGradingUpdateHistoryAdmin(admin.ModelAdmin):
    """
    Django admin configuration for :class:`devilry_group.FeedbackSetGradingUpdateHistory`.
    """
    readonly_fields = [
        'feedback_set',
        'updated_by',
        'updated_datetime',
        'old_grading_points',
        'old_grading_published_by',
        'old_grading_published_datetime'
    ]
    search_fields = [
        # Bugfix: the model field is named ``feedback_set`` (see
        # readonly_fields/list_display above and FeedbackSetDeadlineHistoryAdmin
        # below), so the FK attname is ``feedback_set_id``. The previous value,
        # 'feedbackset_id', matched no field and made admin search raise a
        # FieldError.
        'feedback_set_id',
        'updated_by_id',
        'old_grading_published_by_id'
    ]
    list_display = [
        'feedback_set',
        'updated_by'
    ]
admin.site.register(models.FeedbackSetGradingUpdateHistory, FeedbackSetGradingUpdateHistoryAdmin)
class FeedbackSetDeadlineHistoryAdmin(admin.ModelAdmin):
    """
    Django admin configuration for :class:`devilry_group.FeedbackSetDeadlineHistory`.
    """
    # Read-only audit trail of deadline changes.
    readonly_fields = [
        'feedback_set',
        'changed_by',
        'changed_datetime',
        'deadline_old',
        'deadline_new'
    ]
    search_fields = [
        'feedback_set_id',
        'changed_by_id'
    ]
    list_display = [
        'feedback_set',
        'changed_datetime'
    ]
admin.site.register(models.FeedbackSetDeadlineHistory, FeedbackSetDeadlineHistoryAdmin)
class GroupCommentEditHistoryAdmin(admin.ModelAdmin):
    """
    Django admin configuration for :class:`devilry_group.GroupCommentEditHistory`.
    """
    list_display = [
        'visibility',
        'edited_datetime',
        'edited_by'
    ]
admin.site.register(models.GroupCommentEditHistory, GroupCommentEditHistoryAdmin)
| bsd-3-clause | 4c24f0a416ceb71dea0ffe039f3d5360 | 23.405405 | 97 | 0.611296 | 3.786164 | false | false | false | false |
devilry/devilry-django | devilry/utils/tests/test_groups_groupedby_relatedstudent_and_assignment.py | 1 | 4771 | import unittest
from django.test import TestCase
from devilry.apps.core.testhelper import TestHelper
from devilry.utils.groups_groupedby_relatedstudent_and_assignment import GroupsGroupedByRelatedStudentAndAssignment
@unittest.skip('Tests must be rewritten with baker.')
class GroupsGroupedByRelatedStudentAndAssignmentTest(TestCase):
    """
    Tests for :class:`.GroupsGroupedByRelatedStudentAndAssignment`.

    NOTE: Skipped until rewritten with model-bakery -- they still use the
    deprecated ``TestHelper`` fixture builder.
    """
    def setUp(self):
        # One period with three assignments and four groups; student3 is a
        # member of two groups (stud3 and stud4).
        self.testhelper = TestHelper()
        self.testhelper.add(nodes="uni",
                            subjects=["sub"],
                            periods=["p1:begins(-24):ends(6)"],
                            assignments=['a1:pub(1)', 'a3:pub(4)', 'a2:pub(3)'],
                            assignmentgroups=["stud1:candidate(student1):examiner(examiner1)",
                                              "stud2:candidate(student2):examiner(examiner1)",
                                              "stud3:candidate(student3):examiner(examiner1)",
                                              "stud4:candidate(student4,student3):examiner(examiner1)"],
                            deadlines=['d1:ends(1)'])

    def _add_good_feedback(self, group):
        # Helper: deliver and give a passing (100/100) feedback.
        delivery = self.testhelper.add_delivery(group, {"good.py": "print awesome"})
        self.testhelper.add_feedback(delivery, {'grade': '100/100', 'points': 100, 'is_passing_grade': True})

    def _add_bad_feedback(self, group):
        # Helper: deliver and give a failing (0/100) feedback.
        delivery = self.testhelper.add_delivery(group, {"bad.py": "print bad"})
        self.testhelper.add_feedback(delivery, {'grade': '0/100', 'points': 0, 'is_passing_grade': False})

    def test_iter_assignments(self):
        # Assignments are iterated in publishing_time order, not creation order.
        grouper = GroupsGroupedByRelatedStudentAndAssignment(self.testhelper.sub_p1)
        shortnames = [assignment.short_name for assignment in grouper.iter_assignments()]
        self.assertEqual(shortnames, ['a1', 'a2', 'a3'])

    def test_iter_students_that_is_candidate_but_not_in_related_none(self):
        grouper = GroupsGroupedByRelatedStudentAndAssignment(self.testhelper.sub_p1)
        self.assertEqual(len(list(grouper.iter_relatedstudents_with_results())), 0)

    def test_iter_students_that_is_candidate_but_not_in_related(self):
        grouper = GroupsGroupedByRelatedStudentAndAssignment(self.testhelper.sub_p1)
        ignored = list(grouper.iter_students_that_is_candidate_but_not_in_related())
        self.assertEqual(len(ignored), 4)
        student1_info = None
        for aggregated_relstudentinfo in ignored:
            if aggregated_relstudentinfo.user == self.testhelper.student1:
                student1_info = aggregated_relstudentinfo
                break
        grouplists = list(student1_info.iter_groups_by_assignment())
        self.assertEqual(len(grouplists), 3)  # Should match the number of assignments
        self.assertEqual(len(grouplists[0]), 1)
        self.assertEqual(grouplists[0][0].feedback, None)

    def test_iter_students_with_feedback_that_is_candidate_but_not_in_related_none(self):
        grouper = GroupsGroupedByRelatedStudentAndAssignment(self.testhelper.sub_p1)
        self.assertEqual(len(list(grouper.iter_students_with_feedback_that_is_candidate_but_not_in_related())), 0)
        self.assertEqual(len(list(grouper.iter_relatedstudents_with_results())), 0)

    def test_iter_students_with_feedback_that_is_candidate_but_not_in_related(self):
        self._add_bad_feedback(self.testhelper.sub_p1_a1_stud1)
        grouper = GroupsGroupedByRelatedStudentAndAssignment(self.testhelper.sub_p1)
        ignored = list(grouper.iter_students_with_feedback_that_is_candidate_but_not_in_related())
        self.assertEqual(len(ignored), 1)
        grouplists = list(ignored[0].iter_groups_by_assignment())
        self.assertEqual(len(grouplists), 3)  # Should match the number of assignments
        self.assertEqual(grouplists[0][0].feedback.grade, '0/100')

    def test_iter_relatedstudents_with_results(self):
        self._add_bad_feedback(self.testhelper.sub_p1_a1_stud1)
        self.testhelper.sub_p1.relatedstudent_set.create(user=self.testhelper.student1)
        grouper = GroupsGroupedByRelatedStudentAndAssignment(self.testhelper.sub_p1)
        results = list(grouper.iter_relatedstudents_with_results())
        self.assertEqual(len(results), 1)
        student1info = results[0]
        grouplists = list(student1info.iter_groups_by_assignment())
        self.assertEqual(len(grouplists), 3)  # Should match the number of assignments
        self.assertEqual(grouplists[0].get_best_gradestring(), '0/100')

    def test_iter_relatedstudents_with_results_multi(self):
        self.testhelper.sub_p1.relatedstudent_set.create(user=self.testhelper.student1)
        self.testhelper.sub_p1.relatedstudent_set.create(user=self.testhelper.student2)
        grouper = GroupsGroupedByRelatedStudentAndAssignment(self.testhelper.sub_p1)
        results = list(grouper.iter_relatedstudents_with_results())
        self.assertEqual(len(results), 2)
devilry/devilry-django | devilry/devilry_admin/views/period/students.py | 1 | 12467 |
from cradmin_legacy import crapp
from cradmin_legacy.crispylayouts import DangerSubmit
from django.contrib import messages
from django.http import Http404
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy
from devilry.apps.core.models import RelatedStudent, Assignment
from devilry.devilry_account.models import PermissionGroup, User
from devilry.devilry_admin.cradminextensions.listbuilder import listbuilder_relatedstudent
from devilry.devilry_admin.cradminextensions.listfilter import listfilter_relateduser
from devilry.devilry_admin.views.common import bulkimport_users_common
from devilry.devilry_cradmin import devilry_multiselect2
from devilry.devilry_cradmin.viewhelpers import devilry_confirmview
class GetQuerysetForRoleMixin(object):
    """
    Mixin providing the base :class:`.RelatedStudent` queryset for a
    period cradmin role.
    """
    model = RelatedStudent

    def get_queryset_for_role(self, role):
        # The cradmin role is a Period; list its related students
        # alphabetically by shortname.
        period = role
        queryset = self.model.objects.filter(period=period)
        return queryset.order_by('user__shortname')
class OverviewItemValue(listbuilder_relatedstudent.ReadOnlyItemValue):
    """
    Renders a single related student in the period students overview list.
    """
    template_name = 'devilry_admin/period/students/overview-itemvalue.django.html'
class Overview(listbuilder_relatedstudent.VerticalFilterListView):
    """
    Filterable listing of all students related to the period.
    """
    value_renderer_class = OverviewItemValue
    template_name = 'devilry_admin/period/students/overview.django.html'

    def get_period(self):
        return self.request.cradmin_role

    def add_filterlist_items(self, filterlist):
        super(Overview, self).add_filterlist_items(filterlist=filterlist)
        # Extra filter for active/inactive students.
        filterlist.append(listfilter_relateduser.IsActiveFilter())

    def get_filterlist_url(self, filters_string):
        return self.request.cradmin_app.reverse_appurl(
            'filter',
            kwargs={'filters_string': filters_string})

    def get_unfiltered_queryset_for_role(self, role):
        queryset = self.model.objects.filter(period=role)
        queryset = queryset.prefetch_syncsystemtag_objects()
        return queryset.select_related('user').order_by('user__shortname')

    def __user_is_department_admin(self):
        devilryrole = self.request.cradmin_instance.get_devilryrole_for_requestuser()
        return devilryrole == PermissionGroup.GROUPTYPE_DEPARTMENTADMIN

    def get_context_data(self, **kwargs):
        context = super(Overview, self).get_context_data(**kwargs)
        context.update({
            'period': self.request.cradmin_role,
            'user_is_department_admin': self.__user_is_department_admin(),
        })
        return context
class SingleRelatedStudentMixin(GetQuerysetForRoleMixin):
    """
    Loads the :class:`.RelatedStudent` identified by the ``pk`` view kwarg
    into ``self.relatedstudent``, raising 404 when it does not exist
    within the current period.
    """
    def dispatch(self, request, *args, **kwargs):
        queryset = self.get_queryset_for_role(role=self.request.cradmin_role) \
            .select_related('user')
        try:
            self.relatedstudent = queryset.get(id=kwargs['pk'])
        except RelatedStudent.DoesNotExist:
            raise Http404()
        return super(SingleRelatedStudentMixin, self).dispatch(request, *args, **kwargs)
class DeactivateView(SingleRelatedStudentMixin, devilry_confirmview.View):
    """
    View used to deactivate students from a period.

    Deactivation is a soft operation: the RelatedStudent row is kept,
    only the ``active`` flag is flipped, and it can be reverted via
    :class:`.ActivateView`.
    """
    def get_pagetitle(self):
        return gettext_lazy('Deactivate student: %(user)s?') % {
            'user': self.relatedstudent.user.get_full_name(),
        }
    def get_confirm_message(self):
        period = self.request.cradmin_role
        return gettext_lazy(
            'Are you sure you want to make %(user)s '
            'an inactive student for %(period)s? Inactive students '
            'can not be added to new assignments, but they still have access '
            'to assignments that they have already been granted access to. Inactive '
            'students are clearly marked with warning messages throughout the student, examiner '
            'and admin UI, but students and examiners are not notified in any way when you '
            'deactivate a student. You can re-activate a deactivated student at any time.'
        ) % {
            'user': self.relatedstudent.user.get_full_name(),
            'period': period.get_path(),
        }
    def get_submit_button_label(self):
        return gettext_lazy('Deactivate')
    def get_submit_button_class(self):
        # Danger (red) button since this looks like a destructive action.
        return DangerSubmit
    def get_backlink_url(self):
        return self.request.cradmin_app.reverse_appindexurl()
    def get_backlink_label(self):
        return gettext_lazy('Back to students on semester overview')
    def __get_success_message(self):
        return gettext_lazy('%(user)s was deactivated.') % {
            'user': self.relatedstudent.user.get_full_name(),
        }
    def form_valid(self, form):
        # Soft delete: flip the active flag instead of removing the row.
        self.relatedstudent.active = False
        self.relatedstudent.save()
        messages.success(self.request, self.__get_success_message())
        return super(DeactivateView, self).form_valid(form=form)
class ActivateView(SingleRelatedStudentMixin, devilry_confirmview.View):
    """
    View used to re-activate a previously deactivated student on a period.

    Counterpart to :class:`.DeactivateView`.
    """
    def get_context_data(self, **kwargs):
        context = super(ActivateView, self).get_context_data(**kwargs)
        context['period'] = self.request.cradmin_role
        return context
    def get_pagetitle(self):
        return gettext_lazy('Re-activate student: %(user)s?') % {
            'user': self.relatedstudent.user.get_full_name()
        }
    def get_submit_button_label(self):
        return gettext_lazy('Re-activate')
    def get_confirm_message(self):
        return gettext_lazy('Please confirm that you want to re-activate %(user)s.') % {
            'user': self.relatedstudent.user.get_full_name()
        }
    def get_backlink_url(self):
        return self.request.cradmin_app.reverse_appindexurl()
    def get_backlink_label(self):
        return gettext_lazy('Back to students on semester overview')
    def get_success_url(self):
        return str(self.request.cradmin_app.reverse_appindexurl())
    def __get_success_message(self):
        return gettext_lazy('%(user)s was re-activated.') % {
            'user': self.relatedstudent.user.get_full_name()
        }
    def form_valid(self, form):
        # Flip the active flag back on.
        self.relatedstudent.active = True
        self.relatedstudent.save()
        messages.success(self.request, self.__get_success_message())
        return super(ActivateView, self).form_valid(form=form)
class AddStudentsTarget(devilry_multiselect2.user.Target):
    """
    Selected-items ("target") box for the :class:`.AddView` multiselect UI.
    """
    def get_submit_button_text(self):
        return gettext_lazy('Add selected students')
def students_not_added_to_assignments_warning(request, period):
    """
    Add a django-messages info message listing the period's assignments,
    reminding the admin that newly added students are not automatically
    added to already-existing assignments.

    Does nothing when the period has no assignments.
    """
    assignment_queryset = Assignment.objects.filter(parentnode=period)
    if assignment_queryset.exists():
        message = render_to_string(
            'devilry_admin/period/students/students-not-added-to-assignments-warning.django.html',
            {'assignment_queryset': assignment_queryset})
        messages.info(request, message)
class AddView(devilry_multiselect2.user.BaseMultiselectUsersView):
    """
    Multiselect view for adding users as related students on the period.

    Users that already are related students on the period are excluded
    from the selectable list.
    """
    value_renderer_class = devilry_multiselect2.user.ItemValue
    template_name = 'devilry_admin/period/students/add.django.html'
    model = User

    def get_filterlist_url(self, filters_string):
        return self.request.cradmin_app.reverse_appurl(
            'add', kwargs={'filters_string': filters_string})

    def __get_userids_already_relatedstudent_queryset(self):
        # User ids that already have a RelatedStudent row on this period.
        return RelatedStudent.objects \
            .filter(period=self.request.cradmin_role) \
            .values_list('user_id', flat=True)

    def get_unfiltered_queryset_for_role(self, role):
        queryset = super(AddView, self).get_unfiltered_queryset_for_role(role=self.request.cradmin_role)
        return queryset.exclude(id__in=self.__get_userids_already_relatedstudent_queryset())

    def get_target_renderer_class(self):
        return AddStudentsTarget

    def get_context_data(self, **kwargs):
        context = super(AddView, self).get_context_data(**kwargs)
        context['period'] = self.request.cradmin_role
        return context

    def get_success_url(self):
        return str(self.request.cradmin_app.reverse_appindexurl())

    def __get_success_message(self, added_users):
        added_users_names = sorted('"{}"'.format(user.get_full_name()) for user in added_users)
        return gettext_lazy('Added %(usernames)s.') % {
            'usernames': ', '.join(added_users_names)
        }

    def __create_relatedstudents(self, selected_users):
        period = self.request.cradmin_role
        RelatedStudent.objects.bulk_create([
            RelatedStudent(period=period, user=user)
            for user in selected_users
        ])

    def form_valid(self, form):
        selected_users = list(form.cleaned_data['selected_items'])
        self.__create_relatedstudents(selected_users=selected_users)
        messages.success(self.request, self.__get_success_message(added_users=selected_users))
        # Remind the admin that existing assignments are not updated.
        students_not_added_to_assignments_warning(
            request=self.request,
            period=self.request.cradmin_role)
        return super(AddView, self).form_valid(form=form)
class ImportStudentsView(bulkimport_users_common.AbstractTypeInUsersView):
    """
    Bulk import students on a period from a pasted list of emails
    or usernames.
    """
    create_button_label = gettext_lazy('Bulk import students')

    def get_backlink_url(self):
        return self.request.cradmin_app.reverse_appindexurl()

    def get_backlink_label(self):
        return gettext_lazy('Back to students on semester overview')

    def get_pagetitle(self):
        return gettext_lazy('Bulk import students')

    def __add_result_messages(self, result, existing_relateduser_set):
        """
        Add django messages summarizing a bulk-create ``result``.

        This is the logic previously duplicated between
        ``import_users_from_emails`` and ``import_users_from_usernames``.

        :param result: Result object from ``bulk_create_from_emails`` or
            ``bulk_create_from_usernames``.
        :param existing_relateduser_set: Set of emails/usernames that already
            were students on the period.
        """
        period = self.request.cradmin_role
        if result.new_relatedusers_was_created():
            messages.success(self.request, gettext_lazy('Added %(count)s new students to %(period)s.') % {
                'count': result.created_relatedusers_queryset.count(),
                'period': period.get_path()
            })
            # Remind the admin that existing assignments are not updated.
            students_not_added_to_assignments_warning(
                request=self.request,
                period=self.request.cradmin_role)
        else:
            messages.warning(self.request, gettext_lazy('No new students was added.'))
        if existing_relateduser_set:
            messages.info(self.request, gettext_lazy('%(count)s users was already student on %(period)s.') % {
                'count': len(existing_relateduser_set),
                'period': period.get_path()
            })

    def import_users_from_emails(self, emails):
        period = self.request.cradmin_role
        result = RelatedStudent.objects.bulk_create_from_emails(period=period, emails=emails)
        self.__add_result_messages(
            result=result,
            existing_relateduser_set=result.existing_relateduser_emails_set)

    def import_users_from_usernames(self, usernames):
        period = self.request.cradmin_role
        result = RelatedStudent.objects.bulk_create_from_usernames(period=period, usernames=usernames)
        self.__add_result_messages(
            result=result,
            existing_relateduser_set=result.existing_relateduser_usernames_set)
class App(crapp.App):
    """
    CrAdmin app wiring the student-administration views for a period:
    overview/filtering, deactivate/activate, multiselect add, and
    bulk import.
    """
    appurls = [
        crapp.Url(r'^$',
                  Overview.as_view(),
                  name=crapp.INDEXVIEW_NAME),
        crapp.Url(r'^filter/(?P<filters_string>.+)?$',
                  Overview.as_view(),
                  name='filter'),
        crapp.Url(r'^deactivate/(?P<pk>\d+)$',
                  DeactivateView.as_view(),
                  name="deactivate"),
        crapp.Url(r'^activate/(?P<pk>\d+)$',
                  ActivateView.as_view(),
                  name="activate"),
        crapp.Url(r'^add/(?P<filters_string>.+)?$',
                  AddView.as_view(),
                  name="add"),
        crapp.Url(r'^importstudents',
                  ImportStudentsView.as_view(),
                  name="importstudents"),
    ]
| bsd-3-clause | 96f36157af2a336b58276c67654db575 | 39.346278 | 119 | 0.652523 | 3.882591 | false | false | false | false |
devilry/devilry-django | devilry/apps/core/tests/test_candidate.py | 1 | 8276 | from datetime import timedelta
import mock
from django import test
from django.utils import timezone
from model_bakery import baker
from devilry.apps.core.models import Candidate
class TestCandidateQuerySet(test.TestCase):
    """
    Tests for :meth:`Candidate.objects.filter_has_passing_grade`.

    All tests use assignments where ``passing_grade_min_points=1``, so a
    feedbackset graded with 1 point passes and one graded with 0 fails.
    """
    def __filter_passing(self, assignment):
        # Shorthand: run the queryset method under test and return a list.
        return list(Candidate.objects.filter_has_passing_grade(assignment=assignment))

    def test_filter_has_passing_grade(self):
        assignment = baker.make('core.Assignment', passing_grade_min_points=1)
        passing_feedbackset = baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now(),
            group__parentnode=assignment,
            grading_points=1)
        baker.make(
            'devilry_group.FeedbackSet',
            group__parentnode=assignment,
            grading_published_datetime=timezone.now(),
            grading_points=0)
        candidate = baker.make('core.Candidate', assignment_group=passing_feedbackset.group)
        self.assertEqual([candidate], self.__filter_passing(assignment))

    def test_filter_has_passing_grade_unpublished_ignored(self):
        assignment = baker.make('core.Assignment', passing_grade_min_points=1)
        unpublished_feedbackset = baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=None,
            group__parentnode=assignment,
            grading_points=1)
        baker.make('core.Candidate', assignment_group=unpublished_feedbackset.group)
        self.assertEqual([], self.__filter_passing(assignment))

    def test_filter_has_passing_grade_unpublished_ignored_but_has_older_published(self):
        assignment = baker.make('core.Assignment', passing_grade_min_points=1)
        group = baker.make('core.AssignmentGroup', parentnode=assignment)
        candidate = baker.make('core.Candidate', assignment_group=group)
        # Older published passing grade + newer unpublished failing grade:
        # only the published one counts.
        baker.make(
            'devilry_group.FeedbackSet',
            group=group,
            grading_published_datetime=timezone.now() - timedelta(days=2),
            grading_points=1)
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=None,
            group=group,
            grading_points=0)
        self.assertEqual([candidate], self.__filter_passing(assignment))

    def test_filter_has_passing_grade_correct_feedbackset_ordering1(self):
        assignment = baker.make('core.Assignment', passing_grade_min_points=1)
        group = baker.make('core.AssignmentGroup', parentnode=assignment)
        candidate = baker.make('core.Candidate', assignment_group=group)
        # Only the newest published feedbackset (points=1) should count.
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now() - timedelta(days=2),
            group=group,
            grading_points=0)
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now(),
            group=group,
            grading_points=1)
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now() - timedelta(days=3),
            group=group,
            grading_points=0)
        self.assertEqual([candidate], self.__filter_passing(assignment))

    def test_filter_has_passing_grade_correct_feedbackset_ordering2(self):
        assignment = baker.make('core.Assignment', passing_grade_min_points=1)
        group = baker.make('core.AssignmentGroup', parentnode=assignment)
        baker.make('core.Candidate', assignment_group=group)
        # The newest published feedbackset has points=0, so nothing passes.
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now() - timedelta(days=2),
            group=group,
            grading_points=1)
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now(),
            group=group,
            grading_points=0)
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now() - timedelta(days=3),
            group=group,
            grading_points=1)
        self.assertEqual([], self.__filter_passing(assignment))

    def test_filter_has_passing_grade_not_within_assignment(self):
        assignment = baker.make('core.Assignment', passing_grade_min_points=1)
        # The passing group belongs to a different (unrelated) assignment.
        group = baker.make('core.AssignmentGroup')
        baker.make('core.Candidate', assignment_group=group)
        baker.make(
            'devilry_group.FeedbackSet',
            grading_published_datetime=timezone.now(),
            group=group,
            grading_points=1)
        self.assertEqual([], self.__filter_passing(assignment))
class TestCandidateModel(test.TestCase):
    """
    Tests for :meth:`Candidate.get_anonymous_name`.
    """
    def test_get_anonymous_name_uses_custom_candidate_ids_true_no_candidate_id(self):
        candidate = baker.make(
            'core.Candidate',
            assignment_group__parentnode__uses_custom_candidate_ids=True)
        self.assertEqual('Automatic anonymous ID missing', candidate.get_anonymous_name())

    def test_get_anonymous_name_assignment_argument(self):
        candidate = baker.make('core.Candidate')
        assignment_mock = mock.MagicMock()
        uses_custom_candidate_ids_property = mock.PropertyMock(return_value=True)
        type(assignment_mock).uses_custom_candidate_ids = uses_custom_candidate_ids_property
        # The explicitly passed assignment must be consulted instead of
        # candidate.assignment_group.parentnode.
        candidate.get_anonymous_name(assignment=assignment_mock)
        uses_custom_candidate_ids_property.assert_called_once_with()

    def test_get_anonymous_name_uses_custom_candidate_ids_true_with_candidate_id(self):
        candidate = baker.make(
            'core.Candidate',
            assignment_group__parentnode__uses_custom_candidate_ids=True,
            candidate_id='MyCandidateId')
        self.assertEqual('MyCandidateId', candidate.get_anonymous_name())

    def test_get_anonymous_name_uses_custom_candidate_ids_false_with_candidate_id(self):
        candidate = baker.make(
            'core.Candidate',
            assignment_group__parentnode__uses_custom_candidate_ids=False,
            relatedstudent__candidate_id='MyCandidateId')
        self.assertEqual('MyCandidateId', candidate.get_anonymous_name())

    def test_get_anonymous_name_uses_custom_candidate_ids_false_ignores_candidate_candidate_id(self):
        candidate = baker.make(
            'core.Candidate',
            assignment_group__parentnode__uses_custom_candidate_ids=False,
            candidate_id='ignored',
            relatedstudent__candidate_id='MyCandidateId')
        self.assertEqual('MyCandidateId', candidate.get_anonymous_name())

    def test_get_anonymous_name_uses_custom_candidate_ids_false_with_anonymous_id(self):
        candidate = baker.make(
            'core.Candidate',
            assignment_group__parentnode__uses_custom_candidate_ids=False,
            relatedstudent__automatic_anonymous_id='MyAutomaticID')
        self.assertEqual('MyAutomaticID', candidate.get_anonymous_name())

    def test_get_anonymous_name_uses_custom_candidate_ids_false_no_anonymous_id(self):
        candidate = baker.make(
            'core.Candidate',
            assignment_group__parentnode__uses_custom_candidate_ids=False,
            relatedstudent__automatic_anonymous_id='')
        self.assertEqual('Automatic anonymous ID missing', candidate.get_anonymous_name())
| bsd-3-clause | d59a03d754ab26c159c145b1435a875b | 52.051282 | 101 | 0.610077 | 4.454252 | false | true | false | false |
devilry/devilry-django | devilry/devilry_admin/views/assignment/deadline_handling.py | 1 | 4829 |
from crispy_forms import layout
from django import forms
from django.http import Http404
from django.utils.translation import pgettext_lazy
from cradmin_legacy.viewhelpers.crudbase import OnlySaveButtonMixin
from cradmin_legacy.viewhelpers.update import UpdateView
from devilry.apps.core import models as coremodels
from devilry.apps.core.models import Assignment
from devilry.devilry_account.models import SubjectPermissionGroup
class AssignmentDeadlineHandlingUpdateView(OnlySaveButtonMixin, UpdateView):
    """
    Update view for the ``deadline_handling`` (SOFT/HARD) setting of an
    :class:`~devilry.apps.core.models.Assignment`.

    Access is restricted to users with a permission group on the subject
    owning the assignment (subject or department admins).
    """
    model = coremodels.Assignment
    fields = ['deadline_handling']
    template_name = 'devilry_cradmin/viewhelpers/devilry_updateview_with_backlink.django.html'

    def dispatch(self, request, *args, **kwargs):
        # Bugfix: guard the lookup so an unknown/invalid pk gives a 404
        # instead of an unhandled Assignment.DoesNotExist (HTTP 500).
        try:
            assignment = Assignment.objects.get(id=kwargs['pk'])
        except Assignment.DoesNotExist:
            raise Http404()
        subject = assignment.parentnode.parentnode
        user_devilry_role = SubjectPermissionGroup.objects\
            .get_devilryrole_for_user_on_subject(user=request.user, subject=subject)
        if user_devilry_role is None:
            # Authenticated, but no admin rights on this subject.
            raise Http404()
        return super(AssignmentDeadlineHandlingUpdateView, self).dispatch(request=request, *args, **kwargs)

    def __get_deadline_handling_choices(self):
        """
        Build the radio-button choices with long user-facing descriptions.
        """
        return [
            (
                Assignment.DEADLINEHANDLING_SOFT,
                pgettext_lazy('deadline handling update hard choice',
                              'SOFT. Students will be able to add upload deliveries and comment after '
                              'the deadline has expired, but this will be clearly highlighted. Deliveries made after '
                              'the deadline has expired might not be considered when an examiner is correcting '
                              'deliveries.')
            ),
            (
                Assignment.DEADLINEHANDLING_HARD,
                pgettext_lazy('deadline handling update hard choice',
                              'HARD. Students will not be able to upload deliveries or comment after the deadline has '
                              'expired. This can only be reverted by setting the deadline handling to soft, extending '
                              'the deadline or give a new attempt. A highlighted box will appear in the top of the '
                              'delivery feed informing the user that the assignment uses hard deadlines.')
            ),
        ]

    def get_form(self, form_class=None):
        # Replace the default select widget with radio buttons carrying the
        # long descriptions, and drop the model field's help text.
        form = super(AssignmentDeadlineHandlingUpdateView, self).get_form()
        form.fields['deadline_handling'].widget = forms.RadioSelect()
        form.fields['deadline_handling'].choices = self.__get_deadline_handling_choices()
        form.fields['deadline_handling'].help_text = None
        return form

    def get_pagetitle(self):
        return pgettext_lazy('assignment config', "Edit deadline handling")

    def get_queryset_for_role(self, role):
        return self.model.objects.filter(id=self.request.cradmin_role.id)

    def post(self, request, *args, **kwargs):
        # Remember the pre-save value so the success message can describe
        # the transition (old -> new).
        self._old_deadline_handling = self.get_object().deadline_handling
        return super(AssignmentDeadlineHandlingUpdateView, self).post(request, *args, **kwargs)

    def get_deadline_handling_soft_text(self):
        return pgettext_lazy('deadline update view deadline soft text', 'SOFT')

    def get_deadline_handling_hard_text(self):
        return pgettext_lazy('deadline update view deadline hard text', 'HARD')

    def get_deadline_handling_text(self, assignment):
        """
        Return the short SOFT/HARD label for the assignment's current setting.
        """
        # Compare against the named constant instead of the magic number 0
        # for consistency with the rest of this class.
        if assignment.deadline_handling == Assignment.DEADLINEHANDLING_SOFT:
            return self.get_deadline_handling_soft_text()
        return self.get_deadline_handling_hard_text()

    def get_success_message(self, object):
        assignment = object
        if self._old_deadline_handling == Assignment.DEADLINEHANDLING_SOFT:
            old_deadline_handling_text = self.get_deadline_handling_soft_text()
        else:
            old_deadline_handling_text = self.get_deadline_handling_hard_text()
        new_deadline_handling_text = self.get_deadline_handling_text(assignment=assignment)
        return pgettext_lazy(
            'assignment config',
            'Changed deadline handling from "%(old_deadline_handling_text)s" '
            'to "%(new_deadline_handling_text)s".'
        ) % {
            'old_deadline_handling_text': old_deadline_handling_text,
            'new_deadline_handling_text': new_deadline_handling_text
        }

    def get_backlink_url(self):
        return self.request.cradmin_instance.rolefrontpage_url()

    def get_context_data(self, **kwargs):
        context = super(AssignmentDeadlineHandlingUpdateView, self).get_context_data(**kwargs)
        context['backlink_url'] = self.get_backlink_url()
        return context
| bsd-3-clause | 2c50d8c0d095d020335c0768b8cf11fa | 47.29 | 119 | 0.663698 | 4.258377 | false | false | false | false |
devilry/devilry-django | devilry/devilry_admin/views/assignment/students/manage_deadlines.py | 1 | 1329 | # -*- coding: utf-8 -*-
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from devilry.devilry_deadlinemanagement.cradmin_app import AdminDeadlineManagementApp
from devilry.devilry_deadlinemanagement.views import deadline_listview
from devilry.devilry_deadlinemanagement.views import manage_deadline_view
class AdminDeadlineListView(deadline_listview.DeadlineListView):
    """Deadline list view for admins, linking back to the student overview."""

    def get_backlink_url(self):
        """Return the URL for the student overview of the current assignment."""
        url_kwargs = {
            'instanceid': 'devilry_admin_assignmentadmin',
            'appname': 'studentoverview',
            'roleid': self.request.cradmin_instance.assignment.id,
            'viewname': crapp.INDEXVIEW_NAME,
        }
        return reverse_cradmin_url(**url_kwargs)
class AdminManageDeadlineFromPreviousView(manage_deadline_view.ManageDeadlineFromPreviousView):
    """Manage-deadline view for admins that redirects back to the student overview."""

    def get_success_url(self):
        """URL to redirect to after the deadline has been managed."""
        url_kwargs = {
            'instanceid': 'devilry_admin_assignmentadmin',
            'appname': 'studentoverview',
            'roleid': self.request.cradmin_instance.assignment.id,
            'viewname': crapp.INDEXVIEW_NAME,
        }
        return reverse_cradmin_url(**url_kwargs)
class App(AdminDeadlineManagementApp):
    """Deadline management cradmin app wired up with the admin view classes."""

    @classmethod
    def get_manage_deadline_from_previous_view_class(cls):
        """View class used when managing a deadline based on a previous one."""
        return AdminManageDeadlineFromPreviousView

    @classmethod
    def get_index_view_class(cls):
        """View class used for the app index (the deadline list)."""
        return AdminDeadlineListView
| bsd-3-clause | 06e58195f5a5aab5a9eefa8f0178fb4e | 33.076923 | 95 | 0.734387 | 3.808023 | false | false | false | false |
devilry/devilry-django | devilry/devilry_student/views/period/overview.py | 1 | 3973 |
from django.utils.translation import gettext_lazy, pgettext_lazy
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers import listfilter
from devilry.apps.core import models as coremodels
from devilry.apps.core.models import Assignment
from devilry.devilry_cradmin import devilry_listbuilder
class GroupItemFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
    """Link frame that sends the student to the feedback feed of their group."""
    valuealias = 'group'

    def get_url(self):
        """Build the student feedback feed URL for ``self.group``."""
        return reverse_cradmin_url(
            instanceid='devilry_group_student',
            appname='feedbackfeed',
            roleid=self.group.id,
            viewname=crapp.INDEXVIEW_NAME)

    def get_extra_css_classes_list(self):
        """Extra CSS classes applied to the rendered item frame."""
        css_classes = ['devilry-student-listbuilder-grouplist-itemframe']
        return css_classes
class PeriodOverviewView(listbuilderview.FilterListMixin,
                         listbuilderview.View):
    """
    Student overview of all their assignment groups within a single
    period (semester), rendered as a filterable, paginated list.
    """
    model = coremodels.AssignmentGroup
    value_renderer_class = devilry_listbuilder.assignmentgroup.StudentItemValue
    frame_renderer_class = GroupItemFrame
    paginate_by = 15
    template_name = 'devilry_student/period/overview.django.html'
    def __get_assignment_id_to_assignment_map(self):
        """
        Build a ``{assignment.id: assignment}`` map of the assignments on
        the current period (the cradmin role) that the requesting student
        has access to. Used by the renderers to avoid per-item queries.
        """
        assignmentqueryset = Assignment.objects\
            .filter_student_has_access(user=self.request.user)\
            .filter(parentnode=self.request.cradmin_role)\
            .select_related('parentnode__parentnode')\
            .prefetch_point_to_grade_map()
        assignment_id_to_assignment_map = {}
        for assignment in assignmentqueryset:
            assignment_id_to_assignment_map[assignment.id] = assignment
        return assignment_id_to_assignment_map
    def get_value_and_frame_renderer_kwargs(self):
        """Extra kwargs passed to both the value and frame renderers."""
        kwargs = super(PeriodOverviewView, self).get_value_and_frame_renderer_kwargs()
        kwargs.update({
            'assignment_id_to_assignment_map': self.__get_assignment_id_to_assignment_map(),
            'include_periodpath': False
        })
        return kwargs
    def get_filterlist_url(self, filters_string):
        """Return the URL of this view with ``filters_string`` applied."""
        return self.request.cradmin_app.reverse_appurl(
            crapp.INDEXVIEW_NAME,
            kwargs={'filters_string': filters_string})
    def add_filterlist_items(self, filterlist):
        """Add a free-text search filter over assignment/subject names."""
        filterlist.append(listfilter.django.single.textinput.Search(
            slug='search',
            label=gettext_lazy('Search'),
            label_is_screenreader_only=True,
            modelfields=[
                'parentnode__long_name',
                'parentnode__short_name',
                'parentnode__parentnode__long_name',
                'parentnode__parentnode__short_name',
                'parentnode__parentnode__parentnode__long_name',
                'parentnode__parentnode__parentnode__short_name',
            ]))
    def get_unfiltered_queryset_for_role(self, role):
        """
        All assignment groups on the period (``role``) that the requesting
        student has access to, ordered with the newest deadline first.
        """
        period = role
        return coremodels.AssignmentGroup.objects\
            .filter(parentnode__parentnode=period)\
            .filter_student_has_access(user=self.request.user)\
            .distinct()\
            .select_related(
                'parentnode',
                'cached_data__last_published_feedbackset',
                'cached_data__last_feedbackset',
                'cached_data__first_feedbackset',
            )\
            .order_by('-cached_data__last_feedbackset__deadline_datetime')
    def get_no_items_message(self):
        """Message shown when the student has no groups on the period."""
        return pgettext_lazy('student period overview',
                             'No assignments.')
    def get_context_data(self, **kwargs):
        # Expose the period (the cradmin role) to the template.
        context = super(PeriodOverviewView, self).get_context_data(**kwargs)
        context['period'] = self.request.cradmin_role
        return context
class App(crapp.App):
    """Cradmin app exposing the period overview as the index view."""
    appurls = [
        # Fix: the pattern previously ended with a duplicated ``$$`` anchor.
        # ``$`` is zero-width, so the match behavior is unchanged, but the
        # duplicate was a typo.
        crapp.Url(r'^(?P<filters_string>.+)?$',
                  PeriodOverviewView.as_view(),
                  name=crapp.INDEXVIEW_NAME),
    ]
| bsd-3-clause | 3cf3214136987515b0fad39a64f29841 | 37.201923 | 92 | 0.639064 | 3.961117 | false | false | false | false |
devilry/devilry-django | devilry/django_decoupled_docs/registry.py | 1 | 2123 | from django.conf import settings
class DocProxy(object):
    """
    Maps language codes to documentation URLs.

    Keyword arguments given to the constructor are registered as
    ``languagecode=url`` entries.
    """

    def __init__(self, **languages):
        """Create the proxy and register all given ``languagecode=url`` pairs."""
        self.urls = {}
        self.addmany(**languages)

    def addmany(self, **languages):
        """Register multiple ``languagecode=url`` pairs at once."""
        for code in languages:
            self.add_for_language(code, languages[code])

    def add_for_language(self, languagecode, url):
        """Register ``url`` as the documentation URL for ``languagecode``."""
        self.urls[languagecode] = url

    def __getitem__(self, languagecode):
        """Look up the URL for ``languagecode``. Raises ``KeyError`` when missing."""
        return self.urls[languagecode]
class VersionedReadTheDocsDocProxyBase(DocProxy):
    """
    :class:`.DocProxy` base class for documentation hosted on readthedocs.org.

    Paths registered per language are expanded into full readthedocs URLs
    using :obj:`.projectname` and :meth:`.get_current_version`.
    """
    #: The name of the project (the first path of the URL). Must be defined in subclasses.
    projectname = None

    def add_for_language(self, languagecode, path):
        """Expand ``path`` into a full readthedocs URL and register it."""
        url_parts = {
            'projectname': self.projectname,
            'languagecode': languagecode,
            'version': self.get_current_version(),
            'path': path,
        }
        url = 'http://{projectname}.readthedocs.org/{languagecode}/{version}/{path}'.format(**url_parts)
        super(VersionedReadTheDocsDocProxyBase, self).add_for_language(languagecode, url)

    def get_current_version(self):
        """Version slug used in the URL. Override to pin a specific version."""
        return 'latest'
class DocumentationRegistry(object):
    """
    Registry mapping document IDs to :class:`.DocProxy` objects.

    Holds a set of default proxies and a set of overrides. Lookups prefer
    overrides over defaults, and the requested language over the default
    language.
    """

    def __init__(self):
        self._defaults = {}
        self._overrides = {}

    def _get_document(self, registrydict, documentid, languagecode):
        """
        Look up ``documentid`` in ``registrydict`` for ``languagecode``.
        Raises ``KeyError`` when either is missing.
        """
        proxy = registrydict[documentid]
        return proxy[languagecode]

    def get(self, documentid, languagecode):
        """
        Get the documentation URL for ``documentid``.

        Tries the requested ``languagecode`` first, then the default language
        from the ``DJANGO_DECOUPLED_DOCS_DEFAULT_LANGUAGECODE`` setting
        (``'en'`` when unset). For each language, overrides take precedence
        over defaults. Returns ``None`` when nothing is registered.
        """
        default_languagecode = getattr(settings, 'DJANGO_DECOUPLED_DOCS_DEFAULT_LANGUAGECODE', 'en')
        # Bugfix: the previous implementation kept looping after a successful
        # lookup, so a hit for the requested language was overwritten by the
        # default-language URL. Return the first match instead.
        for lang in (languagecode, default_languagecode):
            try:
                return self._get_document(self._overrides, documentid, lang)
            except KeyError:
                try:
                    return self._get_document(self._defaults, documentid, lang)
                except KeyError:
                    pass
        return None

    def add(self, documentid, proxy):
        """Register ``proxy`` as the default for ``documentid``."""
        self._defaults[documentid] = proxy

    def override(self, documentid, proxy):
        """Register ``proxy`` as an override for ``documentid``."""
        self._overrides[documentid] = proxy


documentationregistry = DocumentationRegistry()
devilry/devilry-django | devilry/devilry_i18n/middleware.py | 1 | 1322 | from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils import translation
from django.utils.deprecation import MiddlewareMixin
class LocaleMiddleware(MiddlewareMixin):
    """
    Locale selecting middleware that will look at the languagecode in the
    DevilryUserProfile of the authenticated user.
    Must be added to ``settings.MIDDLEWARE`` after
    ``django.contrib.auth.middleware.AuthenticationMiddleware``.
    """
    def _get_language(self, request):
        """
        Return the language code to activate for this request.

        Uses the authenticated user's ``languagecode`` when it is one of the
        codes in ``settings.LANGUAGES``; falls back to
        ``settings.LANGUAGE_CODE`` otherwise (including for anonymous users).
        """
        if request.user.is_authenticated:
            languagecode = request.user.languagecode
            if languagecode:
                languages_dict = dict(settings.LANGUAGES)
                if languagecode in languages_dict:
                    return languagecode
        return settings.LANGUAGE_CODE
    def process_request(self, request):
        """Activate the selected language and expose it as ``request.LANGUAGE_CODE``."""
        language = self._get_language(request)
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()
    def process_response(self, request, response):
        """
        Deactivate the request language and set the ``Vary`` and
        ``Content-Language`` headers on the response.
        """
        language = translation.get_language()
        translation.deactivate()
        patch_vary_headers(response, ('Accept-Language',))
        if 'Content-Language' not in response:
            response['Content-Language'] = language
        return response
| bsd-3-clause | 839873f435656f62eed12ae85e0a05ab | 35.722222 | 73 | 0.686082 | 4.914498 | false | false | false | false |
devilry/devilry-django | devilry/devilry_superadmin/management/commands/devilry_anonymize_database.py | 1 | 2214 | from allauth.socialaccount.models import SocialAccount, SocialToken, SocialApp
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import transaction
from devilry.devilry_comment.models import Comment
from devilry.utils import anonymize_database
class Command(BaseCommand):
    """
    Management script for anonymizing the database.

    Anonymizes users, comments and comment files, deletes all allauth
    social-auth data, and finally resets all passwords — everything inside
    a single database transaction.
    """
    help = 'Anonymize the entire database.'
    def add_arguments(self, parser):
        # Register the optional --fast flag on the argparse parser.
        parser.add_argument(
            '--fast',
            action='store_true',
            default=False,
            help='Fast mode sets all identifiers such as name, emails, usernames as the IDs of the object. '
                 'Passing this flag as False will generate random characters the same length of the username. '
        )
    def handle(self, *args, **options):
        """
        Run the full anonymization. All work happens in one transaction so a
        failure leaves the database untouched.
        """
        fast = options['fast']
        with transaction.atomic():
            # Remove custom SQL (triggers etc.) before bulk-updating data.
            call_command('ievvtasks_customsql', '--clear')
            self.stdout.write('Anonymizing the database...')
            db_anonymizer = anonymize_database.AnonymizeDatabase(fast=fast)
            self.stdout.write('Anonymizing all users... ({})'.format(get_user_model().objects.count()))
            db_anonymizer.anonymize_user()
            self.stdout.write('Anonymizing all comments and files...({})'.format(Comment.objects.count()))
            db_anonymizer.anonymize_comments()
            # Third-party auth data cannot be anonymized meaningfully, so it
            # is deleted outright.
            self.stdout.write('(Dataporten) Deleting all SocialAccounts ({})'.format(
                SocialAccount.objects.count()))
            SocialAccount.objects.all().delete()
            self.stdout.write('(Dataporten) Deleting all SocialTokens ({})'.format(
                SocialToken.objects.count()))
            SocialToken.objects.all().delete()
            self.stdout.write('(Dataporten) Deleting all SocialApplications ({})'.format(
                SocialApp.objects.count()))
            SocialApp.objects.all().delete()
            # Recreate the custom SQL removed above.
            call_command('ievvtasks_customsql', '-i', '-r')
            self.stdout.write('Setting all passwords to test')
            call_command('ievvtasks_set_all_passwords_to_test')
| bsd-3-clause | 61b9a86debc7f6d2baf3555613decaa6 | 41.576923 | 111 | 0.644986 | 4.209125 | false | false | false | false |
devilry/devilry-django | not_for_deploy/sysadmin_example_scripts/student_statistics.py | 1 | 6093 | #!/usr/bin/env python
import os
import django
import argparse
import arrow
from django.db import models
from django.utils import timezone
def get_arguments(argv=None):
    """
    Parse command line arguments for the student statistics script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, which
            makes :meth:`argparse.ArgumentParser.parse_args` read
            ``sys.argv[1:]`` (backward compatible). The parameter exists
            mainly to make the parser testable.

    Returns:
        dict: Parsed arguments with keys ``username_list``,
        ``subject_shortname`` and ``period_shortname``.
    """
    parser = argparse.ArgumentParser(description='Fetch delivery statistics for Devilry users.')
    parser.add_argument(
        '--username-list',
        required=True,
        nargs='+',
        dest='username_list',
        help='A list of Devilry user shortnames. Example: --username-list username1 username2 username3'
    )
    parser.add_argument(
        '--subject-shortname',
        required=True,
        type=str,
        dest='subject_shortname',
        help='The shortname of a subject. This is unique.'
    )
    parser.add_argument(
        '--period-shortname',
        required=True,
        type=str,
        dest='period_shortname',
        help='The shortname of a period/semester. This is unique together with the subject shortname.'
    )
    return vars(parser.parse_args(argv))
if __name__ == "__main__":
    # For development:
    os.environ.setdefault('DJANGOENV', 'develop')
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devilry.project.settingsproxy")
    django.setup()
    # For production: Specify python path to your settings file here
    # os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'devilry_settings')
    # django.setup()

    # Imports
    #
    # Must be done after django.setup()
    from django.conf import settings
    from devilry.apps.core.models import Period, Candidate
    from devilry.devilry_dbcache.models import AssignmentGroupCachedData
    from devilry.devilry_group.models import FeedbackSet, GroupComment
    from devilry.devilry_comment.models import CommentFile

    #
    # Get script arguments
    #
    argument_dict = get_arguments()

    #
    # Get Period (unique by subject shortname + period shortname)
    #
    period = Period.objects \
        .get(
            parentnode__short_name=argument_dict['subject_shortname'],
            short_name=argument_dict['period_shortname']
        )

    #
    # Loop through each username, and collect data about each
    # assignment on the period.
    #
    now = timezone.now()
    serial_number = 10000
    for user_shortname in argument_dict['username_list']:
        #
        # Get all AssignmentGroups for the user on the Period.
        # The reason for not getting the Assignments directly, is
        # because we still need the AssignmentGroup to collect further
        # data about deadlines, comments, feedback etc.
        # Only groups on published assignments are included.
        #
        assignment_group_cached_data = AssignmentGroupCachedData.objects \
            .select_related(
                'group',
                'group__parentnode',
                'group__parentnode__parentnode'
            ) \
            .filter(
                group__parentnode__publishing_time__lt=now,
                group_id__in=Candidate.objects \
                    .select_related(
                        'assignment_group',
                        'relatedstudent',
                        'relatedstudent__period'
                    ) \
                    .filter(
                        relatedstudent__user__shortname=user_shortname,
                        relatedstudent__period=period,
                        assignment_group__parentnode__parentnode=period
                    ) \
                    .values_list('assignment_group_id', flat=True)
            ) \
            .order_by('group__parentnode__first_deadline')

        #
        # Collect data from each AssignmentGroup (by extension the Assignment)
        # for the user.
        #
        for cached_group_data in assignment_group_cached_data:
            # Get the last published FeedbackSet
            #
            # We only care about published/graded assignments.
            last_published_feedbackset = cached_group_data.last_published_feedbackset
            if not last_published_feedbackset:
                continue

            # Get timestamp for the last delivery.
            #
            # The last delivery is the last comment from a student with files.
            #
            # Bugfix: order ascending so that ``.last()`` returns the NEWEST
            # comment. The previous descending order combined with ``.last()``
            # returned the oldest delivery instead of the last one.
            last_student_comment_with_files = GroupComment.objects \
                .filter(
                    visibility=GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
                    user_role=GroupComment.USER_ROLE_STUDENT,
                    comment_type=GroupComment.COMMENT_TYPE_GROUPCOMMENT,
                    feedback_set=last_published_feedbackset) \
                .annotate(
                    has_files=models.Exists(
                        CommentFile.objects.filter(comment_id=models.OuterRef('id'))
                    )
                ) \
                .filter(has_files=True) \
                .order_by('published_datetime') \
                .last()
            last_delivery_datetime = None
            if last_student_comment_with_files:
                last_delivery_datetime = last_student_comment_with_files.published_datetime

            # Get number of feedbacks.
            #
            # Merged feedbacksets (groups with more than one student) are ignored.
            feedback_num = FeedbackSet.objects \
                .filter(
                    group=cached_group_data.group,
                    ignored=False,
                    grading_published_datetime__isnull=False,
                    feedbackset_type__in=[
                        FeedbackSet.FEEDBACKSET_TYPE_FIRST_ATTEMPT,
                        FeedbackSet.FEEDBACKSET_TYPE_NEW_ATTEMPT
                    ]
                ).count()

            # NOTE(review): ``arrow.get(None)`` is reached when the group has
            # no student delivery -- confirm the intended output for that case.
            print(
                f'{serial_number} - '
                f'{user_shortname} - '
                f'{cached_group_data.group.parentnode.parentnode.parentnode.short_name} - '
                f'{cached_group_data.group.parentnode.short_name} - '
                f'{arrow.get(cached_group_data.group.parentnode.first_deadline).to(settings.TIME_ZONE)} - '
                f'{arrow.get(last_delivery_datetime).to(settings.TIME_ZONE)} - '
                f'{cached_group_data.new_attempt_count + 1} - '
                f'{feedback_num}'
            )
            serial_number += 1
| bsd-3-clause | 8d88417a2d8202c4956d4ca210d2b62a | 35.48503 | 107 | 0.567044 | 4.473568 | false | false | false | false |
devilry/devilry-django | devilry/devilry_message/migrations/0001_initial.py | 1 | 3597 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-01-09 15:28
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration for the devilry_message app: creates
    # the Message and MessageReceiver models. Do not edit the field
    # definitions by hand -- create a new migration instead.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_datetime', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('preparing', 'Preparing for sending'), ('sending', 'Sending'), ('error', 'Error'), ('sent', 'Sent')], db_index=True, default='draft', max_length=30)),
                ('status_data', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
                ('metadata', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
                ('context_type', models.CharField(choices=[('other', 'Other'), ('comment_delivery', 'Comment or delivery'), ('deadline_moved', 'Deadline moved'), ('new_attempt', 'New attempt'), ('feedback', 'Feedback'), ('feedback_updated', 'Grading updated')], default='other', max_length=255)),
                ('message_type', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=30), size=None)),
                ('virtual_message_receivers', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
                ('created_by', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='MessageReceiver',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_datetime', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
                ('status', models.CharField(choices=[('not_sent', 'Not sent'), ('failed', 'Failed'), ('error', 'Error'), ('sent', 'Sent')], default='not_sent', max_length=10)),
                ('status_data', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
                ('metadata', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
                ('subject', models.CharField(blank=True, default='', max_length=255)),
                ('message_content_plain', models.TextField(blank=True, default='')),
                ('message_content_html', models.TextField(blank=True, default='')),
                ('message_type', models.CharField(db_index=True, max_length=30)),
                ('send_to', models.CharField(blank=True, default='', max_length=255)),
                ('sent_datetime', models.DateTimeField(blank=True, null=True)),
                ('sending_failed_count', models.IntegerField(default=0)),
                ('sending_success_count', models.IntegerField(default=0)),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='devilry_message.Message')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| bsd-3-clause | d5155b667716e9172b0dc91e3bf98d49 | 63.232143 | 296 | 0.631359 | 4.059819 | false | false | false | false |
devilry/devilry-django | devilry/devilry_group/models.py | 1 | 34983 | # -*- coding: utf-8 -*-
import warnings
import json
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import OuterRef
from django.db.models.functions import Coalesce
from django.utils import timezone
from django.utils.translation import gettext_lazy
from ievv_opensource.utils import choices_with_meta
from devilry.apps.core.models import assignment_group
from devilry.apps.core.models.custom_db_fields import ShortNameField, LongNameField
from devilry.devilry_comment import models as comment_models
class HardDeadlineExpiredException(Exception):
    """
    Should be raised regarding GroupComments
    if the deadline handling is hard, and the deadline has expired.

    The human-readable reason is available both as ``str(exception)`` and
    via the ``message`` attribute.
    """
    def __init__(self, message, *args, **kwargs):
        """
        Args:
            message: Required human-readable reason for the exception.

        Raises:
            ValueError: If ``message`` is empty.
        """
        if not message:
            raise ValueError('Message required. HardDeadlineExpiredException(message="Some message")')
        # Bugfix: forward the message to Exception.__init__ so that
        # str(exception) contains the message instead of being empty.
        super(HardDeadlineExpiredException, self).__init__(message)
        self.message = message
class PeriodExpiredException(Exception):
    """
    Should be raised regarding GroupComments if the
    period(semester) has expired.

    The human-readable reason is available both as ``str(exception)`` and
    via the ``message`` attribute.
    """
    def __init__(self, message, *args, **kwargs):
        """
        Args:
            message: Required human-readable reason for the exception.

        Raises:
            ValueError: If ``message`` is empty.
        """
        if not message:
            raise ValueError('Message required. PeriodExpiredException(message="Some message")')
        # Bugfix: forward the message to Exception.__init__ so that
        # str(exception) contains the message instead of being empty.
        super(PeriodExpiredException, self).__init__(message)
        self.message = message
class AbstractGroupCommentQuerySet(models.QuerySet):
    """
    Base class for QuerySets for :class:`.AbstractGroupComment`.
    """
    def exclude_private_comments_from_other_users(self, user):
        """
        Exclude all ``GroupComments`` with :obj:`.GroupComment.visibility` set to :obj:`.GroupComment.VISIBILITY_PRIVATE`
        and the :obj:`.GroupComment.user` is not the ``user``.
        Args:
            user: The requestuser.
        Returns:
            QuerySet: QuerySet of :obj:`.GroupComment` not excluded.
        """
        # Exclude only comments that are private AND authored by someone
        # else -- users always keep access to their own private comments.
        return self.exclude(
            models.Q(visibility=AbstractGroupComment.VISIBILITY_PRIVATE) & ~models.Q(user=user)
        )
    def exclude_is_part_of_grading_feedbackset_unpublished(self):
        """
        Exclude all :class:`.GroupComment` that has :obj:`.GroupComment.part_of_grading` set to ``True`` if the
        :obj:`.GroupComment.feedback_set.grading_published_datetime` is ``None``.
        Returns:
            QuerySet: QuerySet of :obj:`.GroupComment` not excluded.
        """
        # Grading comments stay hidden until the feedbackset is published.
        return self.exclude(
            part_of_grading=True,
            feedback_set__grading_published_datetime__isnull=True
        )
    def exclude_comment_is_not_draft_from_user(self, user):
        """
        Exclude :class:`.GroupComment` that are not drafts or the :obj:`.GroupComment.user` is not the requestuser.
        A :class:`.GroupComment` is a draft if :obj:`.GroupComment.visibility` set to
        :obj:`.GroupComment.VISIBILITY_PRIVATE` and :obj:`.GroupComment.part_of_grading` is ``True``.
        Args:
            user: The requestuser
        Returns:
            QuerySet: QuerySet of :obj:`.GroupComment` not excluded.
        """
        # Keep only private, part-of-grading comments (drafts) owned by
        # ``user``; everything else is excluded.
        return self.exclude(
            ~models.Q(part_of_grading=True, visibility=GroupComment.VISIBILITY_PRIVATE) | ~models.Q(user=user)
        )
class AbstractGroupComment(comment_models.Comment):
    """
    The abstract superclass of all comments related to a delivery and feedback.
    """
    #: The related feedbackset. See :class:`.FeedbackSet`.
    feedback_set = models.ForeignKey('FeedbackSet', on_delete=models.CASCADE)
    #: If this is ``True``, the comment is published when the feedbackset
    #: is published. This means that this comment is part of the feedback/grading
    #: from the examiner. The same :obj:`~.AbstractGroupComment.visibility`
    #: rules apply no matter if this is ``True`` or ``False``,
    #: this only controls when the comment is published.
    part_of_grading = models.BooleanField(default=False)
    #: Comment only visible for :obj:`~devilry_comment.models.Comment.user` that created comment.
    #: When this visibility choice is set, and :obj:`~.AbstractGroupComment.part_of_grading` is True, this
    #: GroupComment is a drafted feedback and will be published when the :obj:`~.AbstractGroupComment.feedback_set`
    #: it belongs to is published.
    #: Choice for :obj:`~.AbstractGroupComment.visibility`.
    VISIBILITY_PRIVATE = 'private'
    #: Comment should only be visible to examiners and admins that has
    #: access to the :obj:`~.AbstractGroupComment.feedback_set`.
    #: Choice for :obj:`~.AbstractGroupComment.visibility`.
    VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS = 'visible-to-examiner-and-admins'
    #: Comment should be visible to everyone that has
    #: access to the :obj:`~.AbstractGroupComment.feedback_set`.
    #: Choice for :obj:`~.AbstractGroupComment.visibility`.
    VISIBILITY_VISIBLE_TO_EVERYONE = 'visible-to-everyone'
    #: Choice list.
    #: Choices for :obj:`~.AbstractGroupComment.visibility`.
    VISIBILITY_CHOICES = [
        (VISIBILITY_PRIVATE, 'Private'),
        (VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS, 'Visible to examiners and admins'),
        (VISIBILITY_VISIBLE_TO_EVERYONE, 'Visible to everyone'),
    ]
    #: Sets the visibility choise of the comment.
    #: Defaults to :obj:`~.AbstractGroupComment.VISIBILITY_VISIBLE_TO_EVERYONE`.
    visibility = models.CharField(
        max_length=50,
        db_index=True,
        choices=VISIBILITY_CHOICES,
        default=VISIBILITY_VISIBLE_TO_EVERYONE,
    )
    class Meta:
        abstract = True
    def clean(self):
        """
        Check for situations that should result in error.
        :raises: ValidationError:
            Error occurs if :obj:`~.AbstractGroupComment.user_role` is ``'student'`` and
            :obj:`~.AbstractGroupComment.visibility` is not set to
            :obj:`~.AbstractGroupComment.VISIBILITY_VISIBLE_TO_EVERYONE`
            |
            Error occurs if :obj:`~.AbstractGroupComment.user_role` is ``'examiner'`` and
            :obj:`~.AbstractGroupComment.part_of_grading` is ``False`` and :obj:`~.AbstractGroupComment.visibility` is
            set to :obj:`~.AbstractGroupComment.VISIBILITY_PRIVATE`.
        """
        if self.user_role == 'student':
            if self.visibility != AbstractGroupComment.VISIBILITY_VISIBLE_TO_EVERYONE:
                raise ValidationError({
                    'visibility': gettext_lazy('A student comment is always visible to everyone'),
                })
        if self.user_role == 'examiner':
            if not self.part_of_grading and self.visibility == AbstractGroupComment.VISIBILITY_PRIVATE:
                raise ValidationError({
                    'visibility': gettext_lazy('A examiner comment can only be private if part of grading.')
                })
    def get_published_datetime(self):
        """
        Get the publishing datetime of the comment. Publishing datetime is
        the publishing time of the FeedbackSet if the comment has
        :obj:`~devilry.devilry_group.models.AbstractGroupComment.part_of_grading`
        set to True, else it's just the comments' published_datetime.
        :return: Datetime.
        """
        return self.feedback_set.grading_published_datetime \
            if self.part_of_grading \
            else self.published_datetime
    def publish_draft(self, time):
        """
        Sets the published datetime of the comment to ``time``
        and makes the comment visible to everyone.
        :param time: publishing time to set for the comment.
        """
        self.published_datetime = time
        self.visibility = GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE
        self.full_clean()
        self.save()
    def copy_comment_into_feedbackset(self, feedbackset):
        """
        Creates a new GroupComment, copies all fields in self into
        the new comment and sets feedback_set foreign key to ``feedbackset``
        Args:
            feedbackset: :class:`~devilry_group.FeedbackSet`
        Returns:
            :class:`~devilry_group.GroupComment` a new group comment
        """
        # NOTE(review): ``visibility`` is NOT copied, so the copy falls back
        # to the field default (visible to everyone) regardless of the
        # visibility of ``self`` -- confirm that only published comments are
        # ever copied before relying on this.
        commentcopy = GroupComment(
            part_of_grading=self.part_of_grading,
            feedback_set=feedbackset,
            text=self.text,
            draft_text=self.draft_text,
            user=self.user,
            parent=self.parent,
            created_datetime=self.created_datetime,
            published_datetime=self.published_datetime,
            user_role=self.user_role,
            comment_type=self.comment_type,
        )
        commentcopy.save()
        # Attached files are copied along with the comment.
        for commentfile in self.commentfile_set.all():
            commentfile.copy_into_comment(commentcopy)
        return commentcopy
class FeedbackSetQuerySet(models.QuerySet):
    """
    QuerySet for :class:`.FeedbackSet`.
    """
    def get_order_by_deadline_datetime_argument(self):
        """
        Get a Coalesce expression that can be used with ``order_by()``
        to order feedbacksets by deadline. This handles
        ordering the first feedbackset by the first deadline of the
        assignment.
        Examples:
            Basics (same as using :meth:`.order_by_deadline_datetime`)::
                FeedbackSet.objects.all()\
                    .order_by(FeedbackSet.objects.get_order_by_deadline_datetime_argument())
            Combine with other order by arguments::
                FeedbackSet.objects.all()\
                    .order_by('group__parentnode__short_name',
                              'group__id',
                              FeedbackSet.objects.get_order_by_deadline_datetime_argument())
        """
        return Coalesce('deadline_datetime', 'group__parentnode__first_deadline')
    def order_by_deadline_datetime(self):
        """
        Order by ``deadline_datetime``.
        Unlike just using ``order_by('deadline_datetime')``, this method
        uses :meth:`.get_order_by_deadline_datetime_argument`, which
        ensures that the first feedbackset is ordered using
        the first deadline of the assignment.
        """
        return self.order_by(self.get_order_by_deadline_datetime_argument())
    def has_public_comment_files_from_students(self, feedback_set):
        """
        Does the :class:`devilry.devilry_group.models.FeedbackSet` have any
        public :class:`devilry.devilry_comment.models.CommentFile` from students?
        Args:
            feedback_set: The `FeedbackSet` to check.
        Returns:
            ``True`` if any public student files, else ``False``.
        """
        from devilry.devilry_comment import models as comment_models
        group_comment_ids = GroupComment.objects \
            .filter(feedback_set=feedback_set,
                    user_role=GroupComment.USER_ROLE_STUDENT,
                    visibility=GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE) \
            .values_list('id', flat=True)
        # Bugfix: match CommentFile.comment_id against the GroupComment ids.
        # The previous ``id__in`` compared CommentFile primary keys against
        # comment ids, which only matched by coincidence. The sibling method
        # filter_public_comment_files_from_students() already uses the
        # correct comment_id join.
        return comment_models.CommentFile.objects \
            .filter(comment_id__in=group_comment_ids)\
            .exists()
    def filter_public_comment_files_from_students(self):
        """
        Get all `devilry.devilry_group.models.FeedbackSet` with public comments from students.
        Returns:
            QuerySet: A `FeedbackSet` queryset.
        """
        from devilry.devilry_comment import models as comment_models
        comment_file_ids = comment_models.CommentFile.objects.all()\
            .values_list('comment_id')
        feedback_set_ids = GroupComment.objects \
            .filter(user_role=GroupComment.USER_ROLE_STUDENT,
                    visibility=GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE) \
            .filter(id__in=comment_file_ids) \
            .values_list('feedback_set_id', flat=True)
        # NOTE: returns a fresh queryset (not chained on ``self``), matching
        # the original behavior.
        return FeedbackSet.objects.filter(id__in=feedback_set_ids)
class FeedbackSet(models.Model):
    """
    All comments that are given for a specific deadline (delivery and feedback) are
    linked to a feedback-set.

    If the comment has `instant_publish=True` it will be published instantly, otherwise the comments will only be
    visible once the feedbackset is published.

    All student-comments will be `instant_publish=True`, and the same applies to comments made by examiners that
    are not a part of feedback.
    """
    objects = FeedbackSetQuerySet.as_manager()

    #: The AssignmentGroup that owns this feedbackset.
    group = models.ForeignKey(assignment_group.AssignmentGroup, on_delete=models.CASCADE)

    #: This means the feedbackset is basically the first feedbackset.
    #: Choice for :obj:`~.FeedbackSet.feedbackset_type`.
    FEEDBACKSET_TYPE_FIRST_ATTEMPT = 'first_attempt'

    #: Is not the first feedbackset, but a new attempt.
    #: Choice for :obj:`~.FeedbackSet.feedbackset_type`
    FEEDBACKSET_TYPE_NEW_ATTEMPT = 'new_attempt'

    #: Something went wrong on grading, with this option, a new
    #: deadline should not be given to student. Student should just
    #: get notified that a new feedback was given.
    #: Choice for :obj:`~.FeedbackSet.feedbackset_type`.
    FEEDBACKSET_TYPE_RE_EDIT = 're_edit'

    #: A merged first attempt feedbackset
    FEEDBACKSET_TYPE_MERGE_FIRST_ATTEMPT = 'merge_first_attempt'

    #: A merged new attempt feedbackset
    FEEDBACKSET_TYPE_MERGE_NEW_ATTEMPT = 'merge_new_attempt'

    #: A merged re edit feedbackset
    FEEDBACKSET_TYPE_MERGE_RE_EDIT = 'merge_re_edit'

    #: Grading status choices for :obj:`~.FeedbackSet.feedbackset_type`.
    FEEDBACKSET_TYPE_CHOICES = [
        (FEEDBACKSET_TYPE_FIRST_ATTEMPT, gettext_lazy('first attempt')),
        (FEEDBACKSET_TYPE_NEW_ATTEMPT, gettext_lazy('new attempt')),
        (FEEDBACKSET_TYPE_RE_EDIT, gettext_lazy('re edit')),
        (FEEDBACKSET_TYPE_MERGE_FIRST_ATTEMPT, gettext_lazy('merge first attempt')),
        (FEEDBACKSET_TYPE_MERGE_NEW_ATTEMPT, gettext_lazy('merge new attempt')),
        (FEEDBACKSET_TYPE_MERGE_RE_EDIT, gettext_lazy('merge re edit')),
    ]

    #: Sets the type of the feedbackset.
    #: Defaults to :obj:`~.FeedbackSet.FEEDBACKSET_TYPE_NEW_ATTEMPT`.
    feedbackset_type = models.CharField(
        max_length=50,
        db_index=True,
        choices=FEEDBACKSET_TYPE_CHOICES,
        default=FEEDBACKSET_TYPE_NEW_ATTEMPT
    )

    #: Field can be set to ``True`` if a situation requires the :obj:`~.FeedbackSet` to not be counted as neither
    #: passed or failed but should be ignored, due to e.g sickness or something else. A reason for
    #: the :obj:`~.FeedbackSet` to be ignored must be provided in the :attr:`~FeedbackSet.ignored_reason`.
    ignored = models.BooleanField(default=False)

    #: The reason for the :obj:`~FeedbackSet` to be ignored.
    ignored_reason = models.TextField(null=False, blank=True, default='')

    #: The datetime for when the :obj:`~.FeedbackSet` was ignored.
    ignored_datetime = models.DateTimeField(null=True, blank=True)

    #: The User that created the feedbackset. Only used as metadata
    #: for superusers (for debugging).
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="created_feedbacksets",
        null=True, blank=True,
        on_delete=models.SET_NULL)

    #: The datetime when this FeedbackSet was created.
    created_datetime = models.DateTimeField(default=timezone.now)

    #: Last updated by.
    #: The user that was the last to make any changes on the :obj:`.FeedbackSet`.
    last_updated_by = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        null=True, blank=True,
        on_delete=models.SET_NULL,
        related_name='updated_feedbacksets'
    )

    #: The datetime of the deadline.
    #: The first feedbackset in an AssignmentGroup
    #: (ordered by :obj:`~.FeedbackSet.created_datetime`) does not
    #: have a deadline. It inherits this from the ``first_deadline`` field
    #: of :class:`devilry.apps.core.models.assignment.Assignment`.
    deadline_datetime = models.DateTimeField(null=False, blank=False)

    #: The datetime when the feedback was published.
    #: Set when an examiner publishes the feedback for this FeedbackSet.
    #:
    #: When this is ``None``, the feedbackset is not published. This means that
    #: no comments with :obj:`.AbstractGroupComment.part_of_grading` set to ``True``
    #: is visible, the grade (extracted from points) is not visible, and this
    #: feedbackset does not count when extracting the latest/active feedback/grade
    #: for the AssignmentGroup.
    grading_published_datetime = models.DateTimeField(
        null=True,
        blank=True
    )

    #: Set when the feedbackset is published by an examiner.
    #: If this is ``None``, the feedback is not published, and
    #: the ``points`` (grade) is not available to the student.
    grading_published_by = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        related_name="published_feedbacksets",
        null=True, blank=True,
        on_delete=models.SET_NULL
    )

    #: Points given by examiner for this feedbackset.
    #: The points on the last published FeedbackSet is the current
    #: grade for the AssignmentGroup.
    grading_points = models.PositiveIntegerField(
        null=True, blank=True
    )

    #: A :class:`django.db.models.TextField` for a gradeform filled or not filled for
    #: FeedbackSet.
    gradeform_data_json = models.TextField(
        null=False, blank=True, default=''
    )

    def __str__(self):
        # Bugfix: use ``deadline_datetime`` directly instead of the deprecated
        # ``current_deadline()`` helper (same value) so that merely
        # stringifying a FeedbackSet does not emit a DeprecationWarning.
        return "{} - {} - {} - deadline: {} - points: {}".format(
            self.group.assignment,
            self.feedbackset_type,
            self.group.get_unanonymized_long_displayname(),
            self.deadline_datetime,
            self.grading_points)

    @classmethod
    def clean_deadline(cls, deadline_datetime):
        """
        Normalize a deadline by stripping microseconds.
        """
        return deadline_datetime.replace(microsecond=0)

    @property
    def is_merge_type(self):
        """
        ``True`` when this feedbackset is one of the ``merge_*`` types.
        """
        return self.feedbackset_type == self.FEEDBACKSET_TYPE_MERGE_FIRST_ATTEMPT or \
            self.feedbackset_type == self.FEEDBACKSET_TYPE_MERGE_NEW_ATTEMPT or \
            self.feedbackset_type == self.FEEDBACKSET_TYPE_MERGE_RE_EDIT

    def clean(self):
        """
        Check for situations that should result in error.

        :raises ValidationError:
            Error occurs if :attr:`~.FeedbackSet.ignored` is ``True`` and
            :obj:`~.FeedbackSet.ignored_reason` is blank.
            |
            Error occurs if :attr:`~.FeedbackSet.ignored_reason` is filled and
            :attr:`~.FeedbackSet.ignored` is ``False``.
            |
            Error occurs if an ignored FeedbackSet has any grading data set.
            |
            Error occurs if :attr:`~.FeedbackSet.grading_published_datetime` has a datetime but
            :obj:`~.FeedbackSet.grading_published_by` is ``None``.
            |
            Error occurs if :attr:`~.FeedbackSet.grading_published_datetime` has a datetime but
            :obj:`~.FeedbackSet.grading_points` is ``None``.
        """
        if self.ignored and len(self.ignored_reason) == 0:
            raise ValidationError({
                'ignored': gettext_lazy('FeedbackSet can not be ignored without a reason')
            })
        elif len(self.ignored_reason) > 0 and not self.ignored:
            raise ValidationError({
                'ignored_reason': gettext_lazy('FeedbackSet can not have a ignored reason '
                                               'without being set to ignored.')
            })
        elif self.ignored and (self.grading_published_datetime or self.grading_points or self.grading_published_by):
            raise ValidationError({
                'ignored': gettext_lazy('Ignored FeedbackSet can not have grading_published_datetime, '
                                        'grading_points or grading_published_by set.')
            })
        else:
            if self.grading_published_datetime is not None and self.grading_published_by is None:
                raise ValidationError({
                    'grading_published_datetime': gettext_lazy('A FeedbackSet can not be published '
                                                               'without being published by someone.'),
                })
            if self.grading_published_datetime is not None and self.grading_points is None:
                raise ValidationError({
                    'grading_published_datetime': gettext_lazy('A FeedbackSet can not be published '
                                                               'without providing "points".'),
                })
        # Always normalize the deadline so datetime comparisons are stable.
        self.deadline_datetime = FeedbackSet.clean_deadline(self.deadline_datetime)

    def current_deadline(self, assignment=None):
        """
        Deprecated accessor for :attr:`.deadline_datetime`.
        """
        warnings.warn("deprecated, use FeedbackSet.deadline_datetime instead", DeprecationWarning)
        return self.deadline_datetime

    def __get_drafted_comments(self, user):
        """
        Get all drafted comments for this FeedbackSet drafted by ``user``.

        :param user: Current user.
        :return: QuerySet of GroupComments ordered by ``created_datetime``.
        """
        return GroupComment.objects.filter(
            feedback_set=self,
            part_of_grading=True
        ).exclude_private_comments_from_other_users(
            user=user
        ).order_by('created_datetime')

    def can_add_comment(self, comment_user_role, assignment=None):
        """
        Check if comments and uploads should be disabled for this feedback set
        for the role of the comment.

        This method raises a custom exception based on why comments and uploads are not allowed:

        Raises :class:`~.HardDeadlineExpiredException` if the assignment for this feedback set has deadline
        handling set to hard and the deadline has passed; students can not upload or add comments.

        Raises :class:`~.PeriodExpiredException` if the period has expired (or not started); no one can
        upload or add comments.

        A message will be provided with the exceptions.

        Args:
            comment_user_role: One of the choices for :class:`~devilry.devilry_comment.models.Comment.user_role`.
                ``Comment.USER_ROLE_STUDENT``, ``Comment.USER_ROLE_EXAMINER`` or ``Comment.USER_ROLE_ADMIN``.
            assignment: The assignment for this feedback set. Looked up via the group when not provided.
        """
        if not assignment:
            assignment = self.group.assignment
        period = assignment.period
        now = timezone.now()
        if period.start_time > now or period.end_time < now:
            raise PeriodExpiredException(
                message=gettext_lazy('This assignment is on an inactive semester. '
                                     'File upload and commenting is disabled.')
            )
        if assignment.deadline_handling_is_hard() and self.deadline_datetime < now:
            # Hard deadlines only lock out students; examiners and admins can
            # still add comments after the deadline.
            if comment_user_role == comment_models.Comment.USER_ROLE_STUDENT:
                raise HardDeadlineExpiredException(
                    message=gettext_lazy('Hard deadlines are enabled for this assignment. '
                                         'File upload and commenting is disabled.')
                )

    def publish(self, published_by, grading_points, gradeform_data_json=''):
        """
        Publishes this FeedbackSet and the comments that belong to it and that are
        part of the grading.

        :param published_by: Who published the feedbackset.
        :param grading_points: Points to give to student(s).
        :param gradeform_data_json: gradeform (coming soon).
        :return: Tuple ``(success, error_message)`` -- ``(True, '')`` on success.
        """
        current_deadline = self.deadline_datetime
        if current_deadline is None:
            return False, 'Cannot publish feedback without a deadline.'
        drafted_comments = self.__get_drafted_comments(published_by)
        now_without_seconds = timezone.now().replace(microsecond=0)
        # Publish drafts with strictly increasing publish times (one
        # microsecond apart) so their ordering is preserved, and publish the
        # grading itself after the last comment.
        for modifier, draft in enumerate(drafted_comments):
            draft.publish_draft(now_without_seconds + timezone.timedelta(microseconds=modifier))
        self.grading_points = grading_points
        self.grading_published_datetime = now_without_seconds + timezone.timedelta(
            microseconds=drafted_comments.count() + 1)
        self.grading_published_by = published_by
        self.full_clean()
        self.save()
        return True, ''

    def copy_feedbackset_into_group(self, group, target=None):
        """
        Copy this feedbackset into ``target`` or create a new feedbackset,
        and set group foreign key to ``group``

        Args:
            group: :class:`~core.AssignmentGroup`
            target: :class:`~devilry_group.FeedbackSet`

        Returns:
            :class:`~devilry_group.FeedbackSet` a feedbackset with copied data from self
        """
        feedbackset_kwargs = {
            'group': group,
            'feedbackset_type': self.feedbackset_type,
            'ignored': self.ignored,
            'ignored_reason': self.ignored_reason,
            'ignored_datetime': self.ignored_datetime,
            'created_by': self.created_by,
            'created_datetime': self.created_datetime,
            'deadline_datetime': self.deadline_datetime,
            'grading_published_datetime': self.grading_published_datetime,
            'grading_published_by': self.grading_published_by,
            'grading_points': self.grading_points,
            'gradeform_data_json': self.gradeform_data_json
        }
        if target is None:
            target = FeedbackSet(**feedbackset_kwargs)
        else:
            for key, value in feedbackset_kwargs.items():
                setattr(target, key, value)
        target.save()
        # Comments must be copied after the target has a primary key.
        for comment in self.groupcomment_set.all():
            comment.copy_comment_into_feedbackset(target)
        return target

    @property
    def gradeform_data(self):
        """
        Decoded :attr:`.gradeform_data_json`, or ``None`` when empty.
        """
        if self.gradeform_data_json:
            if not hasattr(self, '_gradeform_data'):
                # Store the decoded gradeform_data to avoid re-decoding the json for
                # each access. We invalidate this cache in the setter.
                self._gradeform_data = json.loads(self.gradeform_data_json)
            return self._gradeform_data
        else:
            return None

    @gradeform_data.setter
    def gradeform_data(self, gradeform_data):
        self.gradeform_data_json = json.dumps(gradeform_data)
        if hasattr(self, '_gradeform_data'):
            delattr(self, '_gradeform_data')
class FeedbacksetPassedPreviousPeriod(models.Model):
    """
    This model is used when a student have passed an assignment in previous period.

    Therefore we need to save some old data about the :class:`core.Assignment`,
    :class:`devilry_group.FeedbackSet` and :class:`core.Period` from the previous period,
    since that data may change or be deleted later.
    """
    #: Choices for :obj:`~.FeedbacksetPassedPreviousPeriod.passed_previous_period_type`:
    #: ``manual`` or ``auto``.
    PASSED_PREVIOUS_SEMESTER_TYPES = choices_with_meta.ChoicesWithMeta(
        choices_with_meta.Choice(
            value='manual'
        ),
        choices_with_meta.Choice(
            value='auto'
        )
    )

    #: Foreign key to class:`devilry_group.FeedbackSet` in current period.
    feedbackset = models.OneToOneField(
        to=FeedbackSet,
        null=True, blank=True,
        on_delete=models.CASCADE)

    #: The type of this entry. How it was generated.
    passed_previous_period_type = models.CharField(
        max_length=255,
        null=False, blank=False,
        choices=PASSED_PREVIOUS_SEMESTER_TYPES.iter_as_django_choices_short()
    )

    #: Old :attr:`core.Assignment.short_name`.
    assignment_short_name = ShortNameField(
        blank=True, default=''
    )

    #: Old :attr:`core.Assignment.long_name`.
    assignment_long_name = LongNameField(
        blank=True, default=''
    )

    #: Old :attr:`core.Assignment.max_points`.
    assignment_max_points = models.PositiveIntegerField(
        default=0, null=True, blank=True
    )

    #: Old :attr:`core.Assignment.passing_grade_min_points`.
    assignment_passing_grade_min_points = models.PositiveIntegerField(
        default=0, null=True, blank=True
    )

    #: Old :attr:`core.Period.short_name`.
    period_short_name = ShortNameField(
        blank=True, default=''
    )

    #: Old :attr:`core.Period.long_name`.
    period_long_name = LongNameField(
        blank=True, default=''
    )

    #: Old :attr:`core.Period.start_time`.
    period_start_time = models.DateTimeField(
        null=True, default=None
    )

    #: Old :attr:`core.Period.end_time`.
    period_end_time = models.DateTimeField(
        null=True, default=None
    )

    #: Old :attr:`FeedbackSet.grading_points`.
    grading_points = models.PositiveIntegerField(
        default=0, null=True, blank=True
    )

    #: Old :attr:`FeedbackSet.grading_published_by`.
    grading_published_by = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        null=True, blank=True,
        on_delete=models.SET_NULL,
        related_name='passed_previous_period_published_by'
    )

    #: Old :attr:`FeedbackSet.grading_published_datetime`.
    grading_published_datetime = models.DateTimeField(
        null=True, default=None
    )

    #: When this entry was created.
    created_datetime = models.DateTimeField(
        default=timezone.now
    )

    #: Who this entry was created by.
    created_by = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        null=True, blank=True,
        on_delete=models.SET_NULL,
        related_name='passed_previous_period_created_by'
    )
class FeedbackSetGradingUpdateHistory(models.Model):
    """
    Logs changes on the grading for a feedbackset.

    If we have this history, there will be no problem changing the grades on an already corrected feedback set, as we
    can display the history, just as with FeedbackSetDeadlineHistory.
    """
    #: The :class:`~.FeedbackSet` the update is for.
    feedback_set = models.ForeignKey(
        to=FeedbackSet,
        on_delete=models.CASCADE,
        related_name='grading_update_histories'
    )

    #: The user that updated the feedback set.
    updated_by = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    #: When the update was made.
    updated_datetime = models.DateTimeField(
        default=timezone.now
    )

    #: The grading points before the update.
    old_grading_points = models.PositiveIntegerField(
        null=False, blank=False
    )

    #: Who published the feedbackset before the update.
    old_grading_published_by = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    #: Grading publishing datetime before the update.
    old_grading_published_datetime = models.DateTimeField(
        null=False, blank=False
    )

    def __str__(self):
        return 'FeedbackSet id: {} - points: {} - updated_datetime: {}'.format(
            self.feedback_set.id, self.old_grading_points, self.updated_datetime)
class FeedbackSetDeadlineHistory(models.Model):
    """
    Logs change in deadline for a FeedbackSet.
    """
    #: The :class:`~.FeedbackSet` the change is for.
    feedback_set = models.ForeignKey(FeedbackSet, on_delete=models.CASCADE)

    #: The User that made the deadline change.
    changed_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE)

    #: Time of change.
    #: Defaults to ``timezone.now``.
    changed_datetime = models.DateTimeField(null=False, blank=False, default=timezone.now)

    #: The old :attr:`~.FeedbackSet.deadline_datetime`.
    deadline_old = models.DateTimeField(null=False, blank=False)

    #: The new :attr:`~.FeedbackSet.deadline_datetime`.
    deadline_new = models.DateTimeField(null=False, blank=False)

    def __str__(self):
        return 'Changed {}: from {} to {}'.format(self.changed_datetime, self.deadline_old, self.deadline_new)
class GroupCommentQuerySet(AbstractGroupCommentQuerySet):
    """
    QuerySet for :class:`.GroupComment`.
    """
    def annotate_with_last_edit_history(self, requestuser_devilryrole):
        """
        Annotate each comment with ``last_edithistory_datetime``: the
        ``edited_datetime`` of its newest edit-history entry. For the
        ``student`` role only entries visible to everyone are considered.
        """
        history_queryset = GroupCommentEditHistory.objects.filter(
            group_comment_id=OuterRef('id'))
        if requestuser_devilryrole == 'student':
            # Students must never see timestamps from non-public edits.
            history_queryset = history_queryset.filter(
                visibility=GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        newest_edit_datetime = history_queryset.order_by(
            '-edited_datetime').values('edited_datetime')[:1]
        return self.annotate(
            last_edithistory_datetime=models.Subquery(
                newest_edit_datetime, output_field=models.DateTimeField()))
class GroupComment(AbstractGroupComment):
    """
    A comment made to an `AssignmentGroup`.
    """
    objects = GroupCommentQuerySet.as_manager()

    #: v2 "<modelname>_<id>"
    #: This is only here to make it possible to debug and fix
    #: v2 migrations if anything goes wrong.
    v2_id = models.CharField(
        max_length=255,
        null=False, blank=True, default="")

    def __str__(self):
        return "{} - {} - {}".format(self.feedback_set, self.user_role, self.user)

    def clean(self):
        """
        Validate that new comments are still allowed on the owning feedback set.

        Raises:
            ValidationError: If the feedback set rejects comments for this
                user role (see :meth:`.FeedbackSet.can_add_comment`).
        """
        try:
            self.feedback_set.can_add_comment(
                comment_user_role=self.user_role,
                assignment=self.feedback_set.group.parentnode)
        except (HardDeadlineExpiredException, PeriodExpiredException) as e:
            # Re-raise as a plain ValidationError so standard Django
            # model/form validation can present the message to the user.
            raise ValidationError(message=e.message)
        super(GroupComment, self).clean()
class GroupCommentEditHistoryQuerySet(models.QuerySet):
    def exclude_private_comment_not_created_by_user(self, user):
        """
        Exclude all :class:`.GroupCommentEditHistory` entries that are private and
        where the comment is not created by the user.

        Args:
            user: The user to check against.
        """
        private_by_someone_else = (
            models.Q(visibility=GroupComment.VISIBILITY_PRIVATE)
            & ~models.Q(group_comment__user=user))
        return self.exclude(private_by_someone_else)
class GroupCommentEditHistory(comment_models.CommentEditHistory):
    """
    Model for logging changes in a :class:`.GroupComment`.
    """
    objects = GroupCommentEditHistoryQuerySet.as_manager()

    #: The :class:`.GroupComment` the editing history is for.
    group_comment = models.ForeignKey(
        to=GroupComment,
        on_delete=models.CASCADE
    )

    #: Visibility state of the comment when this log entry was created.
    visibility = models.CharField(
        max_length=50,
        db_index=True
    )

    def __str__(self):
        return 'GroupComment: {} - {}'.format(self.group_comment.user_role, self.group_comment.user)
class ImageAnnotationCommentQuerySet(AbstractGroupCommentQuerySet):
    """
    QuerySet for :class:`.ImageAnnotationComment`.

    Currently adds nothing beyond :class:`.AbstractGroupCommentQuerySet`.
    """
class ImageAnnotationComment(AbstractGroupComment):
    """
    A comment made on a file, as an annotation
    """
    objects = ImageAnnotationCommentQuerySet.as_manager()

    #: The :class:`devilry_comment.models.CommentFileImage` the annotation is placed on.
    image = models.ForeignKey(comment_models.CommentFileImage, on_delete=models.CASCADE)

    #: Horizontal coordinate of the annotation on the image.
    x_coordinate = models.PositiveIntegerField()

    #: Vertical coordinate of the annotation on the image.
    y_coordinate = models.PositiveIntegerField()
| bsd-3-clause | b1443be429528debf906c04edd824012 | 37.698009 | 121 | 0.641912 | 4.189581 | false | false | false | false |
devilry/devilry-django | devilry/devilry_admin/views/assignment/gradingconfiguration.py | 1 | 8605 |
import json
from crispy_forms import layout
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.translation import pgettext_lazy, gettext_lazy, pgettext
from cradmin_legacy.crispylayouts import PrimarySubmit
from cradmin_legacy.viewhelpers import formbase
from cradmin_legacy.viewhelpers.crudbase import OnlySaveButtonMixin
from devilry.apps.core.models import Assignment
from devilry.apps.core.models import PointToGradeMap
class GradingConfigurationForm(forms.Form):
    """
    Form for the grading configuration of an assignment: grading plugin,
    points-to-grade mapper, point limits and the optional custom
    point-to-grade table (posted as JSON by the javascript widget).
    """
    error_messages = {
        'point_to_grade_map_json_invalid_format': gettext_lazy(
            'The grade to points table must have at least 2 rows. The first row must '
            'have 0 as points.'
        ),
        'max_points_too_small_for_point_to_grade_map': gettext_lazy(
            'The grade to points table has points that is larger than the '
            'maximum number of points.'
        ),
        'max_points_larger_than_passing_grade_min_points': gettext_lazy(
            'Must be larger than the minimum number of points required to pass.'
        )
    }

    grading_system_plugin_id = forms.ChoiceField(
        required=True,
        widget=forms.RadioSelect(),
        choices=Assignment.GRADING_SYSTEM_PLUGIN_ID_CHOICES,
        label=pgettext_lazy(
            'assignment config', 'Examiner chooses')
    )

    points_to_grade_mapper = forms.ChoiceField(
        required=True,
        widget=forms.RadioSelect(),
        choices=Assignment.POINTS_TO_GRADE_MAPPER_CHOICES,
        label=pgettext_lazy(
            'assignment config', 'Students see')
    )

    passing_grade_min_points = forms.IntegerField(
        required=True,
        min_value=0,
        label=pgettext_lazy(
            'assignment config',
            'Minimum number of points required to pass')
    )

    #: Label/help_text are placeholders -- the javascript widget replaces them
    #: depending on the selected grading plugin (see the view's form_attributes).
    max_points = forms.IntegerField(
        min_value=0,
        required=True,
        label='...',
        help_text='...'
    )

    #: JSON-encoded list of ``[points, grade]`` pairs, maintained by the
    #: javascript widget.
    point_to_grade_map_json = forms.CharField(
        required=False,
        widget=forms.HiddenInput()
    )

    def __sort_point_to_grade_map(self, point_to_grade_map):
        # Sort the ``[points, grade]`` pairs by points, ascending.
        return list(sorted(point_to_grade_map, key=lambda item: item[0]))

    def get_point_to_grade_map(self):
        """
        The validated point-to-grade table, sorted by points ascending.
        Only call this after the form has validated successfully.
        """
        return self.__sort_point_to_grade_map(
            json.loads(self.cleaned_data['point_to_grade_map_json']))

    def clean(self):
        """
        Cross-field validation: the custom table must be valid JSON with at
        least 2 rows, ``max_points`` must cover both ``passing_grade_min_points``
        and the largest point value in the table.
        """
        cleaned_data = super(GradingConfigurationForm, self).clean()
        passing_grade_min_points = cleaned_data.get('passing_grade_min_points', None)
        max_points = cleaned_data.get('max_points', None)
        points_to_grade_mapper = cleaned_data.get('points_to_grade_mapper')
        point_to_grade_map_json = cleaned_data.get('point_to_grade_map_json', '').strip()
        point_to_grade_map = None
        if points_to_grade_mapper and points_to_grade_mapper == Assignment.POINTS_TO_GRADE_MAPPER_CUSTOM_TABLE:
            if not point_to_grade_map_json:
                raise ValidationError(self.error_messages['point_to_grade_map_json_invalid_format'])
            try:
                point_to_grade_map = self.__sort_point_to_grade_map(json.loads(point_to_grade_map_json))
            except ValueError:
                # Bugfix: malformed JSON from the client used to escape as an
                # unhandled ValueError (HTTP 500). Report it as a normal
                # validation error instead.
                raise ValidationError(self.error_messages['point_to_grade_map_json_invalid_format'])
            if len(point_to_grade_map) < 2:
                raise ValidationError(self.error_messages['point_to_grade_map_json_invalid_format'])
        if passing_grade_min_points is not None and max_points is not None:
            if passing_grade_min_points > max_points:
                raise ValidationError({
                    'max_points': self.error_messages['max_points_larger_than_passing_grade_min_points']
                })
        if max_points is not None and point_to_grade_map:
            # The map is sorted ascending, so the last row holds the largest points.
            largest_point_in_map = point_to_grade_map[-1][0]
            if largest_point_in_map > max_points:
                raise ValidationError({
                    'max_points': self.error_messages['max_points_too_small_for_point_to_grade_map']
                })
class AssignmentGradingConfigurationUpdateView(OnlySaveButtonMixin, formbase.FormView):
    """
    Admin view for editing the grading configuration of an assignment
    (the cradmin role is the :class:`~devilry.apps.core.models.Assignment`).
    """
    form_class = GradingConfigurationForm
    template_name = 'devilry_admin/assignment/gradingconfiguration-update/' \
                    'gradingconfiguration-update.django.html'
    extra_form_css_classes = ['cradmin-legacy-form-noasterisk']

    # Config consumed by the ``devilry-grading-configuration`` javascript
    # widget: per grading plugin, the label/help_text to show for the
    # ``max_points`` field.
    form_attributes = {
        'data-ievv-jsbase-widget': 'devilry-grading-configuration',
        'data-ievv-jsbase-widget-config': json.dumps({
            Assignment.GRADING_SYSTEM_PLUGIN_ID_PASSEDFAILED: {
                'max_points_label': pgettext(
                    'assignment config',
                    'Points awarded for passing grade'),
            },
            Assignment.GRADING_SYSTEM_PLUGIN_ID_POINTS: {
                'max_points_label': pgettext(
                    'assignment config',
                    'Maximum number of points'),
                'max_points_help_text': pgettext(
                    'assignment config',
                    'The maximum number of points possible for this assignment.'),
            },
        })
    }

    def get_pagetitle(self):
        return pgettext_lazy('assignment config',
                             'Edit grading configuration')

    def _render_custom_table_div(self):
        # Renders the container the javascript widget fills with the
        # editable point-to-grade table.
        return render_to_string(
            'devilry_admin/assignment/gradingconfiguration-update/custom-table.django.html',
            context={},
            request=self.request)

    def get_field_layout(self):
        return [
            layout.Div(
                'grading_system_plugin_id',
                'points_to_grade_mapper',
                layout.HTML(self._render_custom_table_div()),
                'point_to_grade_map_json',
                'passing_grade_min_points',
                'max_points',
                css_class='cradmin-globalfields')
        ]

    @property
    def assignment(self):
        # Cached lookup of the assignment (the cradmin role), with the
        # point-to-grade map prefetched for get_initial().
        if not hasattr(self, '_assignment'):
            queryset = Assignment.objects\
                .filter(id=self.request.cradmin_role.id)\
                .prefetch_point_to_grade_map()
            self._assignment = queryset.get()
        return self._assignment

    def get_initial(self):
        """
        Seed the form with the assignment's current grading configuration.
        """
        initial = {
            'grading_system_plugin_id': self.assignment.grading_system_plugin_id,
            'points_to_grade_mapper': self.assignment.points_to_grade_mapper,
            'passing_grade_min_points': self.assignment.passing_grade_min_points,
            'max_points': self.assignment.max_points,
        }
        if self.assignment.prefetched_point_to_grade_map is not None:
            initial['point_to_grade_map_json'] = json.dumps(
                self.assignment.prefetched_point_to_grade_map.as_choices())
        return initial

    def get_buttons(self):
        return [
            PrimarySubmit('save', gettext_lazy('Save')),
        ]

    def __create_point_to_grade_map(self, form, assignment):
        # Replace any existing map. A new map is only created when the
        # custom-table mapper is selected.
        PointToGradeMap.objects.filter(assignment=assignment).delete()
        if form.cleaned_data['points_to_grade_mapper'] != Assignment.POINTS_TO_GRADE_MAPPER_CUSTOM_TABLE:
            return
        point_to_grade_map = PointToGradeMap.objects.create(
            assignment=assignment, invalid=False)
        point_to_grade_map.create_map(*form.get_point_to_grade_map())
        point_to_grade_map.full_clean()

    def form_valid(self, form):
        """
        Save the configuration on the assignment and (re)create the
        point-to-grade map atomically.
        """
        assignment = self.request.cradmin_role
        assignment.grading_system_plugin_id = form.cleaned_data['grading_system_plugin_id']
        assignment.points_to_grade_mapper = form.cleaned_data['points_to_grade_mapper']
        assignment.passing_grade_min_points = form.cleaned_data['passing_grade_min_points']
        assignment.max_points = form.cleaned_data['max_points']
        assignment.full_clean()
        with transaction.atomic():
            assignment.save()
            self.__create_point_to_grade_map(form=form, assignment=assignment)
        messages.success(
            request=self.request,
            message=gettext_lazy('Saved Grading configuration for assignment')
        )
        return super(AssignmentGradingConfigurationUpdateView, self).form_valid(form)

    def get_backlink_url(self):
        return self.request.cradmin_instance.rolefrontpage_url()

    def get_context_data(self, **kwargs):
        context = super(AssignmentGradingConfigurationUpdateView, self).get_context_data(**kwargs)
        context['backlink_url'] = self.get_backlink_url()
        return context
| bsd-3-clause | f5d0e1f6e997f95c188e70b65ab0438f | 40.771845 | 111 | 0.626496 | 3.94363 | false | true | false | false |
devilry/devilry-django | devilry/devilry_import_v2database/modelimporters/modelimporter_utils.py | 1 | 2335 | import mimetypes
import sys
from django.conf import settings
class BulkCreator(object):
    """
    Collects model objects and saves them with a single
    ``bulk_create()`` call once a configurable batch size is reached.

    Usable as a context manager: entering clears the queue, exiting
    flushes any remaining objects.
    """
    def __init__(self, model_class):
        """
        Args:
            model_class: The Django model class the queued objects belong to.
        """
        self.model_class = model_class
        # DEVILRY_V2_DATABASE_MAX_BULK_CREATE_OVERRIDE makes the batch size
        # tunable (e.g. lowered in tests).
        self.max_bulk_create = getattr(settings,
                                       'DEVILRY_V2_DATABASE_MAX_BULK_CREATE_OVERRIDE',
                                       5000)
        self._objects = []

    def add(self, *objects):
        """
        Queue one or more objects, flushing automatically when the queue
        reaches ``max_bulk_create``.
        """
        self._objects.extend(objects)
        if len(self._objects) >= self.max_bulk_create:
            self.save_objects()

    def save_objects(self):
        """
        Flush the queue with one ``bulk_create()`` call (no-op when empty).
        """
        if self._objects:
            self.model_class.objects.bulk_create(self._objects)
        self.clear()

    def clear(self):
        """Drop all queued objects without saving them."""
        self._objects = []

    def __enter__(self):
        self.clear()
        return self

    def __exit__(self, ttype, value, traceback):
        self.save_objects()
class ProgressDots(object):
    """
    Writes one dot to stdout for every ``interval`` increments, as simple
    progress feedback for long-running imports. Output can be disabled
    globally with the ``DEVILRY_V2_DATABASE_PRINT_PROGRESS_DOTS`` setting.
    """
    def __init__(self, interval=100, messageformat='One dot per {interval}: '):
        self._count = 0
        self._interval = interval
        self._messageformat = messageformat
        self._enabled = getattr(settings, 'DEVILRY_V2_DATABASE_PRINT_PROGRESS_DOTS', True)

    def increment_progress(self, increment=1):
        """Advance the counter, printing a dot at every interval boundary."""
        self._count += increment
        if not self._enabled:
            return
        if self._count % self._interval != 0:
            return
        sys.stdout.write('.')
        sys.stdout.flush()

    def __enter__(self):
        self._count = 0
        if self._enabled:
            sys.stdout.write(self._messageformat.format(interval=self._interval))
            sys.stdout.flush()
        return self

    def __exit__(self, ttype, value, traceback):
        if self._enabled:
            sys.stdout.write('\n')
def get_mimetype_from_filename(filename):
    """
    Guess the mimetype from a filename, falling back to
    ``application/octet-stream`` when the filename is empty/``None``
    or the extension is unknown.
    """
    fallback = 'application/octet-stream'
    if not filename:
        return fallback
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or fallback
def make_flat_v2_id(object_dict):
    """
    Build a flat v2 id, ``<modelname>__<pk>``, from a serialized object dict
    whose ``model`` key is on the ``<app>.<modelname>`` form.
    """
    model_name = object_dict['model'].split('.')[1]
    return '{}__{}'.format(model_name, object_dict['pk'])
def make_staticfeedback_fileattachment_v2_id(staticfeedback_id, attachment_id):
    """
    Build the v2 id for a StaticFeedback file attachment:
    ``staticfeedbackfileattachment__<staticfeedback_id>__<attachment_id>``.
    """
    return '__'.join(('staticfeedbackfileattachment',
                      str(staticfeedback_id),
                      str(attachment_id)))
| bsd-3-clause | e48760422f018f5b4f7c53754f069d3b | 28.935897 | 90 | 0.591863 | 4.025862 | false | false | false | false |
devilry/devilry-django | devilry/utils/management.py | 1 | 3008 | import sys
from optparse import make_option
DEFAULT_ENCODING = 'utf-8'
def get_input_encoding():
    """
    Get the encoding used for input to management commands.

    Falls back from ``sys.stdin.encoding`` through
    ``sys.getdefaultencoding()`` to ``DEFAULT_ENCODING`` (utf-8).
    """
    encoding = sys.stdin.encoding
    if not encoding:
        encoding = sys.getdefaultencoding()
    return encoding or DEFAULT_ENCODING
def make_input_encoding_option():
    """
    Make an optparse ``--input-encoding`` option that should be used on
    management commands reading input.

    ``dest`` is set to ``inputencoding``.
    """
    return make_option(
        '--input-encoding',
        dest='inputencoding',
        default=get_input_encoding(),
        help=('Input encoding. Defaults to ``sys.stdin.encoding``, falling back '
              'to ``sys.getdefaultencoding()`` and back to utf-8 if both are undefined. '
              # Bugfix: removed the doubled verb ("It is currently is set")
              # from the user-facing help text.
              'It is currently set to: {0}').format(get_input_encoding()))
def add_input_encoding_argument(parser):
    """
    Add an argparse ``--input-encoding`` argument that should be used on
    management commands reading input.

    ``dest`` is set to ``inputencoding``.

    Args:
        parser: An ``argparse.ArgumentParser`` (or compatible) to add the
            argument to.
    """
    return parser.add_argument(
        '--input-encoding',
        dest='inputencoding',
        default=get_input_encoding(),
        help=('Input encoding. Defaults to ``sys.stdin.encoding``, falling back '
              'to ``sys.getdefaultencoding()`` and back to utf-8 if both are undefined. '
              # Bugfix: removed the doubled verb ("It is currently is set")
              # from the user-facing help text.
              'It is currently set to: {0}').format(get_input_encoding()))
def get_output_encoding():
    """
    Get the output encoding used for output from management commands.

    :return: ``sys.stdout.encoding``, falling back to
        ``sys.getdefaultencoding()`` and then ``DEFAULT_ENCODING`` (utf-8).
    """
    # Bugfix: this used to read ``sys.stdin.encoding`` -- the *input*
    # stream -- while the function name and docstring promise the
    # stdout encoding (copy-paste from get_input_encoding).
    return sys.stdout.encoding or sys.getdefaultencoding() or DEFAULT_ENCODING
def make_output_encoding_option():
    """
    Make an optparse ``--output-encoding`` option that should be used on
    management commands producing output.

    ``dest`` is set to ``outputencoding``.
    """
    return make_option(
        '--output-encoding',
        dest='outputencoding',
        default=get_output_encoding(),
        help=('Output encoding. Defaults to ``sys.stdout.encoding``, falling back '
              'to ``sys.getdefaultencoding()`` and back to utf-8 if both are undefined. '
              # Bugfix: removed the doubled verb ("It is currently is set")
              # from the user-facing help text.
              'It is currently set to: {0}').format(get_output_encoding()))
def add_output_encoding_argument(parser):
    """
    Add an argparse ``--output-encoding`` argument that should be used on
    management commands producing output.

    ``dest`` is set to ``outputencoding``.

    Args:
        parser: An ``argparse.ArgumentParser`` (or compatible) to add the
            argument to.
    """
    return parser.add_argument(
        '--output-encoding',
        dest='outputencoding',
        default=get_output_encoding(),
        help=('Output encoding. Defaults to ``sys.stdout.encoding``, falling back '
              'to ``sys.getdefaultencoding()`` and back to utf-8 if both are undefined. '
              # Bugfix: removed the doubled verb ("It is currently is set")
              # from the user-facing help text.
              'It is currently set to: {0}').format(get_output_encoding()))
| bsd-3-clause | 64cb768c8eee92a33caa459b2e3bf0c1 | 36.6 | 104 | 0.607048 | 4.378457 | false | false | false | false |
devilry/devilry-django | devilry/devilry_cradmin/devilry_listbuilder/period.py | 1 | 1861 |
from cradmin_legacy.viewhelpers import listbuilder
class AdminItemValueMixin(object):
    """
    Item value for a Period in the admin UI.
    """
    valuealias = 'period'
    template_name = 'devilry_cradmin/devilry_listbuilder/period/admin-itemvalue.django.html'

    def get_title(self):
        """Use the long name of the period as the title."""
        return self.period.long_name

    def get_description(self):
        """Return ``True`` to get the description-content block to render."""
        return True  # Return True to get the description-content block to render.
class AdminItemValue(AdminItemValueMixin, listbuilder.itemvalue.TitleDescription):
    """
    ItemValue renderer for a single period for admins.
    """
    def get_base_css_classes_list(self):
        """Extend the inherited css classes with the admin period-item class."""
        css_classes = super(AdminItemValue, self).get_base_css_classes_list()
        return css_classes + ['devilry-cradmin-perioditemvalue-admin']
class StudentItemValueMixin(object):
    """
    Mixin with the item-value configuration for rendering a Period
    in the student UI.

    Requires the Period queryset to be annotated with:

    - :meth:`devilry.apps.core.models.PeriodQuerySet.extra_annotate_with_assignmentcount_for_studentuser`.
    - :meth:`devilry.apps.core.models.PeriodQuerySet.extra_annotate_with_user_qualifies_for_final_exam`.
    """
    valuealias = 'period'
    template_name = 'devilry_cradmin/devilry_listbuilder/period/student-itemvalue.django.html'

    def get_title(self):
        """Use the period's long name as the item title."""
        period = self.period
        return period.long_name

    def get_description(self):
        """A truthy value makes the description-content block render."""
        return True
class StudentItemValue(StudentItemValueMixin, listbuilder.itemvalue.TitleDescription):
    """
    Renders a single period as a title/description item value for students.
    """
    def get_base_css_classes_list(self):
        """Extend the inherited CSS classes with the student period marker class."""
        css_classes = super().get_base_css_classes_list()
        css_classes.append('devilry-cradmin-perioditemvalue-student')
        return css_classes
| bsd-3-clause | 34173530bc9f471d0c6c15fa8a0e39e6 | 32.836364 | 106 | 0.718431 | 3.722 | false | false | false | false |
devilry/devilry-django | devilry/utils/passed_in_previous_period.py | 1 | 11545 | import math
from django.core.exceptions import ValidationError
from django.db import models, transaction
from devilry.apps.core.models import Assignment
from devilry.apps.core.models import AssignmentGroup
from devilry.apps.core.models import Candidate
from devilry.devilry_group.models import FeedbackSet, FeedbacksetPassedPreviousPeriod
class PassedInPreviousPeriodError(ValidationError):
    """
    Base class for all errors raised by :class:`.PassedInPreviousPeriod`.
    """
    pass
class FeedbackSetIsAlreadyGraded(PassedInPreviousPeriodError):
    """
    Will be raised when a candidate is already graded on the current assignment.
    """
class SomeCandidatesDoesNotQualifyToPass(PassedInPreviousPeriodError):
    """
    Will be raised when one or more candidates passed into
    :meth:`.PassedInPreviousPeriod.set_passed_in_current_period`
    does not qualify to pass the assignment.

    See :meth:`.PassedInPreviousPeriod.get_queryset`: the passed candidates
    will be cross-checked against that queryset on submit.
    """
class NoCandidatesPassed(PassedInPreviousPeriodError):
    """
    Will be raised when there are no candidates in the queryset
    passed into :meth:`.PassedInPreviousPeriod.set_passed_in_current_period`.
    """
class MismatchOfNewAndOldCandidateId(PassedInPreviousPeriodError):
    """
    Will be raised when the user connected to the new and old candidate does not
    have the same ID.
    """
class PassedInPreviousPeriod(object):
    """
    Passes candidates on the current assignment by reusing a passing grade
    they earned on an assignment with the same short name in a previous period.
    """

    #: Supported grading plugins are passed/failed and points.
    SUPPORTED_GRADING_PLUGINS = [
        Assignment.GRADING_SYSTEM_PLUGIN_ID_PASSEDFAILED,
        Assignment.GRADING_SYSTEM_PLUGIN_ID_POINTS
    ]

    def __init__(self, assignment, from_period, requestuser=None):
        """
        Initialize with assignment and the earliest period we will approve for.

        Args:
            assignment: :class:`core.Assignment`
            from_period: :class:`core.Period`
            requestuser: The user that passed the request.
        """
        self.assignment = assignment
        self.from_period = from_period
        self.requestuser = requestuser

    def get_queryset(self):
        """
        Gets the queryset for Candidates in previous periods that have passed the assignment,

        1. Get all feedbacksets that have passed the assignment in previous periods,
           and exclude feedbacksets in current period
        2. Get all the assignment groups that have passed after ``self.from_period.start_time``
        3. Get students on current assignment ``self.assignment`` and filter away candidates who have been graded.
        4. Get all :class:`core.Candidate` objects that have passed the assignment in previous periods,
           and ensure that the newest feedback is taken in account.

        Returns:
            :class:`core.Candidate` queryset
        """
        # 1. Published feedbacksets with a passing grade, on assignments with the
        #    same short_name in the same subject, excluding the current assignment.
        feedbackset_queryset = FeedbackSet.objects.filter(
            group__parentnode__parentnode__parentnode=self.assignment.subject,
            group__parentnode__short_name=self.assignment.short_name,
            grading_published_datetime__isnull=False,
            group__parentnode__passing_grade_min_points__lte=models.F('grading_points')
        ).exclude(group__parentnode=self.assignment)\
            .select_related('group__parentnode')
        # 2. Groups whose last published feedbackset passed, on supported grading
        #    plugins, within [from_period.start_time, current period start).
        group_queryset = AssignmentGroup.objects.filter(
            parentnode__parentnode__parentnode=self.assignment.subject,
            parentnode__grading_system_plugin_id__in=self.SUPPORTED_GRADING_PLUGINS,
            parentnode__parentnode__start_time__gte=self.from_period.start_time,
            parentnode__short_name=self.assignment.short_name,
            parentnode__parentnode__start_time__lt=self.assignment.parentnode.start_time,
            cached_data__last_published_feedbackset__in=feedbackset_queryset
        ).select_related('parentnode__parentnode', 'cached_data__last_published_feedbackset')
        # 3. Users on the current assignment that have NOT yet been graded
        #    (no published feedbackset on their current group).
        students_on_current = Candidate.objects.filter(
            assignment_group__parentnode__parentnode__parentnode=self.assignment.subject,
            assignment_group__parentnode=self.assignment,
            assignment_group__cached_data__last_published_feedbackset__isnull=True
        ).select_related('assignment_group__parentnode', 'relatedstudent__user')\
            .values_list('relatedstudent__user', flat=True).distinct()
        # 4. One candidate per user, ordered so the newest publishing_time wins
        #    via distinct('relatedstudent__user').
        candidates = Candidate.objects.filter(
            assignment_group__parentnode__parentnode__parentnode=self.assignment.subject,
            assignment_group__in=group_queryset,
            relatedstudent__user__in=students_on_current
        ).select_related('relatedstudent__user',
                         'assignment_group__parentnode__parentnode',
                         'assignment_group__cached_data')\
            .order_by('relatedstudent__user', '-assignment_group__parentnode__publishing_time')\
            .distinct('relatedstudent__user')
        return candidates

    def get_current_candidate_queryset(self, candidates):
        """
        Gets a queryset with candidates :class:`core.Candidate` on current assignment ``self.assignment``

        Args:
            candidates: :class:`core.Candidate` candidates from previous assignments

        Returns:
            candidate queryset on current assignment

        Raises:
            :class:`.SomeCandidatesDoesNotQualifyToPass`
                raises when one or more candidates does not qualify to pass the assignment
        """
        selected_candidate_users = candidates.values_list('relatedstudent__user', flat=True)
        # Cross-check the selection against get_queryset(): a count mismatch
        # means at least one selected candidate is not qualified.
        if (self.get_queryset().filter(relatedstudent__user__in=selected_candidate_users).count() !=
                len(selected_candidate_users)):
            raise SomeCandidatesDoesNotQualifyToPass('Some of the selected students did not qualify to pass')
        return Candidate.objects.filter(
            assignment_group__parentnode=self.assignment,
            relatedstudent__user__in=selected_candidate_users
        ).select_related('assignment_group',
                         'relatedstudent',
                         'assignment_group__cached_data__first_feedbackset')\
            .order_by('relatedstudent__user')

    def convert_points(self, old_feedbackset):
        """
        Converts the points in ``old_feedbackset`` to current grading configuration in ``self.assignment``.
        Decimals will be rounded up to give favor for the student.

        Args:
            old_feedbackset: :class:`devilry_group.FeedbackSet`

        Returns:
            Converted points.
        """
        old_grading_points = old_feedbackset.grading_points
        old_passing_grade_min_points = old_feedbackset.group.parentnode.passing_grade_min_points
        old_max_points = old_feedbackset.group.parentnode.max_points
        new_passing_grade_min_points = self.assignment.passing_grade_min_points
        new_max_points = self.assignment.max_points
        # A full score maps directly to a full score on the new configuration.
        if old_max_points == old_grading_points:
            return new_max_points
        # Linear rescale of the span between the passing grade and max points,
        # rounded up (math.ceil) in the student's favor, clamped to new max.
        old_range = old_max_points - old_passing_grade_min_points
        new_range = new_max_points - new_passing_grade_min_points
        new_grading_points = ((old_grading_points - old_passing_grade_min_points) * new_range)
        new_grading_points = new_grading_points / float(old_range) if old_range > 0 else + 0
        new_grading_points = math.ceil(new_grading_points + new_passing_grade_min_points)
        return new_grading_points if new_grading_points <= new_max_points else new_max_points

    def __create_feedbackset_passed_previous_period(self, old_candidate, new_candidate):
        """
        Creates a :class:`devilry_group.FeedbacksetPassedPreviousPeriod` model which contains
        information about the passed assignment in previous period.

        Args:
            old_candidate: :class:`core.Candidate` the old candidate
            new_candidate: :class:`core.Candidate` new candidate in current period
        """
        old_assignment = old_candidate.assignment_group.parentnode
        old_feedbackset = old_candidate.assignment_group.cached_data.last_feedbackset
        old_period = old_candidate.assignment_group.parentnode.parentnode
        # Snapshot of the old assignment/period/grading so the audit trail
        # survives later changes to (or deletion of) the originals.
        FeedbacksetPassedPreviousPeriod(
            feedbackset=new_candidate.assignment_group.cached_data.first_feedbackset,
            passed_previous_period_type=FeedbacksetPassedPreviousPeriod.PASSED_PREVIOUS_SEMESTER_TYPES.AUTO.value,
            assignment_short_name=old_assignment.short_name,
            assignment_long_name=old_assignment.long_name,
            assignment_max_points=old_assignment.max_points,
            assignment_passing_grade_min_points=old_assignment.passing_grade_min_points,
            period_short_name=old_period.short_name,
            period_long_name=old_period.long_name,
            period_start_time=old_period.start_time,
            period_end_time=old_period.end_time,
            grading_points=old_feedbackset.grading_points,
            grading_published_by=old_feedbackset.grading_published_by,
            grading_published_datetime=old_feedbackset.grading_published_datetime,
            created_by=self.requestuser
        ).save()

    def __publish_grading_on_current_assignment(self, old_candidate, new_candidate, published_by):
        """
        Publish grading on current assignment ``self.assignment``

        Args:
            old_candidate: :class:`core.Candidate` the candidate in the previous passed assignment
            new_candidate: :class:`core.Candidate` the candidate in the current period on assignment
            published_by: will be published by this user
        """
        grading_points = self.convert_points(old_candidate.assignment_group.cached_data.last_feedbackset)
        new_candidate.assignment_group.cached_data.first_feedbackset.publish(published_by, grading_points)

    def __set_passed(self, old_candidate, new_candidate, published_by):
        """
        Publishes the :class:`devilry_group.FeedbackSet` in current period with grading
        from previous period

        Args:
            old_candidate: :class:`core.Candidate` the candidate in the previous passed assignment
            new_candidate: :class:`core.Candidate` the candidate in the current period on assignment
            published_by: will be published by this user
        """
        # Sanity check: both candidates must belong to the same user.
        if new_candidate.relatedstudent.user_id != old_candidate.relatedstudent.user_id:
            raise MismatchOfNewAndOldCandidateId
        self.__create_feedbackset_passed_previous_period(old_candidate, new_candidate)
        self.__publish_grading_on_current_assignment(old_candidate, new_candidate, published_by)

    def set_passed_in_current_period(self, candidates, published_by):
        """
        Takes a candidate queryset with candidates that will pass the assignment in current period

        Args:
            candidates: :class:`core.Candidate` queryset with selected candidates
                that will pass the assignment in current period
            published_by: will be published by this user

        Raises:
            :class:`.NoCandidatesPassed`: if ``candidates`` is empty.
            :class:`.SomeCandidatesDoesNotQualifyToPass`: if some candidates do not qualify.
        """
        if candidates.count() < 1:
            raise NoCandidatesPassed('candidate queryset is empty!')
        # Map user id -> old candidate for pairing with the current-period candidates.
        old_candidates_dict = {}
        for candidate in candidates:
            old_candidates_dict[candidate.relatedstudent.user_id] = candidate
        # All-or-nothing: every grading is published in one transaction.
        with transaction.atomic():
            for new_candidate in self.get_current_candidate_queryset(candidates):
                self.__set_passed(old_candidates_dict[new_candidate.relatedstudent.user_id],
                                  new_candidate, published_by)
| bsd-3-clause | 80187b9527028c10c5005d824c59558b | 45.930894 | 119 | 0.680381 | 4.340226 | false | false | false | false |
devilry/devilry-django | devilry/devilry_examiner/views/assignment/bulkoperations/bulk_operations_grouplist.py | 1 | 11298 | # -*- coding: utf-8 -*-
from cradmin_legacy.viewhelpers import multiselect2, multiselect2view
from django import forms
from django.db import models
from django.db.models.functions import Concat, Lower
from django.utils.translation import gettext_lazy, pgettext_lazy
from devilry.apps.core import models as core_models
from devilry.devilry_comment.editor_widget import DevilryMarkdownNoPreviewWidget
from devilry.devilry_cradmin import devilry_listbuilder, devilry_listfilter
class SelectedAssignmentGroupForm(forms.Form):
    """
    Form for bulk operations on a set of selected :class:`core.AssignmentGroup`s,
    with a markdown comment attached to the feedback.
    """

    #: Model class the selected items belong to.
    qualification_modelclass = core_models.AssignmentGroup

    #: Error message used when a selected group no longer matches the
    #: selectable queryset (deleted or altered while selecting).
    invalid_qualification_item_message = pgettext_lazy(
        'selected_assignment_group_form error_message',
        'Invalid assignment group items was selected.'
    )

    #: The items selected as ModelMultipleChoiceField.
    #: If some or all items should be selected by default, override this.
    selected_items = forms.ModelMultipleChoiceField(
        # No items are selectable by default.
        # The real queryset is injected in __init__ via ``selectable_items_queryset``.
        queryset=None,

        # Used if the object to select for some reason does
        # not exist (has been deleted or altered in some way).
        error_messages={
            'invalid_choice': invalid_qualification_item_message,
        }
    )

    #: A wysiwig editor for writing a feedback message.
    feedback_comment_text = forms.CharField(
        widget=DevilryMarkdownNoPreviewWidget(),
        help_text=gettext_lazy('Add a general comment to the feedback'),
        initial=gettext_lazy('Delivery has been corrected.'),
        label=False
    )

    def __init__(self, *args, **kwargs):
        """
        Requires two extra kwargs (popped before the parent constructor runs):

        - ``selectable_items_queryset``: queryset used to validate ``selected_items``.
        - ``assignment``: the assignment the groups belong to (stored on the form).
        """
        selectable_qualification_items_queryset = kwargs.pop('selectable_items_queryset')
        self.assignment = kwargs.pop('assignment')
        super(SelectedAssignmentGroupForm, self).__init__(*args, **kwargs)
        self.fields['selected_items'].queryset = selectable_qualification_items_queryset
class AssignmentGroupTargetRenderer(multiselect2.target_renderer.Target):
    """
    Target renderer for the multiselect "selected assignment groups" box,
    including the feedback comment field.
    """

    #: The selected item as it is shown when selected.
    #: By default this is :class:`.SelectedQualificationItem`.
    selected_target_renderer = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue

    #: A descriptive name for the items selected.
    descriptive_item_name = gettext_lazy('assignment group')

    def get_submit_button_text(self):
        """Label for the submit button."""
        return pgettext_lazy(
            'assignment_group_target_renderer submit_button_text',
            'Submit selected %(what)s'
        ) % {'what': self.descriptive_item_name}

    def get_with_items_title(self):
        """Title shown when one or more items are selected."""
        return pgettext_lazy(
            'assignment_group_target_renderer with_items_title',
            'Selected %(what)s'
        ) % {'what': self.descriptive_item_name}

    def get_without_items_text(self):
        """Text shown when no items are selected."""
        return pgettext_lazy(
            'assignment_group_target_renderer without_items_text',
            'No %(what)s selected'
        ) % {'what': self.descriptive_item_name}

    def get_field_layout(self):
        """Render the feedback comment field inside the target box."""
        return [
            'feedback_comment_text'
        ]
class AbstractAssignmentGroupMultiSelectListFilterView(multiselect2view.ListbuilderFilterView):
    """
    Abstract class that implements ``ListbuilderFilterView``.

    Adds anonymization and activity filters for the ``AssignmentGroup``s.
    Fetches the ``AssignmentGroups`` through :meth:`~.get_unfiltered_queryset_for_role` and joins
    necessary tables used for anonymization and annotations used by viewfilters.
    """
    model = core_models.AssignmentGroup

    def dispatch(self, request, *args, **kwargs):
        # The cradmin role for this view is the Assignment.
        self.assignment = self.request.cradmin_role
        return super(AbstractAssignmentGroupMultiSelectListFilterView, self).dispatch(request, *args, **kwargs)

    def get_default_paginate_by(self, queryset):
        """Page size for the listbuilder."""
        return 5

    def __add_filterlist_items_anonymous_uses_custom_candidate_ids(self, filterlist):
        # Search/order filters for assignments anonymized with custom candidate ids.
        filterlist.append(devilry_listfilter.assignmentgroup.SearchAnonymousUsesCustomCandidateIds())
        filterlist.append(devilry_listfilter.assignmentgroup.OrderByAnonymousUsesCustomCandidateIds())

    def __add_filterlist_items_anonymous(self, filterlist):
        # Search/order filters for anonymized assignments (automatic anonymous ids).
        filterlist.append(devilry_listfilter.assignmentgroup.SearchAnonymous())
        filterlist.append(devilry_listfilter.assignmentgroup.OrderByAnonymous(include_points=False))

    def __add_filterlist_items_not_anonymous(self, filterlist):
        # Search/order filters when students are visible.
        filterlist.append(devilry_listfilter.assignmentgroup.SearchNotAnonymous())
        filterlist.append(devilry_listfilter.assignmentgroup.OrderByNotAnonymous(include_points=False))

    def __add_anonymization_filters_for_items(self, filterlist):
        """
        Adds filters based on the :attr:`~.devilry.apps.core.models.anonymizationmode` of the
        ``Assignment``.
        """
        if self.assignment.uses_custom_candidate_ids:
            self.__add_filterlist_items_anonymous_uses_custom_candidate_ids(filterlist=filterlist)
        else:
            self.__add_filterlist_items_anonymous(filterlist=filterlist)

    def add_filterlist_items(self, filterlist):
        """
        Adds filters to use in the view.

        Override this to add more filters.
        """
        if self.assignment.is_anonymous:
            self.__add_anonymization_filters_for_items(filterlist=filterlist)
        else:
            self.__add_filterlist_items_not_anonymous(filterlist=filterlist)
        filterlist.append(devilry_listfilter.assignmentgroup.ActivityFilter())

    def get_candidate_queryset(self):
        """
        Candidates prefetched per group, limited to the fields needed for
        (anonymous) display, ordered case-insensitively by fullname+shortname.
        """
        return core_models.Candidate.objects\
            .select_related('relatedstudent__user')\
            .only(
                'candidate_id',
                'assignment_group',
                'relatedstudent__candidate_id',
                'relatedstudent__automatic_anonymous_id',
                'relatedstudent__user__shortname',
                'relatedstudent__user__fullname',
            )\
            .order_by(
                Lower(
                    Concat(
                        'relatedstudent__user__fullname',
                        'relatedstudent__user__shortname',
                        output_field=models.CharField()
                    )))

    def get_examiner_queryset(self):
        """
        Examiners prefetched per group, limited to the fields needed for
        (anonymous) display, ordered case-insensitively by fullname+shortname.
        """
        return core_models.Examiner.objects\
            .select_related('relatedexaminer__user')\
            .only(
                'relatedexaminer',
                'assignmentgroup',
                'relatedexaminer__automatic_anonymous_id',
                'relatedexaminer__user__shortname',
                'relatedexaminer__user__fullname',
            )\
            .order_by(
                Lower(
                    Concat(
                        'relatedexaminer__user__fullname',
                        'relatedexaminer__user__shortname',
                        output_field=models.CharField()
                    )))

    def get_annotations_for_queryset(self, queryset):
        """
        Add annotations for the the queryset.
        This function is called in ``get_unfiltered_queryset_for_role()``

        Args:
            queryset (QuerySet): Add annotations to.

        Returns:
            (QuerySet): annotated queryset.
        """
        return queryset \
            .annotate_with_is_waiting_for_feedback_count() \
            .annotate_with_is_waiting_for_deliveries_count() \
            .annotate_with_is_corrected_count() \
            .annotate_with_number_of_private_groupcomments_from_user(user=self.request.user) \
            .annotate_with_number_of_private_imageannotationcomments_from_user(user=self.request.user)

    def get_unfiltered_queryset_for_role(self, role):
        """
        Get unfiltered ``QuerySet`` of :obj:`~.devilry.apps.core.models.AssignmentGroup`s.

        Override this with a call to super and more filters to the queryset.

        Args:
            role (:class:`~.devilry.apps.core.models.Assignment`): cradmin role.

        Returns:
            (QuerySet): ``QuerySet`` of ``AssignmentGroups``.
        """
        group_queryset = core_models.AssignmentGroup.objects \
            .filter(parentnode=role) \
            .prefetch_related(
                models.Prefetch('candidates',
                                queryset=self.get_candidate_queryset())) \
            .prefetch_related(
                models.Prefetch('examiners',
                                queryset=self.get_examiner_queryset()))
        return self.get_annotations_for_queryset(queryset=group_queryset)\
            .distinct() \
            .select_related('cached_data__last_published_feedbackset',
                            'cached_data__last_feedbackset',
                            'cached_data__first_feedbackset',
                            'parentnode')

    def get_value_and_frame_renderer_kwargs(self):
        """Extra kwargs passed to the item value/frame renderers."""
        return {
            'assignment': self.assignment
        }

    def get_form_kwargs(self):
        """Inject the selectable queryset and assignment into the form."""
        kwargs = super(AbstractAssignmentGroupMultiSelectListFilterView, self).get_form_kwargs()
        kwargs['selectable_items_queryset'] = self.get_unfiltered_queryset_for_role(self.request.cradmin_role)
        kwargs['assignment'] = self.request.cradmin_role
        return kwargs

    def get_selected_groupids(self, posted_form):
        """Return the ids of the groups selected in the posted form."""
        return [item.id for item in posted_form.cleaned_data['selected_items']]

    def get_feedbackset_ids_from_posted_ids(self, form):
        """
        Get list of ids of the last :class:`~.devilry.devilry_group.models.FeedbackSet` from each ``AssignmentGroup``
        in ``form``s cleaned data.

        Args:
            form: cleaned form.

        Returns:
            (list): list of ``FeedbackSet`` ids.
        """
        group_ids = self.get_selected_groupids(posted_form=form)
        feedback_set_ids = self.get_unfiltered_queryset_for_role(role=self.request.cradmin_role) \
            .filter(id__in=group_ids) \
            .values_list('cached_data__last_feedbackset_id', flat=True)
        return list(feedback_set_ids)

    def get_group_displaynames(self, form):
        """
        Build a list of short displaynames for the groups that where corrected.

        Notes:
            Display names are anonymous if the
            :attr:`~.devilry.apps.core.models.assignemnt.Assignment.anonymizationmode` is not ``ANONYMIZATIONMODE_OFF``.

        Args:
            form: posted form

        Returns:
            (list): list of short displaynames for the groups
        """
        groups = form.cleaned_data['selected_items']
        display_names = [group.short_displayname for group in groups]
        return display_names

    def get_success_url(self):
        """
        Defaults to the apps indexview.
        """
        return str(self.request.cradmin_app.reverse_appindexurl())

    def form_valid(self, form):
        # NOTE(review): redundant override — only delegates to super. Kept for
        # subclasses that call it explicitly; candidate for removal.
        return super(AbstractAssignmentGroupMultiSelectListFilterView, self).form_valid(form)

    def get_filterlist_url(self, filters_string):
        """Subclasses must build the URL for the given filter string."""
        raise NotImplementedError()

    def add_success_message(self, anonymous_display_names):
        """
        Add list of anonymized displaynames of the groups that received feedback.

        Args:
            anonymous_display_names (list): List of anonymized displaynames for groups.
        """
        raise NotImplementedError()
| bsd-3-clause | 7be15b916017e1b1312328fb4b2e4ed9 | 38.642105 | 120 | 0.638874 | 4.271456 | false | false | false | false |
devilry/devilry-django | devilry/devilry_deadlinemanagement/views/multiselect_groups_view.py | 1 | 8626 | # -*- coding: utf-8 -*-
from crispy_forms import layout
from django import forms
from django.utils.translation import gettext_lazy, pgettext_lazy
from cradmin_legacy.viewhelpers import multiselect2
from cradmin_legacy.viewhelpers import multiselect2view
from devilry.apps.core import models as core_models
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_cradmin import devilry_listfilter
from devilry.utils import datetimeutils
from devilry.devilry_deadlinemanagement.views import viewutils
class SelectedAssignmentGroupForm(forms.Form):
    """
    Form validating the set of :class:`core.AssignmentGroup`s selected for
    a bulk deadline operation.
    """

    #: Model class the selected items belong to.
    qualification_modelclass = core_models.AssignmentGroup

    #: Error message used when a selected group no longer matches the
    #: selectable queryset (deleted or altered while selecting).
    #: NOTE(review): "you where selecting" should read "you were selecting" —
    #: fixing it would invalidate existing translations, so flagged only.
    invalid_qualification_item_message = gettext_lazy(
        'Something went wrong. This may happen if someone else performed a similar operation '
        'while you where selecting. Refresh the page and try again')

    #: The items selected as ModelMultipleChoiceField.
    #: If some or all items should be selected by default, override this.
    selected_items = forms.ModelMultipleChoiceField(
        # No items are selectable by default.
        # The real queryset is injected in __init__ via ``selectable_items_queryset``.
        queryset=None,

        # Used if the object to select for some reason does
        # not exist (has been deleted or altered in some way).
        error_messages={
            'invalid_choice': invalid_qualification_item_message,
        }
    )

    def __init__(self, *args, **kwargs):
        """
        Requires two extra kwargs (popped before the parent constructor runs):

        - ``selectable_items_queryset``: queryset used to validate ``selected_items``.
        - ``assignment``: the assignment the groups belong to (stored on the form).
        """
        selectable_qualification_items_queryset = kwargs.pop('selectable_items_queryset')
        self.assignment = kwargs.pop('assignment')
        super(SelectedAssignmentGroupForm, self).__init__(*args, **kwargs)
        self.fields['selected_items'].queryset = selectable_qualification_items_queryset
class AssignmentGroupTargetRenderer(multiselect2.target_renderer.Target):
    """
    Target renderer for the deadline-management multiselect of assignment groups.
    """

    #: The selected item as it is shown when selected.
    #: By default this is :class:`.SelectedQualificationItem`.
    selected_target_renderer = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue

    #: A descriptive name for the items selected.
    descriptive_item_name = gettext_lazy('groups')

    def get_move_deadline_text(self):
        """Label for the move-deadline action."""
        return pgettext_lazy(
            'assignment_group_target_renderer move_dealine_text',
            'Move deadline for selected %(what)s'
        ) % {'what': self.descriptive_item_name}

    def get_submit_button_text(self):
        """Label for the submit button."""
        return pgettext_lazy(
            'assignment_group_target_renderer submit_button_text',
            'Continue with selected %(what)s'
        ) % {'what': self.descriptive_item_name}

    def get_with_items_title(self):
        """Title shown when one or more items are selected."""
        return pgettext_lazy(
            'assignment_group_target_renderer with_items_title',
            'Selected %(what)s'
        ) % {'what': self.descriptive_item_name}

    def get_without_items_text(self):
        """Text shown when no items are selected."""
        return pgettext_lazy(
            'assignment_group_target_renderer without_items_text',
            'No %(what)s selected'
        ) % {'what': self.descriptive_item_name}

    def get_hidden_fields(self):
        # Hidden field filled in client-side to tell the POST handler which
        # action was chosen.
        return [
            layout.Hidden(name='post_type_received_data', value='')
        ]
class AssignmentGroupMultiSelectListFilterView(viewutils.DeadlineManagementMixin, multiselect2view.ListbuilderFilterView):
    """
    Abstract class that implements ``ListbuilderFilterView``.

    Adds anonymization and activity filters for the ``AssignmentGroup``s.
    Fetches the ``AssignmentGroups`` through :meth:`~.get_unfiltered_queryset_for_role` and joins
    necessary tables used for anonymization and annotations used by viewfilters.
    """
    model = core_models.AssignmentGroup
    value_renderer_class = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue
    template_name = 'devilry_deadlinemanagement/deadline-bulk-multiselect-filterlistview.django.html'

    #: Which deadline operation this view handles; set by subclasses / URL kwargs.
    handle_deadline_type = None

    def get_pagetitle(self):
        return pgettext_lazy('assignment_group_multiselect_list_filter_view pagetitle',
                             'Select groups')

    def get_pageheading(self):
        return pgettext_lazy('assignment_group_multiselect_list_filter_view pageheading',
                             'Select groups')

    def get_page_subheading(self):
        return pgettext_lazy('assignment_group_multiselect_list_filter_view page_subheading',
                             'Select the groups you want to manage the deadline for.')

    def get_default_paginate_by(self, queryset):
        """Page size for the listbuilder."""
        return 5

    def __add_filterlist_items_anonymous_uses_custom_candidate_ids(self, filterlist):
        # Search/order filters for assignments anonymized with custom candidate ids.
        filterlist.append(devilry_listfilter.assignmentgroup.SearchAnonymousUsesCustomCandidateIds())
        filterlist.append(devilry_listfilter.assignmentgroup.OrderByAnonymousUsesCustomCandidateIds())

    def __add_filterlist_items_anonymous(self, filterlist):
        # Search/order filters for anonymized assignments (automatic anonymous ids).
        filterlist.append(devilry_listfilter.assignmentgroup.SearchAnonymous())
        filterlist.append(devilry_listfilter.assignmentgroup.OrderByAnonymous(include_points=False))

    def __add_filterlist_items_not_anonymous(self, filterlist):
        # Search/order filters when students are visible.
        filterlist.append(devilry_listfilter.assignmentgroup.SearchNotAnonymous())
        filterlist.append(devilry_listfilter.assignmentgroup.OrderByNotAnonymous(include_points=False))

    def __add_anonymization_filters_for_items(self, filterlist):
        """
        Adds filters based on the :attr:`~.devilry.apps.core.models.anonymizationmode` of the
        ``Assignment``.
        """
        if self.assignment.uses_custom_candidate_ids:
            self.__add_filterlist_items_anonymous_uses_custom_candidate_ids(filterlist=filterlist)
        else:
            self.__add_filterlist_items_anonymous(filterlist=filterlist)

    def add_filterlist_items(self, filterlist):
        super(AssignmentGroupMultiSelectListFilterView, self).add_filterlist_items(filterlist)
        if self.assignment.is_anonymous:
            self.__add_anonymization_filters_for_items(filterlist=filterlist)
        else:
            self.__add_filterlist_items_not_anonymous(filterlist=filterlist)
        filterlist.append(devilry_listfilter.assignmentgroup.ActivityFilter())
        filterlist.append(devilry_listfilter.assignmentgroup.IsPassingGradeFilter())
        filterlist.append(devilry_listfilter.assignmentgroup.PointsFilter())

    def get_unfiltered_queryset_for_role(self, role):
        # Delegates to the DeadlineManagementMixin, which scopes the groups to
        # the deadline operation being handled.
        return self.get_queryset_for_role_on_handle_deadline_type(role=role)

    def get_target_renderer_class(self):
        return AssignmentGroupTargetRenderer

    def get_form_class(self):
        return SelectedAssignmentGroupForm

    def get_value_and_frame_renderer_kwargs(self):
        """Extra kwargs passed to the item value/frame renderers."""
        return {
            'assignment': self.assignment
        }

    def get_form_kwargs(self):
        """Inject the selectable queryset and assignment into the form."""
        kwargs = super(AssignmentGroupMultiSelectListFilterView, self).get_form_kwargs()
        kwargs['selectable_items_queryset'] = self.get_unfiltered_queryset_for_role(self.assignment)
        kwargs['assignment'] = self.assignment
        return kwargs

    def get_target_renderer_kwargs(self):
        """Point the form at the manage-deadline POST endpoint for this deadline/operation."""
        kwargs = super(AssignmentGroupMultiSelectListFilterView, self).get_target_renderer_kwargs()
        kwargs['form_action'] = self.request.cradmin_app.reverse_appurl(
            viewname='manage-deadline-post',
            kwargs={
                'deadline': datetimeutils.datetime_to_url_string(self.deadline),
                'handle_deadline': self.handle_deadline_type
            })
        return kwargs

    def get_selected_groupids(self, posted_form):
        """Return the ids of the groups selected in the posted form."""
        return [item.id for item in posted_form.cleaned_data['selected_items']]

    def get_group_anonymous_displaynames(self, form):
        """
        Build a list of anonymized displaynames for the groups that where corrected.

        Args:
            form: posted form

        Returns:
            (list): list of anonymized displaynames for the groups
        """
        groups = form.cleaned_data['selected_items']
        anonymous_display_names = [
            str(group.get_anonymous_displayname(assignment=self.assignment))
            for group in groups]
        return anonymous_display_names

    def get_success_url(self):
        """
        Defaults to the apps indexview.
        """
        return str(self.request.cradmin_app.reverse_appindexurl())

    def get_filterlist_url(self, filters_string):
        """Build the filter URL, keeping the deadline and operation in the path."""
        return self.request.cradmin_app.reverse_appurl(
            'select-groups-manually-filter', kwargs={
                'deadline': datetimeutils.datetime_to_url_string(self.deadline),
                'handle_deadline': self.handle_deadline_type,
                'filters_string': filters_string
            })
| bsd-3-clause | 0bdd6b4f0ed3245cafaaeb30c3b4057b | 41.284314 | 122 | 0.692326 | 4.123327 | false | false | false | false |
devilry/devilry-django | devilry/devilry_admin/views/assignment/passed_previous_period/__init__.py | 1 | 1276 | from cradmin_legacy import crapp
from devilry.devilry_admin.views.assignment.passed_previous_period import overview
from devilry.devilry_admin.views.assignment.passed_previous_period import passed_previous_period
from devilry.devilry_admin.views.assignment.passed_previous_period import passed_previous_semester_manual
class App(crapp.App):
    """
    URL routing for the admin "passed in previous period" app.
    Order matters: URLs are matched top to bottom.
    """
    appurls = [
        # Landing page for the app.
        crapp.Url(
            r'^$',
            overview.Overview.as_view(),
            name=crapp.INDEXVIEW_NAME
        ),

        # Auto pass students on selected period.
        crapp.Url(
            r'^select-period$',
            passed_previous_period.SelectPeriodView.as_view(),
            name='select_period'),
        crapp.Url(
            r'^assignment/(?P<period_id>\d+)$',
            passed_previous_period.PassedPreviousAssignmentView.as_view(),
            name='assignments'),
        crapp.Url(
            r'^confirm/(?P<period_id>\d+)$',
            passed_previous_period.ApprovePreviousAssignments.as_view(),
            name='confirm'),

        # Manually select students to pass.
        crapp.Url(r'^select-groups/(?P<filters_string>.+)?$',
                  passed_previous_semester_manual.PassAssignmentGroupsView.as_view(),
                  name='manually_select_groups'),
    ]
devilry/devilry-django | devilry/apps/core/models/delivery.py | 1 | 13166 | from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q, Max
from django.utils import timezone
from django.utils.translation import gettext_lazy
from . import deliverytypes
from .deadline import Deadline
from .filemeta import FileMeta
from . import AbstractIsAdmin, AbstractIsExaminer, AbstractIsCandidate
class DeliveryQuerySet(models.QuerySet):
    """
    Custom QuerySet for :class:`.Delivery` providing reusable filter shortcuts.
    """
    def filter_is_examiner(self, user):
        """
        Returns a queryset with all Deliveries where the given ``user`` is examiner.
        """
        return self.filter(deadline__assignment_group__examiners__relateduser__user=user).distinct()

    def filter_is_candidate(self, user):
        """
        Returns a queryset with all Deliveries where the given ``user`` is candidate.
        """
        return self.filter(deadline__assignment_group__candidates__relatedstudent__user=user).distinct()

    def filter_is_active(self):
        """
        Returns a queryset with all Deliveries on assignments that are published
        within a currently active period.
        """
        now = timezone.now()
        return self.filter(
            deadline__assignment_group__parentnode__publishing_time__lt=now,
            deadline__assignment_group__parentnode__parentnode__start_time__lt=now,
            deadline__assignment_group__parentnode__parentnode__end_time__gt=now).distinct()

    def filter_examiner_has_access(self, user):
        """
        Returns a queryset with all Deliveries on active assignments where the
        given ``user`` is examiner.
        """
        return self.filter_is_active().filter_is_examiner(user)
class DeliveryManager(models.Manager):
    """
    Manager for :class:`.Delivery` exposing the :class:`.DeliveryQuerySet`
    filter shortcuts on the default manager.
    """
    def get_queryset(self):
        """Use :class:`.DeliveryQuerySet` as the base queryset."""
        return DeliveryQuerySet(self.model, using=self._db)

    def filter_is_candidate(self, user):
        """
        Returns a queryset with all Deliveries where the given ``user`` is candidate.
        """
        return self.get_queryset().filter_is_candidate(user)

    def filter_is_examiner(self, user):
        """
        Returns a queryset with all Deliveries where the given ``user`` is examiner.

        WARNING: You should normally not use this alone because it gives the
        examiner information from expired periods (which they are not supposed
        to get). Use :meth:`.filter_examiner_has_access` instead.
        """
        return self.get_queryset().filter_is_examiner(user)

    def filter_is_active(self):
        """
        Returns a queryset with all Deliveries on active Assignments.
        """
        return self.get_queryset().filter_is_active()

    def filter_examiner_has_access(self, user):
        """
        Returns a queryset with all Deliveries on active Assignments
        where the given ``user`` is examiner.

        NOTE: This returns all groups that the given ``user`` has examiner-rights for.
        """
        return self.get_queryset().filter_examiner_has_access(user)
class Delivery(models.Model, AbstractIsAdmin, AbstractIsCandidate, AbstractIsExaminer):
    """ A class representing a given delivery from an `AssignmentGroup`_.
    How to create a delivery::
        deadline = Deadline.objects.get(....)
        candidate = Candidate.objects.get(....)
        delivery = Delivery(
            deadline=deadline,
            delivered_by=candidate)
        delivery.set_number()
        delivery.full_clean()
        delivery.save()
    .. attribute:: time_of_delivery
        A django.db.models.DateTimeField_ that holds the date and time the
        Delivery was uploaded.
    .. attribute:: deadline
        A django.db.models.ForeignKey_ pointing to the `Deadline`_ for this Delivery.
    .. attribute:: number
        A django.db.models.fields.PositiveIntegerField with the delivery-number
        within this assignment-group. This number is automatically
        incremented within each assignmentgroup, starting from 1. Must be
        unique within the assignment-group. Automatic incrementation is used
        if number is None when calling :meth:`save`.
    .. attribute:: delivered_by
        A django.db.models.ForeignKey_ pointing to the user that uploaded
        the Delivery
    .. attribute:: successful
        A django.db.models.BooleanField_ telling whether or not the Delivery
        was successfully uploaded.
    .. attribute:: after_deadline
        A django.db.models.BooleanField_ telling whether or not the Delivery
        was delivered after deadline..
    .. attribute:: filemetas
        A set of :class:`filemetas <devilry.apps.core.models.FileMeta>` for this delivery.
    .. attribute:: feedbacks
        A set of :class:`feedbacks <devilry.apps.core.models.StaticFeedback>` on this delivery.
    .. attribute:: etag
        A DateTimeField containing the etag for this object.
    .. attribute:: last_feedback
        The last `StaticFeedback`_ on this delivery. This is updated each time a feedback is added.
    .. attribute:: copy_of
        If this delivery is a copy of another delivery, this ForeignKey points
        to that other delivery. This is set by :meth:`.Delivery.copy`.
    .. attribute:: copies
        The reverse of ``copy_of`` - a queryset that returns all copies of this delivery.
    """
    # DELIVERY_NOT_CORRECTED = 0
    # DELIVERY_CORRECTED = 1
    objects = DeliveryManager()
    delivery_type = models.PositiveIntegerField(
        default=deliverytypes.ELECTRONIC,
        verbose_name="Type of delivery",
        help_text='0: Electronic delivery, 1: Non-electronic delivery, 2: Alias delivery. Default: 0.')
    time_of_delivery = models.DateTimeField(
        verbose_name=gettext_lazy('Time of delivery'),
        help_text='Holds the date and time the Delivery was uploaded.',
        default=timezone.now)
    deadline = models.ForeignKey(
        Deadline, related_name='deliveries',
        verbose_name=gettext_lazy('Deadline'), on_delete=models.CASCADE)
    number = models.PositiveIntegerField(
        help_text='The delivery-number within this assignment-group. This number is automatically '
                  'incremented within each AssignmentGroup, starting from 1. Always '
                  'unique within the assignment-group.')
    # Fields set by user
    successful = models.BooleanField(blank=True, default=True,
                                     help_text='Has the delivery and all its files been uploaded successfully?')
    delivered_by = models.ForeignKey(
        "Candidate", blank=True, null=True,
        on_delete=models.SET_NULL,
        help_text='The candidate that delivered this delivery. If this is None, '
                  'the delivery was made by an administrator for a student.')
    # Only used when this is aliasing an earlier delivery, delivery_type == ALIAS
    alias_delivery = models.ForeignKey("Delivery", blank=True, null=True,
                                       on_delete=models.SET_NULL,
                                       help_text='Links to another delivery. Used when delivery_type is Alias.')
    copy_of = models.ForeignKey(
        "Delivery", blank=True, null=True,
        related_name='copies',
        on_delete=models.SET_NULL,
        help_text='Link to a delivery that this delivery is a copy of. This is set by the copy-method.')
    last_feedback = models.OneToOneField("StaticFeedback", blank=True, null=True,
                                         related_name='latest_feedback_for_delivery', on_delete=models.CASCADE)
    def _delivered_too_late(self):
        """ Compares the deadline and time of delivery.
        If time_of_delivery is greater than the deadline, return True.
        """
        return self.time_of_delivery > self.deadline.deadline
    after_deadline = property(_delivered_too_late)
    class Meta:
        app_label = 'core'
        verbose_name = 'Delivery'
        verbose_name_plural = 'Deliveries'
        ordering = ['-time_of_delivery']
        # unique_together = ('assignment_group', 'number')
    @classmethod
    def q_is_candidate(cls, user_obj):
        """
        Returns a django.models.Q object matching Deliveries where
        the given student is candidate.
        """
        return Q(successful=True) & Q(deadline__assignment_group__candidates__student=user_obj)
    @classmethod
    def q_published(cls, old=True, active=True):
        """
        Returns a Q object matching deliveries on published assignments.
        Set ``old=False`` to exclude expired periods, or ``active=False``
        to exclude currently-running periods.
        """
        now = timezone.now()
        q = Q(deadline__assignment_group__parentnode__publishing_time__lt=now)
        if not active:
            q &= ~Q(deadline__assignment_group__parentnode__parentnode__end_time__gte=now)
        if not old:
            q &= ~Q(deadline__assignment_group__parentnode__parentnode__end_time__lt=now)
        return q
    @classmethod
    def q_is_examiner(cls, user_obj):
        """Returns a Q matching successful deliveries examined by ``user_obj``."""
        return Q(successful=True) & Q(deadline__assignment_group__examiners__user=user_obj)
    @property
    def is_last_delivery(self):
        """
        Returns ``True`` if this is the last delivery for this AssignmentGroup.
        """
        # NOTE(review): ``.first()`` returns None for an empty queryset and never
        # raises DoesNotExist, so the except-branch below is effectively dead.
        # The result is still correct because ``None == self`` is False.
        try:
            last_delivery = Delivery.objects \
                .filter(deadline__assignment_group_id=self.deadline.assignment_group_id) \
                .order_by('-time_of_delivery').first()
            return last_delivery == self
        except Delivery.DoesNotExist:
            return False
    @property
    def assignment_group(self):
        """
        Shortcut for ``self.deadline.assignment_group``.
        """
        return self.deadline.assignment_group
    @property
    def assignment(self):
        """
        Shortcut for ``self.deadline.assignment_group.assignment``.
        """
        return self.assignment_group.assignment
    def add_file(self, filename, iterable_data):
        """ Add a file to the delivery.
        :param filename:
            A filename as defined in :class:`FileMeta`.
        :param iterable_data:
            A iterable yielding data that can be written to file using the
            write() method of a storage backend (byte strings).
        """
        filemeta = FileMeta()
        filemeta.delivery = self
        filemeta.filename = filename
        filemeta.size = 0
        # First save assigns a primary key, which the deliverystore needs
        # to open a writable file for this filemeta.
        filemeta.save()
        f = FileMeta.deliverystore.write_open(filemeta)
        filemeta.save()
        for data in iterable_data:
            # NOTE(review): chunks are decoded as UTF-8 before writing, but
            # ``size`` counts the raw byte length of each chunk -- confirm the
            # storage backend expects text and that byte-size is intended.
            f.write(data.decode('utf-8'))
            filemeta.size += len(data)
        f.close()
        filemeta.save()
        return filemeta
    def set_number(self):
        """Set ``self.number`` to one more than the current max in the group (or 1)."""
        m = Delivery.objects.filter(deadline__assignment_group=self.deadline.assignment_group).aggregate(Max('number'))
        self.number = (m['number__max'] or 0) + 1
    def set_time_of_delivery_to_now(self):
        # NOTE(review): this strips microseconds AND tzinfo, producing a naive
        # datetime even though the field default (timezone.now) is aware --
        # confirm this is intended under USE_TZ.
        self.time_of_delivery = timezone.now().replace(microsecond=0, tzinfo=None)
    def clean(self):
        """ Validate the delivery. """
        if self.delivery_type == deliverytypes.ALIAS:
            if not self.alias_delivery and not self.feedbacks.exists():
                raise ValidationError('A Delivery with delivery_type=ALIAS must have an alias_delivery or feedback.')
        super(Delivery, self).clean()
    def __str__(self):
        return ('Delivery(id={id}, number={number}, group={group}, '
                'time_of_delivery={time_of_delivery})').format(id=self.id,
                                                               group=self.deadline.assignment_group,
                                                               number=self.number,
                                                               time_of_delivery=self.time_of_delivery.isoformat())
    def copy(self, newdeadline):
        """
        Copy this delivery, including all FileMeta's and their files, and all
        feedbacks into ``newdeadline``. Sets the ``copy_of`` attribute of the
        created delivery.
        .. note:: Always run this in a transaction.
        .. warning::
            This does not autoset the latest feedback as ``feedback`` or
            the ``last_delivery`` on the group.
            You need to handle that yourself after the copy.
        :return: The newly created, cleaned and saved delivery.
        """
        deliverycopy = Delivery(deadline=newdeadline,
                                delivery_type=self.delivery_type,
                                number=self.number,
                                successful=self.successful,
                                time_of_delivery=self.time_of_delivery,
                                delivered_by=self.delivered_by,
                                alias_delivery=self.alias_delivery,
                                last_feedback=None,
                                copy_of=self)
        def save_deliverycopy():
            # NOTE(review): saves before full_clean(), so validation errors are
            # raised only after the row is written -- relies on the surrounding
            # transaction (see the note above) to roll back.
            deliverycopy.save()
            deliverycopy.full_clean()
        save_deliverycopy()
        for filemeta in self.filemetas.all():
            filemeta.copy(deliverycopy)
        # Copy feedbacks newest-first; index 0 is the newest one, which becomes
        # last_feedback on the copy.
        for index, staticfeedback in enumerate(self.feedbacks.order_by('-save_timestamp')):
            staticfeedbackcopy = staticfeedback.copy(deliverycopy)
            if index == 0:
                deliverycopy.last_feedback = staticfeedbackcopy
                save_deliverycopy()
        return deliverycopy
    def is_electronic(self):
        """
        Returns ``True`` if :attr:`Delivery.delivery_type` is ``0`` (electric).
        """
        return self.delivery_type == deliverytypes.ELECTRONIC
    def is_nonelectronic(self):
        """
        Returns ``True`` if :attr:`Delivery.delivery_type` is ``1`` (non-electric).
        """
        return self.delivery_type == deliverytypes.NON_ELECTRONIC
| bsd-3-clause | 54bbe676f3d19a132b0c050939a03134 | 37.609971 | 119 | 0.623728 | 4.362492 | false | false | false | false |
devilry/devilry-django | devilry/apps/core/models/candidate.py | 1 | 4220 | from django.conf import settings
from django.db import models
from devilry.apps.core.models import RelatedStudent
from devilry.devilry_account.models import User
class CandidateQuerySet(models.QuerySet):
    def filter_has_passing_grade(self, assignment):
        """
        Filter only :class:`.Candidate` objects within the given
        assignment that has a passing grade.
        That means that this filters out all Candidates on AssignmentGroups
        the latest published :class:`devilry.devilry_group.models.FeedbackSet`
        has less :obj:`devilry.devilry_group.models.FeedbackSet.grading_points`
        than the ``passing_grade_min_points`` for the assignment.
        This method performs ``filter(assignment_group__parentnode=assignment)``
        in addition to the query that checks the feedbacksets.
        Args:
            assignment: A :class:`devilry.apps.core.models.assignment.Assignment` object.
        """
        # Correlated raw-SQL subquery: for each candidate's group, pick the
        # grading_points of the most recently published feedbackset and
        # require it to be >= the assignment's passing_grade_min_points.
        return self.filter(assignment_group__parentnode=assignment)\
            .extra(
                where=[
                    """
                    (
                        SELECT devilry_group_feedbackset.grading_points
                        FROM devilry_group_feedbackset
                        WHERE
                            devilry_group_feedbackset.group_id = core_candidate.assignment_group_id
                            AND
                            devilry_group_feedbackset.grading_published_datetime IS NOT NULL
                        ORDER BY devilry_group_feedbackset.grading_published_datetime DESC
                        LIMIT 1
                    ) >= %s
                    """
                ],
                params=[
                    assignment.passing_grade_min_points
                ]
            )
class Candidate(models.Model):
    """
    A student within an AssignmentGroup.
    A candidate is a many-to-many between :class:`devilry.apps.core.models.AssignmentGroup`
    and a user.
    """
    objects = CandidateQuerySet.as_manager()
    class Meta:
        app_label = 'core'
    #: Will be removed in 3.0 - see https://github.com/devilry/devilry-django/issues/810
    old_reference_not_in_use_student = models.ForeignKey(User, null=True, default=None, blank=True, on_delete=models.CASCADE)
    #: ForeignKey to :class:`devilry.apps.core.models.relateduser.RelatedStudent`
    #: (the model that ties User as student on a Period).
    relatedstudent = models.ForeignKey(RelatedStudent, on_delete=models.CASCADE)
    #: The :class:`devilry.apps.core.models.assignment_group.AssignmentGroup`
    #: where this candidate belongs.
    assignment_group = models.ForeignKey(
        'AssignmentGroup',
        related_name='candidates', on_delete=models.CASCADE)
    #: A candidate ID imported from a third party system.
    #: Only used if ``uses_custom_candidate_ids==True`` on the assignment.
    candidate_id = models.CharField(
        max_length=30, blank=True, null=True,
        help_text='An optional candidate id. This can be anything as long as it '
                  'is less than 30 characters. Used to show the user on anonymous assignmens.')
    def get_anonymous_name(self, assignment=None):
        """
        Get the anonymous name of this candidate.
        Args:
            assignment: An optional :class:`devilry.apps.core.models.assignment.Assignment`.
                if this is provided, we use this instead of looking up
                ``assignment_group.parentnode``. This is essential for views
                that list many candidates since it avoid extra database lookups.
        """
        if assignment is None:
            assignment = self.assignment_group.parentnode
        if assignment.uses_custom_candidate_ids:
            # Prefer the imported candidate_id; fall back to an automatic id.
            if self.candidate_id:
                return self.candidate_id
            else:
                return self.relatedstudent.get_automatic_anonymous_id_with_fallback()
        else:
            return self.relatedstudent.get_anonymous_name()
    def __str__(self):
        # NOTE(review): 'Candiate' is a typo, but this is runtime output that
        # may be matched by tests/logs -- left unchanged here.
        return 'Candiate id={id}, student={student}, group={group}'.format(
            id=self.id,
            student=self.relatedstudent,
            group=self.assignment_group)
| bsd-3-clause | 7de417880fcb4bddf0c3cd15fe0133b9 | 39.970874 | 125 | 0.620616 | 4.446786 | false | false | false | false |
devilry/devilry-django | devilry/utils/GroupNodes.py | 1 | 3336 | from . import OrderedDict
def group_assignmentgroups(assignment_group_list):
    """
    Group a list of AssignmentGroups into a list of GroupNode trees.
    AssignmentGroups sit three levels below the tree roots, so we build
    the trees with a height of 2 ancestor levels.
    """
    tree_height = 2
    return group_nodes(assignment_group_list, tree_height)
def group_assignments(assignment_list):
    """
    Group a list of Assignments into a list of GroupNode trees.
    Assignments sit two levels below the tree roots, so we build the
    trees with a height of 1 ancestor level.
    """
    tree_height = 1
    return group_nodes(assignment_list, tree_height)
def group_nodes(node_list, tree_height):
    """
    Group a list of nodes into a list of GroupNode trees.

    Each node is wrapped in ``tree_height + 1`` levels of parent GroupNodes
    (see :func:`_make_node_list`), and trees whose roots share a name are
    merged together.

    :param node_list: The nodes (e.g. assignments or assignment groups) to group.
    :param tree_height: Number of ancestor levels to build above each node.
    :return: A list of root :class:`GroupNode` objects.
    """
    # Was named ``dict``, shadowing the builtin -- renamed for clarity.
    grouped = OrderedDict()
    for node in node_list:
        nodelist = _make_node_list(GroupNode(node), tree_height)
        name = nodelist.get_name()
        if name not in grouped:
            grouped[name] = nodelist
        else:
            grouped[name].merge(nodelist)
    # We usually need to know the length, so return a list instead of a view.
    return list(grouped.values())
def _make_node_list(child_node, list_count):
    """
    Wrap ``child_node`` in ``list_count + 1`` levels of parent GroupNodes,
    walking upwards via ``node.parentnode``. This is used by the method
    group_nodes before creating the tree of GroupNodes.
    """
    current = child_node
    for _ in range(list_count + 1):
        parent = GroupNode(current.node.parentnode)
        parent.add_child(current)
        current = parent
    return current
def print_tree(node, depth=1):
    """
    Print the tree of GroupNodes, one line per node, indented by depth.
    """
    for subnode in node:
        indent = " " * depth
        print(indent + subnode.get_name())
        print_tree(subnode, depth + 1)
class GroupNode(object):
    """
    A node in the tree built by :func:`group_nodes`.
    .. attribute:: children
        The :class:`OrderedDict` containing all the children of this node.
    .. attribute:: node
        The node element (core model object) wrapped by this GroupNode.
    .. attribute:: display_group
        When True, __str__ appends the candidates of the wrapped
        AssignmentGroup so students in several groups can be told apart.
    """
    def __init__(self, node):
        self.children = OrderedDict()
        self.node = node
        self.display_group = False
    def __str__(self):
        if hasattr(self.node, 'short_name'):
            return self.node.short_name
        elif hasattr(self.node, 'long_name'):
            return self.node.long_name
        else:
            # AssignmentGroup: fall back to the parent assignment's long name.
            if self.display_group:
                return self.node.parentnode.long_name + " (" + self.node.get_candidates() + ")"
            else:
                return self.node.parentnode.long_name
    def get_name(self):
        # Idiomatic: str(self) instead of calling __str__ directly.
        return str(self)
    def add_child(self, child_node):
        """
        Add a child to this node. Children with a ``short_name`` are keyed
        (and merged) by name; AssignmentGroups (no short_name) are keyed by
        the GroupNode itself.
        """
        # Assignment group doesn't have short_name
        if not hasattr(child_node.node, 'short_name'):
            # Makes sure the candidates are shown if a student
            # is part of more than one AssignmentGroup
            if len(self.children) != 0:
                child_node.display_group = True
            # Contains only one, set display_group to True for that element as well.
            if len(self.children) == 1:
                list(self.children.values())[0].display_group = True
            self.children[child_node] = child_node
        else:
            name = child_node.get_name()
            if name not in self.children:
                self.children[name] = child_node
            else:
                self.children[name].merge(child_node)
    def merge(self, nodes):
        """
        Merge the children of this node with the given iterable of GroupNodes.
        (Parameter renamed from ``list``, which shadowed the builtin; every
        call site in this module passes it positionally.)
        """
        for child in nodes:
            self.add_child(child)
    def __iter__(self):
        # Iterate over a snapshot so callers may mutate children while looping.
        return iter(list(self.children.values()))
| bsd-3-clause | 77478b8f6c5c135cc607bdfa34bc3194 | 30.17757 | 104 | 0.579736 | 4.01444 | false | false | false | false |
devilry/devilry-django | devilry/devilry_rest/testclient.py | 1 | 1493 | from django.test import Client
import json
class RestClient(Client):
    """
    Extends the ``django.test.Client`` with methods for the REST verbs and
    application/json.
    """
    def _load_json(self, content):
        """
        Decode a response body as JSON.
        Returns ``None`` for an empty body, otherwise the parsed object.
        Raises ValueError (with the offending content included) on invalid JSON.
        """
        # Django test responses expose ``content`` as bytes. Decode first so
        # the empty-body check below actually works: ``b''.strip() == ''`` is
        # always False, which made empty responses raise ValueError instead of
        # returning None.
        if isinstance(content, bytes):
            content = content.decode('utf-8')
        if content.strip() == '':
            return None
        try:
            return json.loads(content)
        except ValueError as e:
            raise ValueError('{0}: {1}'.format(e, content))
    def rest_post(self, url, data):
        """POST ``data`` as JSON and return ``(decoded_body, response)``."""
        response = self.post(url,
                             data=json.dumps(data),
                             content_type="application/json",
                             HTTP_ACCEPT="application/json")
        return self._load_json(response.content), response
    def rest_put(self, url, data, **extra):
        """PUT ``data`` as JSON and return ``(decoded_body, response)``."""
        response = self.put(url,
                            data=json.dumps(data),
                            content_type="application/json",
                            HTTP_ACCEPT="application/json",
                            **extra)
        return self._load_json(response.content), response
    def rest_get(self, url, **data):
        """GET with ``data`` as querystring and return ``(decoded_body, response)``."""
        response = self.get(url,
                            data=data,
                            HTTP_ACCEPT="application/json")
        return self._load_json(response.content), response
    def rest_delete(self, url):
        """DELETE and return ``(decoded_body, response)``."""
        response = self.delete(url,
                               HTTP_ACCEPT="application/json")
        return self._load_json(response.content), response
| bsd-3-clause | e69e8d6fa4c5079282d04227eda92be4 | 33.72093 | 74 | 0.519759 | 4.680251 | false | false | false | false |
devilry/devilry-django | devilry/apps/core/models/custom_db_fields.py | 1 | 1523 | import re
from django.utils.translation import gettext_lazy
from django.core.exceptions import ValidationError
from django.db import models
class ShortNameField(models.SlugField):
    """ Short name field used by several of the core models.
    We have a hierarchy of objects with a short name, but they are not
    strictly equal (eg. we cannot use a superclass because Subject has a
    unique short_name).
    """
    # Allowed characters: lowercase ascii letters, digits, underscore and hyphen.
    patt = re.compile(r'^[a-z0-9_-]+$')
    def __init__(self, *args, **kwargs):
        # Defaults below can be overridden per-field via ``kwargs``.
        kw = dict(
            max_length=20,
            verbose_name=gettext_lazy('Short name'),
            db_index=True,
            help_text=gettext_lazy('Up to 20 letters of lowercase english letters (a-z), '
                                   'numbers, underscore ("_") and hyphen ("-"). Used when the '
                                   'name takes too much space.')
        )
        kw.update(kwargs)
        super(ShortNameField, self).__init__(*args, **kw)
    def validate(self, value, *args, **kwargs):
        # Run the standard SlugField validation first, then our stricter pattern.
        # NOTE(review): '$' also matches just before a trailing newline; use
        # r'\Z' if values ending in '\n' must be rejected -- confirm intent.
        super(ShortNameField, self).validate(value, *args, **kwargs)
        if not self.patt.match(value):
            raise ValidationError(gettext_lazy(
                "Can only contain numbers, lowercase letters, '_' and '-'. "))
class LongNameField(models.CharField):
    """
    CharField preset used for human-readable names in the core models:
    max 100 characters, indexed, with a localized verbose name.
    Any of the defaults can be overridden via keyword arguments.
    """
    def __init__(self, *args, **kwargs):
        defaults = dict(
            max_length=100,
            verbose_name=gettext_lazy('Name'),
            db_index=True)
        defaults.update(kwargs)
        super(LongNameField, self).__init__(*args, **defaults)
| bsd-3-clause | 45f0409348a1dd5baf87c4811683d79c | 34.418605 | 90 | 0.594222 | 4.061333 | false | false | false | false |
devilry/devilry-django | devilry/devilry_superadmin/management/commands/devilry_subjectsearch.py | 1 | 2015 | from django.core.management.base import BaseCommand
from devilry.apps.core.models import Subject
from devilry.utils.management import add_output_encoding_argument
class NodeSearchBase(BaseCommand):
    """
    Base management command for searching node-like core models by short_name.
    Subclasses must set :obj:`.nodecls` to the model class to search.
    """
    nodecls = None
    args = '[search|empty for all]'
    #: Attributes printed for each record in the detailed view.
    attrs = ['short_name', 'long_name']
    def add_arguments(self, parser):
        # (Removed a stray trailing comma after this call -- it created a
        # discarded one-element tuple.)
        parser.add_argument(
            '--short_name-only',
            action='store_true',
            dest='short_name_only',
            default=False,
            help='Only print short name (one line per short_name)')
        add_output_encoding_argument(parser)
    def _print_details(self, record):
        """Print the short name, id and each attribute in :obj:`.attrs`."""
        print(self.get_short(record))
        print('   id: {}'.format(record.id))
        for attrname in self.attrs:
            attr = getattr(record, attrname)
            # Was a bare ``except:`` -- narrowed to the errors str.encode can
            # actually raise (unknown codec, or unencodable characters).
            try:
                attr = attr.encode(self.outputencoding)
            except (LookupError, UnicodeError):
                attr = attr.encode('ascii', 'replace')
            print('   {attrname}: {attr}'.format(attrname=attrname,
                                                 attr=attr))
        # print '   admins:'
        # for admin in record.admins.all():
        #     print '    - {0}'.format(admin)
    def show_search_results(self, options, qry):
        """Print each record in ``qry``, short or detailed depending on options."""
        for record in qry:
            if options['short_name_only']:
                print(self.get_short(record))
            else:
                self._print_details(record)
    def handle(self, *args, **options):
        self.outputencoding = options['outputencoding']
        if len(args) == 1:
            qry = self.get_qry(args[0])
        else:
            qry = self.nodecls.objects.all()
        self.show_search_results(options, qry)
    def get_qry(self, term):
        """Return a queryset matching ``term`` anywhere in short_name."""
        return self.nodecls.objects.filter(short_name__icontains=term)
    def get_short(self, record):
        """Return the one-line representation of a record."""
        return record.short_name
class Command(NodeSearchBase):
    # Concrete search command: looks up Subject records by short_name substring.
    help = 'Search for a subject by short_name. Matches any part of the short_name.'
    nodecls = Subject
| bsd-3-clause | 13213774e7e5077da68c037b9bcfd0c4 | 32.583333 | 84 | 0.57469 | 3.966535 | false | false | false | false |
devilry/devilry-django | devilry/devilry_qualifiesforexam/admin.py | 1 | 1670 | from django.contrib import admin
from devilry.devilry_qualifiesforexam.models import Status, QualifiesForFinalExam
class QualifiesForFinalExamInline(admin.TabularInline):
    """Read-only inline listing each student's qualified-flag under a Status."""
    model = QualifiesForFinalExam
    raw_id_fields = [
        'relatedstudent'
    ]
    fields = ['relatedstudent', 'qualifies']
    readonly_fields = ['relatedstudent', 'qualifies']
    extra = 0
class StatusAdmin(admin.ModelAdmin):
    """
    Admin for qualifies-for-exam Status objects. Everything is read-only;
    statuses are created through the qualifiesforexam plugin flow.
    """
    raw_id_fields = [
        'period'
    ]
    inlines = [QualifiesForFinalExamInline]
    list_display = (
        'id',
        'period',
        'get_status_text',
        'createtime',
        'message',
    )
    search_fields = [
        'id',
        'period__short_name',
        'period__long_name',
        'period__parentnode__short_name',
        'period__parentnode__long_name',
        'message',
    ]
    readonly_fields = [
        'period',
        'createtime',
        'message',
        'user',
        'plugin',
        'exported_timestamp',
        'status'
    ]
    def get_queryset(self, request):
        # select_related avoids one query per row for period/subject columns.
        return super(StatusAdmin, self).get_queryset(request)\
            .select_related(
                'period', 'period__parentnode')
    # def admins_as_string(self, obj):
    #     return ', '.join([user.username for user in obj.admins.all()])
    # admins_as_string.short_description = "Admins"
admin.site.register(Status, StatusAdmin)
class QualifiesForFinalExamAdmin(admin.ModelAdmin):
    """Minimal admin for individual QualifiesForFinalExam rows."""
    raw_id_fields = [
        'relatedstudent'
    ]
    list_display = (
        'id',
        'qualifies'
    )
    search_fields = [
        'id'
    ]
admin.site.register(QualifiesForFinalExam, QualifiesForFinalExamAdmin)
| bsd-3-clause | bd74d23974b1f7f690e4f5202929a1bd | 22.194444 | 81 | 0.590419 | 3.711111 | false | false | false | false |
devilry/devilry-django | devilry/devilry_admin/tests/subject_for_period_admin/test_subject_redirect.py | 1 | 2259 |
import mock
from django.conf import settings
from django.http import Http404
from django.test import TestCase
from cradmin_legacy import cradmin_testhelpers
from model_bakery import baker
from devilry.devilry_admin.views.subject_for_period_admin import subject_redirect
class TestSubjectRedirect(TestCase, cradmin_testhelpers.TestCaseMixin):
    """
    Tests for SubjectRedirectView: subject admins are redirected to the
    devilry_admin subject overview, period admins to the cradmin role
    frontpage, and everyone else gets 404.
    """
    viewclass = subject_redirect.SubjectRedirectView
    def test_404(self):
        # No request user at all -> 404.
        testsubject = baker.make('core.Subject')
        with self.assertRaises(Http404):
            self.mock_http302_getrequest(cradmin_role=testsubject)
    def test_user_is_not_periodadmin_or_subjectadmin(self):
        testsubject = baker.make('core.Subject')
        testuser = baker.make(settings.AUTH_USER_MODEL)
        with self.assertRaises(Http404):
            self.mock_http302_getrequest(cradmin_role=testsubject, requestuser=testuser)
    def test_redirect_to_overview_for_periodadmin(self):
        testperiod = baker.make('core.Period')
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        mock_cradmin_instance = mock.MagicMock()
        self.mock_http302_getrequest(
            cradmin_role=testperiod.parentnode,
            cradmin_instance=mock_cradmin_instance,
            requestuser=testuser
        )
        # Period admins are sent to the cradmin role frontpage for the subject.
        mock_cradmin_instance.rolefrontpage_url.assert_called_once_with(roleid=testperiod.parentnode.id)
    def test_redirect_to_overview_for_subject_admins(self):
        testsubject = baker.make('core.Subject')
        subjectpermissiongroup = baker.make('devilry_account.SubjectPermissionGroup', subject=testsubject)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=subjectpermissiongroup.permissiongroup)
        mockresponse = self.mock_http302_getrequest(cradmin_role=testsubject, requestuser=testuser)
        self.assertEqual('/devilry_admin/subject/{}/overview/'.format(testsubject.id), mockresponse.response.url)
| bsd-3-clause | 98825fecec806836de79de5da2ce8ffe | 46.0625 | 113 | 0.729969 | 3.777592 | false | true | false | false |
devilry/devilry-django | devilry/devilry_gradingsystem/tests/views/admin/test_setmaxpoints.py | 1 | 5365 | import unittest
from mock import patch
from django.urls import reverse
from django.test import TestCase
from devilry.project.develop.testhelpers.corebuilder import PeriodBuilder
from devilry.project.develop.testhelpers.corebuilder import UserBuilder
from devilry.project.develop.testhelpers.soupselect import cssGet
from devilry.project.develop.testhelpers.soupselect import cssFind
from devilry.project.develop.testhelpers.soupselect import cssExists
from devilry.devilry_gradingsystem.pluginregistry import GradingSystemPluginRegistry
from .base import AdminViewTestMixin
from .base import MockApprovedPluginApi
from .base import MockPointsPluginApi
# from .base import MockRequiresConfigurationPluginApi
@unittest.skip('devilry_gradingsystem will most likely be replaced in 3.0')
class TestSetMaxPointsView(TestCase, AdminViewTestMixin):
    """
    Tests for the admin "set max points" view of the (deprecated)
    devilry_gradingsystem app. The whole class is skipped -- kept only
    until the app is removed.
    """
    def setUp(self):
        self.admin1 = UserBuilder('admin1').user
        self.assignmentbuilder = PeriodBuilder.quickadd_ducku_duck1010_active()\
            .add_assignment('assignment1')\
            .add_admins(self.admin1)
        self.url = reverse('devilry_gradingsystem_admin_setmaxpoints', kwargs={
            'assignmentid': self.assignmentbuilder.assignment.id,
        })
    def test_invalid_pluginid_404(self):
        # Plugin id 1001 is not registered in the (empty) registry -> 404.
        myregistry = GradingSystemPluginRegistry()
        self.assignmentbuilder.update(grading_system_plugin_id=1001)
        with patch('devilry.apps.core.models.assignment.gradingsystempluginregistry', myregistry):
            response = self.get_as(self.admin1)
            self.assertEqual(response.status_code, 404)
    def test_render(self):
        myregistry = GradingSystemPluginRegistry()
        myregistry.add(MockPointsPluginApi)
        self.assignmentbuilder.update(grading_system_plugin_id=MockPointsPluginApi.id)
        with patch('devilry.apps.core.models.assignment.gradingsystempluginregistry', myregistry):
            response = self.get_as(self.admin1)
            self.assertEqual(response.status_code, 200)
            html = response.content
            self.assertEqual(cssGet(html, '.page-header h1').text.strip(),
                             'Set the maximum possible number of points')
            self.assertTrue(cssExists(html, '#id_max_points'))
            self.assertEqual(cssGet(html, '#id_max_points')['value'], '1')  # The default value
    def test_sets_max_points_automatically(self):
        # Plugins that decide max_points themselves skip the form and redirect
        # straight to the points-to-grade-mapper step.
        myregistry = GradingSystemPluginRegistry()
        myregistry.add(MockApprovedPluginApi)
        self.assignmentbuilder.update(grading_system_plugin_id=MockApprovedPluginApi.id)
        with patch('devilry.apps.core.models.assignment.gradingsystempluginregistry', myregistry):
            response = self.get_as(self.admin1)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(response["Location"].endswith(
                reverse('devilry_gradingsystem_admin_select_points_to_grade_mapper', kwargs={
                    'assignmentid': self.assignmentbuilder.assignment.id})))
            self.assignmentbuilder.reload_from_db()
            self.assertEqual(self.assignmentbuilder.assignment.max_points,
                             MockApprovedPluginApi(self.assignmentbuilder.assignment).get_max_points())
    def test_render_default_to_current_value(self):
        myregistry = GradingSystemPluginRegistry()
        myregistry.add(MockPointsPluginApi)
        self.assignmentbuilder.update(
            grading_system_plugin_id=MockPointsPluginApi.id,
            max_points=2030
        )
        with patch('devilry.apps.core.models.assignment.gradingsystempluginregistry', myregistry):
            response = self.get_as(self.admin1)
            html = response.content
            self.assertEqual(cssGet(html, '#id_max_points')['value'], '2030')
    def test_post_valid_form(self):
        myregistry = GradingSystemPluginRegistry()
        myregistry.add(MockPointsPluginApi)
        self.assignmentbuilder.update(grading_system_plugin_id=MockPointsPluginApi.id)
        with patch('devilry.apps.core.models.assignment.gradingsystempluginregistry', myregistry):
            response = self.post_as(self.admin1, {
                'max_points': 100
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(response["Location"].endswith(
                reverse('devilry_gradingsystem_admin_select_points_to_grade_mapper', kwargs={
                    'assignmentid': self.assignmentbuilder.assignment.id})))
            self.assignmentbuilder.reload_from_db()
            self.assertEqual(self.assignmentbuilder.assignment.max_points, 100)
    def test_post_negative_value_shows_error(self):
        myregistry = GradingSystemPluginRegistry()
        myregistry.add(MockPointsPluginApi)
        self.assignmentbuilder.update(
            grading_system_plugin_id=MockPointsPluginApi.id,
            max_points=10
        )
        with patch('devilry.apps.core.models.assignment.gradingsystempluginregistry', myregistry):
            response = self.post_as(self.admin1, {
                'max_points': -1
            })
            self.assertEqual(response.status_code, 200)
            self.assertEqual(self.assignmentbuilder.assignment.max_points, 10)  # Unchanged
            html = response.content
            # NOTE(review): ``response.content`` is bytes on Python 3, so this
            # assertIn(str, bytes) would raise TypeError if the class were ever
            # unskipped -- confirm/fix before re-enabling these tests.
            self.assertIn('Ensure this value is greater than or equal to 0', html)
| bsd-3-clause | 3f7f5419412d74b7fe87052cdef66186 | 48.675926 | 98 | 0.695806 | 4.033835 | false | true | false | false |
devilry/devilry-django | devilry/devilry_frontpage/tests/test_frontpage.py | 1 | 5918 | from django.conf import settings
from django.test import TestCase
from cradmin_legacy import cradmin_testhelpers
from model_bakery import baker
from devilry.devilry_account.models import PermissionGroup
from devilry.devilry_frontpage.views import frontpage
class TestFrontpage(TestCase, cradmin_testhelpers.TestCaseMixin):
    """
    Tests for the frontpage role-selection view: verifies that the
    student/examiner/anyadmin role boxes (and the superuser link) are only
    rendered for users that actually hold the corresponding role.
    The repeated selector assertions are factored into private helpers.
    """
    viewclass = frontpage.FrontpageView

    def _assert_roleselect(self, mockresponse, student=False, examiner=False, anyadmin=False):
        """
        Assert presence/absence of the three roleselect items in the response.
        Each keyword argument states whether that role box should be rendered.
        """
        for rolename, expected in [('student', student),
                                   ('examiner', examiner),
                                   ('anyadmin', anyadmin)]:
            cssselector = '.devilry-frontpage-listbuilder-roleselect-itemvalue-{}'.format(rolename)
            if expected:
                self.assertTrue(mockresponse.selector.exists(cssselector))
            else:
                self.assertFalse(mockresponse.selector.exists(cssselector))

    def _make_admin_user(self, permissiongroup):
        """Create and return a user that is member of the given permissiongroup."""
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('devilry_account.PermissionGroupUser', user=testuser,
                   permissiongroup=permissiongroup)
        return testuser

    def test_title(self):
        testuser = baker.make(settings.AUTH_USER_MODEL)
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self.assertEqual('Devilry frontpage',
                         mockresponse.selector.one('title').alltext_normalized)

    def test_h1(self):
        testuser = baker.make(settings.AUTH_USER_MODEL)
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self.assertEqual('Choose your role',
                         mockresponse.selector.one('h1').alltext_normalized)

    def test_user_is_student(self):
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Candidate',
                   relatedstudent__user=testuser,
                   assignment_group__parentnode=baker.make_recipe('devilry.apps.core.assignment_activeperiod_start'))
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self._assert_roleselect(mockresponse, student=True)

    def test_user_is_examiner(self):
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=testuser,
                   assignmentgroup__parentnode=baker.make_recipe('devilry.apps.core.assignment_activeperiod_start'))
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self._assert_roleselect(mockresponse, examiner=True)

    def test_user_is_superuser(self):
        testuser = baker.make(settings.AUTH_USER_MODEL, is_superuser=True)
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self._assert_roleselect(mockresponse, anyadmin=True)
        # Only superusers get the direct superuser link.
        self.assertTrue(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))

    def test_user_is_departmentadmin(self):
        testuser = self._make_admin_user(baker.make(
            'devilry_account.SubjectPermissionGroup',
            permissiongroup__grouptype=PermissionGroup.GROUPTYPE_DEPARTMENTADMIN).permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self._assert_roleselect(mockresponse, anyadmin=True)
        self.assertFalse(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))

    def test_user_is_subjectadmin(self):
        testuser = self._make_admin_user(baker.make(
            'devilry_account.SubjectPermissionGroup',
            permissiongroup__grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN).permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self._assert_roleselect(mockresponse, anyadmin=True)
        self.assertFalse(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))

    def test_user_is_periodadmin(self):
        testuser = self._make_admin_user(
            baker.make('devilry_account.PeriodPermissionGroup').permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(requestuser=testuser)
        self._assert_roleselect(mockresponse, anyadmin=True)
        self.assertFalse(mockresponse.selector.exists('.devilry-frontpage-superuser-link'))
| bsd-3-clause | 7778b539c255f73052ca4be9f753bd6c | 56.456311 | 117 | 0.710206 | 3.97448 | false | true | false | false |
devilry/devilry-django | devilry/devilry_statistics/tests/test_api_examiner_average_grading_points.py | 1 | 11199 | from django import test
from model_bakery import baker
from devilry.apps.core.models import Assignment
from devilry.devilry_account.models import PermissionGroup
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.utils.api import api_test_mixin
from devilry.devilry_statistics.api.assignment import examiner_average_grading_points
from devilry.devilry_group import devilry_group_baker_factories as group_baker
class TestExaminerAverageGradingPointsApi(test.TestCase, api_test_mixin.ApiTestMixin):
    """
    Tests for the examiner "average grading points" statistics API:
    authentication, per-role permission checks, input validation and the
    computed average itself.
    """
    apiview_class = examiner_average_grading_points.ExaminerAverageGradingPointsApi
    def setUp(self):
        # The API reads from the AssignmentGroup cached-data tables, so the
        # custom SQL triggers must be installed before each test.
        AssignmentGroupDbCacheCustomSql().initialize()
    def test_not_authenticated(self):
        """Anonymous requests are rejected with 403."""
        response = self.make_get_request()
        self.assertEqual(response.status_code, 403)
    def test_validator_assignment_id_missing_raises_403(self):
        """Missing assignment_id view kwarg gives 403, not a validation error."""
        # Raises 403 because permission query is executed first (requires assignment_id)
        response = self.make_get_request(
            requestuser=self.make_superuser(),
            viewkwargs={'relatedexaminer_id': 1})
        self.assertEqual(response.status_code, 403)
    def test_validator_relatedexaminer_id_missing(self):
        """Missing relatedexaminer_id gives 400 with a required-field error."""
        assignment = baker.make('core.Assignment')
        response = self.make_get_request(
            requestuser=self.make_superuser(),
            viewkwargs={'assignment_id': assignment.id})
        self.assertEqual(str(response.data['relatedexaminer_id'][0]), 'This field is required.')
        self.assertEqual(response.status_code, 400)
    def test_user_has_no_access(self):
        """A user without any admin role gets 403."""
        assignment = baker.make('core.Assignment')
        relatedexaminer = baker.make('core.RelatedExaminer', user__fullname='Test User')
        response = self.make_get_request(
            requestuser=self.make_user(),
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.status_code, 403)
    def test_period_admin_on_different_period_does_not_have_access(self):
        """Period admin for an unrelated period gets 403."""
        other_period = baker.make('core.Period')
        period = baker.make('core.Period')
        assignment = baker.make('core.Assignment', parentnode=period)
        requestuser = self.make_user()
        permissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
                                     period=other_period)
        baker.make('devilry_account.PermissionGroupUser',
                   user=requestuser,
                   permissiongroup=permissiongroup.permissiongroup)
        response = self.make_get_request(
            requestuser=requestuser,
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': 1})
        self.assertEqual(response.status_code, 403)
    def test_period_admin_has_access(self):
        """Period admin for the assignment's period gets 200."""
        period = baker.make('core.Period')
        assignment = baker.make('core.Assignment', parentnode=period)
        relatedexaminer = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User')
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=1)
        requestuser = self.make_user()
        permissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
                                     period=period)
        baker.make('devilry_account.PermissionGroupUser',
                   user=requestuser,
                   permissiongroup=permissiongroup.permissiongroup)
        response = self.make_get_request(
            requestuser=requestuser,
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.status_code, 200)
    def test_subject_admin_on_different_subject_does_not_have_access(self):
        """Subject admin for an unrelated subject gets 403."""
        other_subject = baker.make('core.Subject')
        subject = baker.make('core.Subject')
        period = baker.make('core.Period', parentnode=subject)
        assignment = baker.make('core.Assignment', parentnode=period)
        relatedexaminer = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User')
        requestuser = self.make_user()
        permissiongroup = baker.make('devilry_account.SubjectPermissionGroup',
                                     subject=other_subject)
        baker.make('devilry_account.PermissionGroupUser',
                   user=requestuser,
                   permissiongroup=permissiongroup.permissiongroup)
        response = self.make_get_request(
            requestuser=requestuser,
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.status_code, 403)
    def test_subject_admin_has_access(self):
        """Subject admin for the assignment's subject gets 200."""
        subject = baker.make('core.Subject')
        period = baker.make('core.Period', parentnode=subject)
        assignment = baker.make('core.Assignment', parentnode=period)
        relatedexaminer = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User')
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=1)
        requestuser = self.make_user()
        permissiongroup = baker.make('devilry_account.SubjectPermissionGroup',
                                     permissiongroup__grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN,
                                     subject=subject)
        baker.make('devilry_account.PermissionGroupUser',
                   user=requestuser,
                   permissiongroup=permissiongroup.permissiongroup)
        response = self.make_get_request(
            requestuser=requestuser,
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.status_code, 200)
    def test_department_admin_has_access(self):
        """Department admin for the assignment's subject gets 200."""
        subject = baker.make('core.Subject')
        period = baker.make('core.Period', parentnode=subject)
        assignment = baker.make('core.Assignment', parentnode=period)
        relatedexaminer = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User')
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=1)
        requestuser = self.make_user()
        permissiongroup = baker.make('devilry_account.SubjectPermissionGroup',
                                     subject=subject, permissiongroup__grouptype='departmentadmin')
        baker.make('devilry_account.PermissionGroupUser',
                   user=requestuser,
                   permissiongroup=permissiongroup.permissiongroup)
        response = self.make_get_request(
            requestuser=requestuser,
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.status_code, 200)
    def __make_group_for_relatedexaminer(self, assignment, relatedexaminer, grading_points):
        # Create a group on the assignment with the examiner attached, and
        # publish a first-attempt feedbackset with the given points.
        group = baker.make('core.AssignmentGroup', parentnode=assignment)
        baker.make('core.Examiner', relatedexaminer=relatedexaminer, assignmentgroup=group)
        group_baker.feedbackset_first_attempt_published(group=group, grading_points=grading_points)
        return group
    def test_sanity(self):
        """A single graded group yields that grade as the average."""
        period = baker.make('core.Period')
        assignment = baker.make('core.Assignment', parentnode=period)
        relatedexaminer = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User')
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=1)
        response = self.make_get_request(
            requestuser=self.make_superuser(),
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.data,
                         {'average_grading_points_given': '1.00',
                          'user_name': 'Test User'})
    def test_assignment_grading_average_sanity(self):
        """The average is reported on the assignment's raw-points scale."""
        period = baker.make('core.Period')
        assignment = baker.make('core.Assignment', parentnode=period,
                                max_points=50,
                                points_to_grade_mapper=Assignment.POINTS_TO_GRADE_MAPPER_RAW_POINTS)
        relatedexaminer = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User')
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=25)
        response = self.make_get_request(
            requestuser=self.make_superuser(),
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.data,
                         {'average_grading_points_given': '25.00',
                          'user_name': 'Test User'})
    def test_assignment_multiple_groups_grading_average_sanity(self):
        """The average is computed across all groups graded by the examiner (5+10+15)/3."""
        period = baker.make('core.Period')
        assignment = baker.make('core.Assignment',
                                parentnode=period, max_points=10,
                                points_to_grade_mapper=Assignment.POINTS_TO_GRADE_MAPPER_RAW_POINTS)
        relatedexaminer = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User')
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=5)
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=10)
        self.__make_group_for_relatedexaminer(assignment=assignment, relatedexaminer=relatedexaminer, grading_points=15)
        response = self.make_get_request(
            requestuser=self.make_superuser(),
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer.id})
        self.assertEqual(response.data,
                         {'average_grading_points_given': '10.00',
                          'user_name': 'Test User'})
    def test_assignment_grading_average_only_for_relatedexaminer_id_passed(self):
        """Gradings by other examiners do not affect the requested examiner's average."""
        period = baker.make('core.Period')
        assignment = baker.make('core.Assignment',
                                parentnode=period, max_points=10,
                                points_to_grade_mapper=Assignment.POINTS_TO_GRADE_MAPPER_RAW_POINTS)
        relatedexaminer1 = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User 1')
        relatedexaminer2 = baker.make('core.RelatedExaminer', period=period, user__fullname='Test User 2')
        self.__make_group_for_relatedexaminer(
            assignment=assignment, relatedexaminer=relatedexaminer1, grading_points=5)
        self.__make_group_for_relatedexaminer(
            assignment=assignment, relatedexaminer=relatedexaminer2, grading_points=10)
        response = self.make_get_request(
            requestuser=self.make_superuser(),
            viewkwargs={'assignment_id': assignment.id, 'relatedexaminer_id': relatedexaminer1.id})
        self.assertEqual(response.data,
                         {'average_grading_points_given': '5.00',
                          'user_name': 'Test User 1'})
| bsd-3-clause | 738e9c7462669b2e1ed95c068432f149 | 56.137755 | 120 | 0.664791 | 3.944699 | false | true | false | false |
devilry/devilry-django | devilry/devilry_gradingsystem/views/admin/base.py | 1 | 3550 | from django.urls import reverse
from django.views.generic.detail import SingleObjectMixin
from django.http import Http404
from django.utils.translation import gettext_lazy
from devilry.apps.core.models import Assignment
from devilry.devilry_gradingsystem.pluginregistry import GradingSystemPluginNotInRegistryError
class AssignmentSingleObjectMixin(SingleObjectMixin):
    """Single-object mixin resolving an :class:`Assignment` administered by the request user."""
    model = Assignment
    pk_url_kwarg = 'assignmentid'
    context_object_name = 'assignment'

    def get_queryset(self):
        """Restrict lookups to assignments the requesting user administers."""
        queryset = Assignment.objects.filter_user_is_admin(self.request.user)
        # Join the Period and Subject (parentnode chains) up front -- the
        # templates always need them.
        return queryset.select_related('parentnode', 'parentnode__parentnode')
class WizardStep(object):
    """
    One step in the grading system configuration wizard.

    Args:
        wizard_steps: The owning :class:`WizardSteps` collection (anything
            supporting ``len()``; URL building additionally reads its
            ``assignment`` attribute).
        slug: URL-name suffix identifying the step's view.
        title: Human readable title of the step.
        index: Zero-based position of this step within ``wizard_steps``.
    """
    def __init__(self, wizard_steps, slug, title, index):
        self.wizard_steps = wizard_steps
        self.slug = slug
        self.title = title
        self.index = index

    def get_number(self):
        """Return the 1-based number of this step."""
        return self.index + 1

    def get_total(self):
        """Return the total number of steps in the wizard."""
        return len(self.wizard_steps)

    def get_percent(self):
        """Return wizard progress after this step as an integer percentage."""
        # Python 3 division is already true division, so the old float()
        # cast was redundant; reuse get_number()/get_total() instead of
        # re-deriving the same values.
        return int(self.get_number() / self.get_total() * 100)

    def get_url(self):
        """Return the URL of this step for the wizard's assignment."""
        urlname = 'devilry_gradingsystem_admin_{}'.format(self.slug)
        return reverse(urlname, kwargs={
            'assignmentid': self.wizard_steps.assignment.id
        })

    def get_previous_url(self):
        """Return the URL of the previous step (the plugin selector before the first step)."""
        if self.index == 0:
            return reverse('devilry_gradingsystem_admin_selectplugin', kwargs={
                'assignmentid': self.wizard_steps.assignment.id
            })
        else:
            return self.wizard_steps.get_by_index(self.index-1).get_url()

    def is_last(self):
        """Return True if this is the final wizard step."""
        return self.index == len(self.wizard_steps) - 1
class WizardSteps(object):
    """
    Ordered collection of :class:`WizardStep` items describing the grading
    system setup flow for a single assignment. Which steps are included
    depends on the assignment's grading plugin capabilities.
    """

    def __init__(self, assignment):
        self.assignment = assignment
        self.ordered = []
        self.by_slug = {}
        pluginapi = assignment.get_gradingsystem_plugin_api()
        if pluginapi.requires_configuration:
            self.add_step('configure_plugin', gettext_lazy('Configure'))
        if not pluginapi.sets_max_points_automatically:
            self.add_step('setmaxpoints', gettext_lazy('Set the maximum possible number of points'))
        self.add_step('select_points_to_grade_mapper', gettext_lazy('Select how results are presented to the students'))
        if assignment.points_to_grade_mapper == 'custom-table':
            self.add_step('setup_custom_table', gettext_lazy('Map points to grade'))
        if not pluginapi.sets_passing_grade_min_points_automatically:
            # NOTE: 'minumum' [sic] -- the string is kept as-is to match existing translations.
            self.add_step('setpassing_grade_min_points', gettext_lazy('Set the minumum number of points required to pass'))

    def add_step(self, slug, title):
        """Append a new step, indexing it both by position and by slug."""
        step = WizardStep(self, slug, title, len(self.ordered))
        self.ordered.append(step)
        self.by_slug[slug] = step

    def get_by_slug(self, slug):
        """Return the step registered with the given slug."""
        return self.by_slug[slug]

    def get_by_index(self, index):
        """Return the step at the given zero-based index."""
        return self.ordered[index]

    def __len__(self):
        return len(self.ordered)
class AssignmentSingleObjectRequiresValidPluginMixin(AssignmentSingleObjectMixin):
    """
    Variant of :class:`AssignmentSingleObjectMixin` that responds with 404
    unless the assignment's grading system plugin is registered.
    """

    def get_object(self):
        """Return the assignment, raising Http404 when its grading plugin is unknown."""
        assignment = super(AssignmentSingleObjectRequiresValidPluginMixin, self).get_object()
        try:
            assignment.get_gradingsystem_plugin_api()
        except GradingSystemPluginNotInRegistryError:
            raise Http404()
        else:
            return assignment

    def get_wizard_step_map(self):
        """Build the wizard step collection for the resolved assignment."""
        return WizardSteps(self.object)
| bsd-3-clause | 013ed70362e37a88f964448f8555aee4 | 34.858586 | 123 | 0.661127 | 3.914002 | false | false | false | false |
devilry/devilry-django | devilry/devilry_dbcache/tests/test_bulk_create_queryset_mixin.py | 1 | 2501 |
from django import test
from devilry.devilry_dbcache.devilry_dbcache_testapp.models import Person
class TestBulkCreateQuerySetMixin(test.TestCase):
    """Exercises ``Person.objects.postgres_bulk_create`` end to end."""

    def __assert_all_created(self, names):
        # Every name should now resolve to a persisted Person row.
        for name in names:
            self.assertTrue(Person.objects.filter(name=name).exists())

    def test_sanity(self):
        """Three objects in one batch: ids are returned and all rows exist."""
        operation = Person.objects.postgres_bulk_create(
            [Person(name='test{}'.format(number)) for number in range(1, 4)])
        created_ids = operation.execute()
        self.assertEqual(len(created_ids), 3)
        self.__assert_all_created(['test1', 'test2', 'test3'])

    def test_works_with_defaults(self):
        """Model field defaults (age=20) are applied by the bulk insert."""
        Person.objects.postgres_bulk_create([Person(name='test1')]).execute()
        created_person = Person.objects.get(name='test1')
        self.assertEqual(20, created_person.age)

    def test_batch_size(self):
        """Five objects split into batches of two still create every row."""
        operation = Person.objects.postgres_bulk_create(
            [Person(name='test{}'.format(number)) for number in range(1, 6)],
            batch_size=2)
        self.assertEqual(len(operation.execute()), 5)
        self.__assert_all_created(['test1', 'test2', 'test3', 'test4', 'test5'])

    def test_return_objects(self):
        """execute_and_return_objects() returns the saved instances in input order."""
        operation = Person.objects.postgres_bulk_create(
            [Person(name='test{}'.format(number)) for number in range(1, 4)])
        people = operation.execute_and_return_objects()
        self.assertEqual(len(people), 3)
        for index, name in enumerate(['test1', 'test2', 'test3']):
            self.assertEqual(people[index], Person.objects.get(name=name))
| bsd-3-clause | a0ef878b5a9ff0fec0baa91094201e70 | 39.33871 | 74 | 0.62615 | 3.847692 | false | true | false | false |
devilry/devilry-django | devilry/devilry_admin/views/common/bulkimport_users_common.py | 1 | 5904 | import re
from crispy_forms import layout
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import gettext_lazy
from cradmin_legacy.crispylayouts import PrimarySubmit
from cradmin_legacy.viewhelpers import formbase
from devilry.devilry_account.models import PermissionGroup
class AbstractTypeInUsersView(formbase.FormView):
    """
    Base view for bulk-importing users from a pasted blob of usernames or
    email addresses.

    Subclasses must implement :meth:`get_backlink_url`,
    :meth:`get_backlink_label`, and the ``import_users_from_*`` hook
    matching the active auth backend.
    """
    # Users may be separated by comma, semicolon or any whitespace.
    users_blob_split_pattern = re.compile(r'[,;\s]+')
    create_button_label = gettext_lazy('Save')
    template_name = 'devilry_admin/common/abstract-type-in-users.django.html'
    def dispatch(self, request, *args, **kwargs):
        # Only department admins may bulk-import users; everyone else gets 404.
        requestuser_devilryrole = request.cradmin_instance.get_devilryrole_for_requestuser()
        if requestuser_devilryrole != PermissionGroup.GROUPTYPE_DEPARTMENTADMIN:
            raise Http404()
        return super(AbstractTypeInUsersView, self).dispatch(request=request, *args, **kwargs)
    def get_backlink_url(self):
        """Return the URL for the "back" link. Must be overridden."""
        raise NotImplementedError()
    def get_backlink_label(self):
        """Return the label for the "back" link. Must be overridden."""
        raise NotImplementedError()
    @classmethod
    def split_users_blob(cls, users_blob):
        """
        Split the given string of users by ``,`` and whitespace.

        Returns a set.
        """
        users_blob_split = cls.users_blob_split_pattern.split(users_blob)
        # NOTE(review): this branch returns a list while the normal path
        # returns a set; re.split always returns at least one element, so
        # it looks unreachable -- verify before relying on it.
        if len(users_blob_split) == 0:
            return []
        # Leading/trailing separators produce empty strings at the ends.
        if users_blob_split[0] == '':
            del users_blob_split[0]
        if len(users_blob_split) > 0 and users_blob_split[-1] == '':
            del users_blob_split[-1]
        return set(users_blob_split)
    def __get_users_blob_help_text(self):
        # Help text depends on whether the site authenticates by email or username.
        if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
            return gettext_lazy('Type or paste in email addresses separated by comma (","), space or one user on each line.')
        else:
            return gettext_lazy('Type or paste in usernames separated by comma (","), space or one user on each line.')
    def __get_users_blob_placeholder(self):
        # Placeholder examples matching the active auth backend.
        if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
            return gettext_lazy('jane@example.com\njohn@example.com')
        else:
            return gettext_lazy('jane\njohn')
    def get_form_class(self):
        """
        Build the import form class dynamically so the help text and the
        validation strategy match the active auth backend.
        """
        users_blob_help_text = self.__get_users_blob_help_text()
        class UserImportForm(forms.Form):
            users_blob = forms.CharField(
                widget=forms.Textarea,
                required=True,
                help_text=users_blob_help_text
            )
            def __validate_users_blob_emails(self, emails):
                # Collect all invalid addresses so the user gets one
                # complete error message instead of failing one at a time.
                invalid_emails = []
                for email in emails:
                    try:
                        validate_email(email)
                    except ValidationError:
                        invalid_emails.append(email)
                if invalid_emails:
                    self.add_error(
                        'users_blob',
                        gettext_lazy('Invalid email addresses: %(emails)s') % {
                            'emails': ', '.join(sorted(invalid_emails))
                        }
                    )
            def __validate_users_blob_usernames(self, usernames):
                # The allowed username pattern is configurable per site.
                valid_username_pattern = re.compile(
                    getattr(settings, 'DEVILRY_VALID_USERNAME_PATTERN', r'^[a-z0-9]+$'))
                invalid_usernames = []
                for username in usernames:
                    if not valid_username_pattern.match(username):
                        invalid_usernames.append(username)
                if invalid_usernames:
                    self.add_error(
                        'users_blob',
                        gettext_lazy('Invalid usernames: %(usernames)s') % {
                            'usernames': ', '.join(sorted(invalid_usernames))
                        }
                    )
            def clean(self):
                cleaned_data = super(UserImportForm, self).clean()
                users_blob = cleaned_data.get('users_blob', None)
                if users_blob:
                    users = AbstractTypeInUsersView.split_users_blob(users_blob)
                    if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
                        self.__validate_users_blob_emails(emails=users)
                    else:
                        self.__validate_users_blob_usernames(usernames=users)
                    # Stash the parsed set for form_valid() to consume.
                    self.cleaned_users_set = users
        return UserImportForm
    def get_field_layout(self):
        # Single textarea field; the label is visually hidden (sr-only).
        return [
            layout.Div(
                layout.Field('users_blob', placeholder=self.__get_users_blob_placeholder()),
                css_class='cradmin-globalfields cradmin-legacy-formfield-label-sr-only')
        ]
    def get_buttons(self):
        return [
            PrimarySubmit('save', self.create_button_label),
        ]
    def get_success_url(self):
        return self.request.cradmin_app.reverse_appindexurl()
    def import_users_from_emails(self, emails):
        """Import users from validated emails. Override when email auth is used."""
        raise NotImplementedError()
    def import_users_from_usernames(self, usernames):
        """Import users from validated usernames. Override when username auth is used."""
        raise NotImplementedError()
    def form_valid(self, form):
        # Delegate to the backend-appropriate import hook, then redirect.
        if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
            self.import_users_from_emails(emails=form.cleaned_users_set)
        else:
            self.import_users_from_usernames(usernames=form.cleaned_users_set)
        return HttpResponseRedirect(str(self.get_success_url()))
    def get_context_data(self, **kwargs):
        context = super(AbstractTypeInUsersView, self).get_context_data(**kwargs)
        context['backlink_url'] = self.get_backlink_url()
        context['backlink_label'] = self.get_backlink_label()
        context['uses_email_auth_backend'] = settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND
        return context
| bsd-3-clause | c5a4a8a8909a67d12249bf6f5dd753b0 | 39.163265 | 125 | 0.592988 | 4.163611 | false | false | false | false |
devilry/devilry-django | devilry/devilry_compressionutil/backend_registry.py | 1 | 2209 | from ievv_opensource.utils.singleton import Singleton
class DuplicateBackendTypeError(Exception):
    """
    Raised by :meth:`Registry.add` when trying to register multiple
    :class:`.~devilry.devilry_ziputil.backends.PythonZipFileBackend`
    backends with the same ``backend_id``.
    """
class Registry(Singleton):
    """
    Registry for subclasses of
    :class:`~devilry.devilry_ziputil.backends.backends_base.PythonZipFileBackend`.

    Backends are keyed by their ``backend_id`` attribute.
    """
    def __init__(self):
        super(Registry, self).__init__()
        self._backendclasses = {}

    def __get_class_path(self):
        """Return the dotted module path of this class (used in error messages)."""
        return '{}.{}'.format(self.__module__, self.__class__.__name__)

    def add(self, backend):
        """
        Register a backend class.

        Args:
            backend: A backend class with a unique ``backend_id`` attribute.

        Raises:
            DuplicateBackendTypeError: If a backend with the same
                ``backend_id`` is already registered.
        """
        if backend.backend_id in self._backendclasses:
            raise DuplicateBackendTypeError('Duplicate backend id in {}: {}'.format(
                self.__get_class_path(), backend.backend_id
            ))
        self._backendclasses[backend.backend_id] = backend

    def get(self, backend_id):
        """
        Look up a registered backend class.

        Args:
            backend_id: The ``backend_id`` of the backend class.

        Returns:
            The registered backend class, or ``None`` if no backend is
            registered with the given id.
        """
        # dict.get already returns None for missing keys -- no need for a
        # try/except KeyError round-trip.
        return self._backendclasses.get(backend_id)
class MockableRegistry(Registry):
    """
    A non-singleton version of :class:`.Registry` intended for tests.
    """

    def __init__(self):
        # Reset the singleton instance guard so each test gets a fresh registry.
        self._instance = None
        super(MockableRegistry, self).__init__()

    @classmethod
    def make_mockregistry(cls, *backend_classes):
        """
        Build a registry instance preloaded with the given backends.

        Args:
            *backend_classes: Backend classes to register.

        Returns:
            MockableRegistry: A fresh registry containing ``backend_classes``.
        """
        registry = cls()
        for backend_class in backend_classes:
            registry.add(backend_class)
        return registry
| bsd-3-clause | b80fc54f0638646de601af448bcff22e | 25.939024 | 113 | 0.583522 | 4.47166 | false | false | false | false |
devilry/devilry-django | devilry/devilry_group/feedbackfeed_builder/feedbackfeed_timelinebuilder.py | 1 | 10659 | # -*- coding: utf-8 -*-
import collections
import datetime
# Django imports
import functools
from django.utils import timezone
# Devilry/cradmin imports
from devilry.devilry_group.models import GroupComment
from devilry.devilry_group.feedbackfeed_builder import builder_base
class AbstractTimelineBuilder(object):
    """
    Base class for building a chronologically sorted timeline of
    feedbackfeed events, stored in ``self.time_line`` as a dict mapping
    datetimes to lists of event dicts.
    """

    def build(self):
        """Populate ``self.time_line``. Must be implemented by subclasses."""
        raise NotImplementedError()

    def _add_event_item_to_timeline(self, datetime_obj, event_dict):
        """
        General function for adding an event to the timeline.

        An event item is anything that occurs on the feedbackfeed that can
        be sorted; a comment, deadline created, deadline expired and grading.

        Args:
            datetime_obj: The datetime the event should be ordered by.
                Events with a ``None`` datetime are silently dropped.
            event_dict: The event dictionary. The ordering datetime is
                stored on it under the ``ordering_datetime`` key.
        """
        if datetime_obj is None:
            return
        event_dict['ordering_datetime'] = datetime_obj
        self.time_line.setdefault(datetime_obj, []).append(event_dict)

    def get_as_list(self):
        """
        Get a flat list of event dictionaries ordered by datetime.

        Returns:
            list: List of event-dictionaries.
        """
        return [event_dict
                for datetime_obj in sorted(self.time_line.keys())
                for event_dict in self.time_line[datetime_obj]]

    def sort_dict(self, dictionary):
        """
        Return an :class:`collections.OrderedDict` with the items of
        ``dictionary`` sorted by their datetime keys. ``None`` keys sort
        first.

        Args:
            dictionary (dict): Dictionary of items with datetime (or ``None``) keys.
        """
        def sort_key(item):
            datetime_obj = item[0]
            # Sort None keys first WITHOUT ever comparing None (or a naive
            # placeholder datetime) against a real -- possibly timezone
            # aware -- key, which would raise TypeError. Tuple comparison
            # only reaches the second element when the flags are equal.
            if datetime_obj is None:
                return (False, datetime.datetime.min)
            return (True, datetime_obj)
        return collections.OrderedDict(sorted(dictionary.items(), key=sort_key))
class FeedbackFeedTimelineBuilder(AbstractTimelineBuilder, builder_base.FeedbackFeedBuilderBase):
    """
    Builds a sorted timeline of the events that occur in the feedbackfeed.

    Produces a dictionary of events such as comments, new deadlines,
    expired deadlines and gradings, keyed and ordered by datetime.
    """

    def __init__(self, **kwargs):
        """
        Args:
            group: An :obj:`~devilry.apps.core.AssignmentGroup` object.
            feedbacksets: Fetched feedbacksets, comments and files.
        """
        super(FeedbackFeedTimelineBuilder, self).__init__(**kwargs)
        self.time_line = {}

    def get_as_list_flat(self):
        """Flatten the timeline, interleaving each feedbackset entry with its sub-events."""
        flat_list = []
        for datetime_obj in sorted(self.time_line.keys()):
            for event_dict in self.time_line[datetime_obj]:
                flat_list.append(event_dict)
                flat_list.extend(event_dict['feedbackset_events'])
        return flat_list

    def __skip_feedbackset(self, feedback_set):
        """
        Merge-type feedbacksets are skipped unless they are graded or have
        visible comments.

        Note::
            ``feedback_set.groupcomment_set`` is already prefetched and
            filtered based on the devilryrole (student, examiner and admin),
            so an ungraded merge feedbackset without public comments is not
            rendered for e.g. a student. See
            ``builder_base.get_feedbackfeed_builder_queryset`` for details.
        """
        if not feedback_set.is_merge_type:
            return False
        has_visible_content = (feedback_set.grading_published_datetime
                               or feedback_set.groupcomment_set.exists())
        return not has_visible_content

    def __get_ordering_datetime(self, feedback_set):
        """
        Merge-type feedbacksets are ordered by their creation time so they
        appear before the current last feedbackset; regular feedbacksets
        are ordered by their deadline.
        """
        if feedback_set.is_merge_type:
            return feedback_set.created_datetime
        return feedback_set.deadline_datetime

    def build(self):
        """Populate the timeline with one entry per relevant feedbackset."""
        for feedback_set in self.feedbacksets:
            if self.__skip_feedbackset(feedback_set):
                continue
            sub_builder = FeedbackSetEventTimeLine(
                feedback_set=feedback_set,
                assignment=self.assignment)
            sub_builder.build()
            self._add_event_item_to_timeline(
                datetime_obj=self.__get_ordering_datetime(feedback_set),
                event_dict={
                    'feedbackset': feedback_set,
                    'feedbackset_events': sub_builder.get_as_list()
                })
        self.time_line = self.sort_dict(self.time_line)
class FeedbackSetEventTimeLine(AbstractTimelineBuilder):
    """
    Builds the sub-timeline of events belonging to a single
    :class:`~devilry.devilry_group.models.FeedbackSet`: deadline moves,
    deadline expiry, grading, comments and grading updates.
    """
    def __init__(self, feedback_set, assignment):
        super(FeedbackSetEventTimeLine, self).__init__()
        self.feedback_set = feedback_set
        self.assignment = assignment
        self.time_line = {}
    def __add_deadline_expired_if_needed(self):
        """
        Adds a deadline_expired event type to the timeline.
        The expired deadline is the :func:`devilry.devilry_group.models.FeedbackSet.current_deadline` of
        ``feedbackset``. Nothing is added while the deadline is still in the future.
        """
        current_deadline = self.feedback_set.current_deadline(assignment=self.assignment)
        if current_deadline is None:
            return
        if current_deadline <= timezone.now():
            self._add_event_item_to_timeline(
                datetime_obj=current_deadline,
                event_dict={
                    "type": "deadline_expired",
                    "deadline_datetime": current_deadline,
                    "feedbackset": self.feedback_set
                })
    def __add_grade_to_timeline_if_published(self):
        """
        Add a grade event when the :obj:`devilry.devilry_group.models.FeedbackSet.grading_published_datetime` is set for
        ``feedbackset``. (_add_event_item_to_timeline silently skips None
        datetimes, so nothing is added when the grading is unpublished.)
        """
        grade_points = self.feedback_set.grading_points
        # If the grading was later edited, show the originally published
        # points for this event -- presumably grading_updates is ordered
        # oldest first, so the first update's old points are the original
        # grade. TODO(review): confirm the ordering in the prefetch.
        if len(self.feedback_set.grading_updates) > 0:
            grade_points = self.feedback_set.grading_updates[0].old_grading_points
        self._add_event_item_to_timeline(
            datetime_obj=self.feedback_set.grading_published_datetime,
            event_dict={
                'type': 'grade',
                'feedbackset': self.feedback_set,
                'grade_points': grade_points,
                'assignment': self.assignment
            }
        )
    def __add_comment_to_timeline(self, group_comment):
        """
        Adds a :class:`devilry.devilry_group.models.GroupComment` to the timeline.
        Args:
            group_comment: The comment to add.
        """
        event_dict = {
            "type": "comment",
            "obj": group_comment,
            "related_deadline": self.feedback_set.current_deadline(assignment=self.assignment),
        }
        self._add_event_item_to_timeline(
            datetime_obj=group_comment.published_datetime,
            event_dict=event_dict
        )
    def __add_comments_to_timeline(self):
        """
        Iterates through the comments for ``feedbackset`` and adds them to the time-line.
        """
        for group_comment in self.feedback_set.groupcomment_set.all():
            self.__add_comment_to_timeline(group_comment=group_comment)
    def __add_deadline_moved_event(self):
        """
        Iterates through the log entries for changes in the :obj:`~.devilry.devilry_group.models.FeedbackSet`s
        deadline_datetime and adds them to the time-line.
        """
        deadline_history_queryset = self.feedback_set.feedbacksetdeadlinehistory_set \
            .order_by('-changed_datetime')
        last_deadline_history = None
        if deadline_history_queryset.count() > 0:
            last_deadline_history = deadline_history_queryset[0]
        for deadline_history in deadline_history_queryset:
            # Flag the most recent move so it can be rendered differently.
            is_last = False
            if deadline_history.changed_datetime == last_deadline_history.changed_datetime:
                is_last = True
            self._add_event_item_to_timeline(
                datetime_obj=deadline_history.changed_datetime,
                event_dict={
                    'type': 'deadline_moved',
                    'is_last': is_last,
                    'obj': deadline_history,
                    'feedbackset': self.feedback_set
                }
            )
    def __add_grading_updated_event(self):
        """
        Add event for updated grading on a :class:`~.devilry.devilry_group.models.FeedbackSet`.
        """
        grading_updates_length = len(self.feedback_set.grading_updates)
        for index, grading_updated in enumerate(self.feedback_set.grading_updates):
            # Each update event also carries the points it was changed *to*:
            # the next update's old points, or the current grading points
            # for the final update.
            if index+1 == grading_updates_length:
                next_grading_points = self.feedback_set.grading_points
            else:
                next_grading_points = self.feedback_set.grading_updates[index+1].old_grading_points
            self._add_event_item_to_timeline(
                datetime_obj=grading_updated.updated_datetime,
                event_dict={
                    'type': 'grading_updated',
                    'obj': grading_updated,
                    'next_grading_points': next_grading_points,
                    'feedbackset': self.feedback_set
                }
            )
    def build(self):
        """Collect all event types for the feedbackset, then sort the timeline."""
        self.__add_deadline_moved_event()
        self.__add_deadline_expired_if_needed()
        self.__add_grade_to_timeline_if_published()
        self.__add_comments_to_timeline()
        self.__add_grading_updated_event()
        self.time_line = self.sort_dict(self.time_line)
| bsd-3-clause | ced0bc4c0b1c32cc880ba09934c92cf3 | 37.76 | 120 | 0.597992 | 4.291063 | false | false | false | false |
devilry/devilry-django | devilry/devilry_admin/views/period/manage_tags/manage_tags.py | 1 | 24353 | # -*- coding: utf-8 -*-
import re
from django import forms
from django.db import models
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db import transaction
from django.http import Http404
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import gettext_lazy, pgettext_lazy
from django.views.generic import TemplateView
from django.views.generic import View
from crispy_forms import layout
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.crispylayouts import PrimarySubmit
from cradmin_legacy.viewhelpers import formbase
from cradmin_legacy.viewhelpers import update, delete, crudbase
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers.listbuilder import itemvalue
from cradmin_legacy.viewhelpers import multiselect2view
from cradmin_legacy.viewhelpers import multiselect2
from devilry.apps.core.models import PeriodTag
from devilry.apps.core.models import RelatedStudent, RelatedExaminer
from devilry.devilry_admin.cradminextensions.listfilter import listfilter_tags, listfilter_relateduser
class TagItemValue(itemvalue.EditDelete):
    """List-item renderer for a :class:`.PeriodTag` with edit/delete buttons."""
    template_name = 'devilry_admin/period/manage_tags/tag-item-value.django.html'

    def get_title(self):
        # Human readable tag name used as the item title.
        return self.value.displayname
class HideShowPeriodTag(TemplateView):
    """Toggle the ``is_hidden`` flag on a single :class:`.PeriodTag`.

    The tag id is read from the ``tag_id`` querystring parameter; after
    toggling, the user is redirected back to the app index.
    """

    def dispatch(self, request, *args, **kwargs):
        period_tag = self.__get_period_tag(self.__get_tag_id(request))
        # Flip visibility: hidden tags become visible and vice versa.
        period_tag.is_hidden = not period_tag.is_hidden
        period_tag.full_clean()
        period_tag.save()
        return HttpResponseRedirect(str(self.request.cradmin_app.reverse_appindexurl()))

    def __get_tag_id(self, request):
        """Return the ``tag_id`` querystring value or raise 404 if absent."""
        tag_id = request.GET.get('tag_id', None)
        if not tag_id:
            raise Http404('Missing parameters.')
        return tag_id

    def __get_period_tag(self, tag_id):
        """Return the :class:`.PeriodTag` with ``tag_id`` or raise 404."""
        try:
            return PeriodTag.objects.get(id=tag_id)
        except PeriodTag.DoesNotExist:
            raise Http404('Tag does not exist.')
class TagListBuilderListView(listbuilderview.FilterListMixin, listbuilderview.View):
    """
    Paginated, filterable list of all :class:`.PeriodTag`s on the period.
    """
    template_name = 'devilry_admin/period/manage_tags/manage-tags-list-view.django.html'
    model = PeriodTag
    value_renderer_class = TagItemValue
    paginate_by = 10

    def get_pagetitle(self):
        return gettext_lazy('Tags on %(what)s') % {'what': self.request.cradmin_role.parentnode}

    def add_filterlist_items(self, filterlist):
        # Free-text search plus a hidden/visible radio filter.
        filterlist.append(listfilter_tags.Search())
        # filterlist.append(listfilter_tags.IsHiddenFilter())
        filterlist.append(listfilter_tags.IsHiddenRadioFilter())

    def get_filterlist_url(self, filters_string):
        return self.request.cradmin_app.reverse_appurl(
            'filter',
            kwargs={'filters_string': filters_string}
        )

    def get_unfiltered_queryset_for_role(self, role):
        # Prefetch related students/examiners (sorted by user shortname) so
        # the template can render the user lists without extra queries.
        queryset = self.model.objects.filter(period=role)\
            .prefetch_related(
                models.Prefetch('relatedstudents',
                                queryset=RelatedStudent.objects.all().select_related('user')
                                .order_by('user__shortname')))\
            .prefetch_related(
                models.Prefetch('relatedexaminers',
                                queryset=RelatedExaminer.objects.all().select_related('user')
                                .order_by('user__shortname')))
        return queryset

    def get_no_items_message(self):
        return pgettext_lazy(
            'TagListBuilderListView get_no_items_message',
            'No period tags'
        )
class CreatePeriodTagForm(forms.Form):
    """Form for creating one or more :class:`.PeriodTag`s from a
    comma-separated string of tag names."""
    tag_text = forms.CharField()

    def __init__(self, *args, **kwargs):
        super(CreatePeriodTagForm, self).__init__(*args, **kwargs)
        self.fields['tag_text'].label = gettext_lazy('Tags')
        self.fields['tag_text'].help_text = gettext_lazy(
            'Enter tags here. Tags must be in a comma separated format, '
            'e.g: tag1, tag2, tag3. '
            'Each tag may be up to 15 characters long.'
        )
        self.fields['tag_text'].widget = forms.Textarea()

    def get_added_tags_list(self):
        """
        Get a list of all the tags added in the form separated by comma.

        Empty entries (e.g. from trailing commas) are dropped.

        Returns:
            (list): List of tags as strings.
        """
        return [tag.strip() for tag in self.cleaned_data['tag_text'].split(',')
                if len(tag.strip()) > 0]

    def clean(self):
        super(CreatePeriodTagForm, self).clean()
        if 'tag_text' not in self.cleaned_data or len(self.cleaned_data['tag_text']) == 0:
            raise ValidationError(gettext_lazy('Tag field is empty.'))
        tags_list = self.get_added_tags_list()
        # Bug fix: the original nested ``len(tags_list) > 15`` inside
        # ``len(tags_list) == 0``, which made the "Wrong format" error
        # unreachable and silently accepted input consisting only of
        # commas/whitespace.
        if len(tags_list) == 0:
            raise ValidationError(
                {'tag_text': gettext_lazy('Wrong format. Example: tag1, tag2, tag3')}
            )
        for tag in tags_list:
            # Each tag is limited to 15 characters (see the field help text).
            if len(tag) > 15:
                raise ValidationError(
                    {'tag_text': gettext_lazy('One or more tags exceed the limit of 15 characters.')}
                )
            if tags_list.count(tag) > 1:
                raise ValidationError(
                    {'tag_text': gettext_lazy('"%(what)s" occurs more than once in the form.') % {'what': tag}}
                )
class AddTagsView(formbase.FormView):
    """
    View for adding a new tag to the semester.

    Tags that already exist on the period are silently skipped; the user
    is informed about how many tags were created vs. ignored.
    """
    template_name = 'devilry_admin/period/manage_tags/add-tag.django.html'
    form_class = CreatePeriodTagForm

    @classmethod
    def deserialize_preview(cls, serialized):
        # Preview support is not used by this view.
        pass

    def serialize_preview(self, form):
        # Preview support is not used by this view.
        pass

    def get_field_layout(self):
        return [
            layout.Div(
                layout.Field('tag_text', focusonme='focusonme'),
                css_class='cradmin-globalfields'
            )
        ]

    def get_buttons(self):
        return [
            PrimarySubmit('add_tags', gettext_lazy('Add tags'))
        ]

    def get_success_url(self):
        return reverse_cradmin_url(
            instanceid='devilry_admin_periodadmin',
            appname='manage_tags',
            roleid=self.request.cradmin_role.id,
            viewname=crapp.INDEXVIEW_NAME
        )

    def __create_tags(self, tags_string_list, excluded_tags):
        """Bulk-create ``PeriodTag``s for every name not in ``excluded_tags``.

        Returns the number of tags created.
        """
        tags = []
        period = self.request.cradmin_role
        for tag_string in tags_string_list:
            if tag_string not in excluded_tags:
                tags.append(PeriodTag(period=period, tag=tag_string))
        with transaction.atomic():
            PeriodTag.objects.bulk_create(tags)
        return len(tags)

    def form_valid(self, form):
        tags_string_list = form.get_added_tags_list()
        # Tags already present on the period; these are skipped on create.
        excluded_tags = PeriodTag.objects\
            .filter_editable_tags_on_period(period=self.request.cradmin_role)\
            .filter(tag__in=tags_string_list)\
            .values_list('tag', flat=True)
        # Check if all tags to be added exists.
        if len(tags_string_list) == excluded_tags.count():
            self.add_error_message(gettext_lazy('The tag(s) you wanted to add already exists.'))
            return HttpResponseRedirect(str(self.request.cradmin_app.reverse_appurl(viewname='add_tag')))
        # Add success message.
        num_tags_created = self.__create_tags(tags_string_list, excluded_tags)
        message = gettext_lazy('%(created)d tag(s) added') % {'created': num_tags_created}
        if excluded_tags.count() > 0:
            message += gettext_lazy(
                ', %(excluded)d tag(s) already existed and were ignored.') % {
                'excluded': excluded_tags.count()
            }
        self.add_success_message(message)
        return super(AddTagsView, self).form_valid(form=form)

    def add_success_message(self, message):
        """Flash a success message on the next rendered page."""
        messages.success(self.request, message=message)

    def add_error_message(self, message):
        """Flash an error message on the next rendered page."""
        messages.error(self.request, message=message)

    def get_context_data(self, **kwargs):
        context_data = super(AddTagsView, self).get_context_data(**kwargs)
        period = self.request.cradmin_role
        context_data['period'] = period
        context_data['period_tags'] = PeriodTag.objects.filter(period=period)
        return context_data
class EditPeriodTagForm(forms.ModelForm):
    """
    Form for editing :class:`~.devilry.apps.core.models.period_tag.PeriodTag`s.

    Requires two extra kwargs: ``period`` (the period to check uniqueness
    against) and ``tagobject`` (the tag instance being edited).
    """
    class Meta:
        model = PeriodTag
        fields = [
            'tag',
        ]

    def __init__(self, *args, **kwargs):
        self.period = kwargs.pop('period')
        self.tagobject = kwargs.pop('tagobject')
        super(EditPeriodTagForm, self).__init__(*args, **kwargs)
        self.fields['tag'].label = gettext_lazy('Tag name')
        self.fields['tag'].help_text = gettext_lazy(
            'Rename the tag here. Up to 15 characters. '
            'Can contain any character except comma(,)'
        )

    def clean(self):
        cleaned_data = super(EditPeriodTagForm, self).clean()
        if 'tag' not in self.cleaned_data or len(self.cleaned_data['tag']) == 0:
            raise ValidationError(
                {'tag': gettext_lazy('Tag cannot be empty.')}
            )
        tag = cleaned_data['tag']
        # Uniqueness check: renaming to an existing tag is rejected, but
        # re-saving with the unchanged name is allowed.
        if PeriodTag.objects.filter(period=self.period, tag=tag).exists():
            if tag != self.tagobject.tag:
                raise ValidationError(gettext_lazy('%(what)s already exists') % {'what': tag})
        # Commas are the separator in the create form, so they are banned here.
        if ',' in tag:
            raise ValidationError(
                {'tag': gettext_lazy('Tag contains a comma(,).')}
            )
        return cleaned_data
class EditDeleteViewMixin(View):
    """
    Edit/delete mixin for :class:`~.devilry.apps.core.models.period_tag.PeriodTag`.

    Raises:
        Http404: if prefix :attr:`~.devilry.apps.core.models.period_tag.PeriodTag.prefix`
            is not blank, or if the tag does not exist on the period.
    """
    model = PeriodTag

    def dispatch(self, request, *args, **kwargs):
        self.tag_id = kwargs.get('pk')
        # Bug fix: an unknown/foreign tag id previously raised an unhandled
        # PeriodTag.DoesNotExist (HTTP 500). Translate it to a 404 like the
        # other tag views do.
        try:
            self.tag = PeriodTag.objects.get(period=self.request.cradmin_role, id=self.tag_id)
        except PeriodTag.DoesNotExist:
            raise Http404('Tag does not exist.')
        # Tags with a prefix are imported/managed automatically and must not
        # be edited or deleted manually.
        if self.tag.prefix != '':
            raise Http404()
        return super(EditDeleteViewMixin, self).dispatch(request, *args, **kwargs)

    def get_queryset_for_role(self, role):
        return PeriodTag.objects.filter(period=role, id=self.tag_id)

    def get_success_url(self):
        return str(self.request.cradmin_app.reverse_appindexurl())
class EditTagView(crudbase.OnlySaveButtonMixin, EditDeleteViewMixin, update.UpdateView):
    """
    Edit a :class:`~.devilry.apps.core.models.period_tag.PeriodTag`.
    """
    template_name = 'devilry_admin/period/manage_tags/crud.django.html'
    form_class = EditPeriodTagForm

    def get_pagetitle(self):
        return gettext_lazy('Edit %(what)s') % {
            'what': self.tag.displayname
        }

    def get_field_layout(self):
        return [
            layout.Div(
                layout.Field('tag', focusonme='focusonme'),
                css_class='cradmin-globalfields'
            )
        ]

    def save_object(self, form, commit=True):
        # Call super twice on purpose: first with commit=False to get the
        # unsaved instance so modified_datetime can be stamped, then with
        # commit=True to persist it.
        period_tag = super(EditTagView, self).save_object(form=form, commit=False)
        period_tag.modified_datetime = timezone.now()
        self.add_success_messages(gettext_lazy('Tag successfully edited.'))
        return super(EditTagView, self).save_object(form=form, commit=True)

    def get_form_kwargs(self):
        # EditPeriodTagForm needs the period and the tag being edited for
        # its uniqueness validation.
        kwargs = super(EditTagView, self).get_form_kwargs()
        kwargs['period'] = self.request.cradmin_role
        kwargs['tagobject'] = self.tag
        return kwargs

    def get_context_data(self, **kwargs):
        period = self.request.cradmin_role
        context_data = super(EditTagView, self).get_context_data(**kwargs)
        context_data['period'] = period
        context_data['period_tags'] = PeriodTag.objects\
            .filter_editable_tags_on_period(period=period)
        return context_data
class DeleteTagView(EditDeleteViewMixin, delete.DeleteView):
    """
    Delete a :class:`~.devilry.apps.core.models.period_tag.PeriodTag`.
    """
    template_name = 'devilry_admin/period/manage_tags/delete.django.html'

    def get_object_preview(self):
        # Show the plain tag string in the delete-confirmation page.
        return self.model.objects.get(id=self.tag_id).tag
class SelectedRelatedUsersForm(forms.Form):
    """Multiselect form validating the chosen related users against a
    queryset supplied via the ``relatedusers_queryset`` kwarg."""

    #: Error message shown when a selected user is not in the queryset.
    #: Subclasses override this to customize the wording.
    invalid_item_selected_message = gettext_lazy(
        'Invalid user was selected. This may happen if someone else added or '
        'removed one or more of the available users while you were selecting. '
        'Please try again.'
    )

    selected_items = forms.ModelMultipleChoiceField(
        # Queryset is set per instance in __init__.
        queryset=None,
        error_messages={
            'invalid_choice': invalid_item_selected_message
        }
    )

    def __init__(self, *args, **kwargs):
        relatedusers_queryset = kwargs.pop('relatedusers_queryset')
        super(SelectedRelatedUsersForm, self).__init__(*args, **kwargs)
        self.fields['selected_items'].queryset = relatedusers_queryset
        # Bug fix: ``error_messages`` above is evaluated once, at base-class
        # definition time, so subclass overrides of
        # ``invalid_item_selected_message`` never reached the field. Rebind
        # the message per instance so subclasses take effect.
        self.fields['selected_items'].error_messages['invalid_choice'] = \
            self.invalid_item_selected_message
class SelectedItemsTarget(multiselect2.target_renderer.Target):
    """Target (selection box) renderer whose labels are parameterized by
    the type of related user, e.g. 'student' or 'examiner'."""

    def __init__(self, *args, **kwargs):
        # Injected by the view (see BaseRelatedUserMultiSelectView).
        self.relateduser_type = kwargs.pop('relateduser_type')
        super(SelectedItemsTarget, self).__init__(*args, **kwargs)

    def get_with_items_title(self):
        return pgettext_lazy('admin multiselect2_relateduser',
                             'Selected %(what)s') % {'what': self.relateduser_type}

    def get_without_items_text(self):
        return pgettext_lazy('admin multiselect2_relateduser',
                             'No %(what)s selected') % {'what': self.relateduser_type}
class SelectedRelatedUserItem(multiselect2.selected_item_renderer.SelectedItem):
    """Renderer for a related user once it has been selected."""
    valuealias = 'relateduser'

    def get_title(self):
        # Display the user's short username.
        return self.relateduser.user.shortname
class SelectableRelatedUserItem(multiselect2.listbuilder_itemvalues.ItemValue):
    """Renderer for a related user in the selectable (unselected) list."""
    valuealias = 'relateduser'
    selected_item_renderer_class = SelectedRelatedUserItem

    def get_title(self):
        # Display the user's short username.
        return self.relateduser.user.shortname
class BaseRelatedUserMultiSelectView(multiselect2view.ListbuilderFilterView):
    """
    Base multiselect view for :class:`~.devilry.apps.core.models.relateduser.RelatedExaminer`s and
    :class:`~.devilry.apps.core.models.relateduser.RelatedStudents`s.
    """
    template_name = 'devilry_admin/period/manage_tags/base-multiselect-view.django.html'
    value_renderer_class = SelectableRelatedUserItem
    form_class = SelectedRelatedUsersForm
    paginate_by = 20

    #: the specific tag :attr:`~.devilry.apps.core.models.period_tag.PeriodTag.tag`.
    tag_id = None

    #: Type of related user as shown in ui.
    #: e.g 'student' or 'examiner'
    relateduser_string = ''

    def dispatch(self, request, *args, **kwargs):
        # The tag the users are being added to / removed from.
        self.tag_id = kwargs.get('tag_id')
        return super(BaseRelatedUserMultiSelectView, self).dispatch(request, *args, **kwargs)

    def get_target_renderer_class(self):
        return SelectedItemsTarget

    def get_period_tag(self):
        """Return the :class:`.PeriodTag` identified by the url's tag_id."""
        return PeriodTag.objects.get(id=self.tag_id)

    def get_tags_for_period(self):
        """Return all tags on the current period."""
        return PeriodTag.objects.filter(period=self.request.cradmin_role)

    def add_filterlist_items(self, filterlist):
        filterlist.append(listfilter_relateduser.Search())
        filterlist.append(listfilter_relateduser.OrderRelatedStudentsFilter())
        filterlist.append(listfilter_relateduser.TagSelectFilter(period=self.request.cradmin_role))

    def get_unfiltered_queryset_for_role(self, role):
        """
        Get all related users for the period. Subclasses narrow this down
        to users on (remove view) or off (add view) the current tag.
        """
        return self.model.objects.filter(period=role)

    def get_form_kwargs(self):
        # The form validates the selection against the same queryset the
        # list was rendered from.
        period = self.request.cradmin_role
        kwargs = super(BaseRelatedUserMultiSelectView, self).get_form_kwargs()
        kwargs['relatedusers_queryset'] = self.get_queryset_for_role(role=period)
        return kwargs

    def get_target_renderer_kwargs(self):
        kwargs = super(BaseRelatedUserMultiSelectView, self).get_target_renderer_kwargs()
        kwargs['relateduser_type'] = self.relateduser_string
        return kwargs

    def add_success_message(self, message):
        """Flash a success message on the next rendered page."""
        messages.success(self.request, message=message)

    def add_error_message(self, message):
        """Flash an error message on the next rendered page."""
        messages.error(self.request, message=message)

    def get_success_url(self):
        return str(self.request.cradmin_app.reverse_appindexurl())
class AddRelatedUserToTagMultiSelectView(BaseRelatedUserMultiSelectView):
    """
    Add related users to a :class:`~.devilry.apps.core.models.period_tag.PeriodTag`.
    """
    def get_pagetitle(self):
        tag_displayname = self.get_period_tag().displayname
        return gettext_lazy(
            'Add %(user)s to %(tag)s') % {
            'user': self.relateduser_string,
            'tag': tag_displayname
        }

    def get_queryset_for_role(self, role):
        # Only offer users that are not already registered on this tag.
        return super(AddRelatedUserToTagMultiSelectView, self)\
            .get_queryset_for_role(role=role)\
            .exclude(periodtag__id=self.tag_id)

    def add_related_users(self, period_tag, related_users):
        """Register each selected user on ``period_tag`` atomically."""
        with transaction.atomic():
            for related_user in related_users:
                related_user.periodtag_set.add(period_tag)

    def form_valid(self, form):
        period_tag = self.get_period_tag()
        related_users = form.cleaned_data['selected_items']
        self.add_related_users(period_tag=period_tag, related_users=related_users)
        self.add_success_message(
            message=gettext_lazy(
                '%(number_users)d %(user_string)s added successfully.'
            ) % {
                'number_users': len(related_users),
                'user_string': self.relateduser_string,
            }
        )
        return super(AddRelatedUserToTagMultiSelectView, self).form_valid(form=form)
class RemoveRelatedUserFromTagMultiSelectView(BaseRelatedUserMultiSelectView):
    """
    Remove related users from a :class:`~.devilry.apps.core.models.period_tag.PeriodTag`.
    """
    def get_pagetitle(self):
        tag_displayname = self.get_period_tag().displayname
        return gettext_lazy(
            'Remove %(user)s from %(tag)s'
        ) % {
            'user': self.relateduser_string,
            'tag': tag_displayname
        }

    def get_queryset_for_role(self, role):
        # Only offer users that are currently registered on this tag.
        return super(RemoveRelatedUserFromTagMultiSelectView, self)\
            .get_queryset_for_role(role=role)\
            .filter(periodtag__id=self.tag_id)

    def remove_related_users(self, period_tag, related_users):
        """Unregister each selected user from ``period_tag`` atomically."""
        with transaction.atomic():
            for related_user in related_users:
                related_user.periodtag_set.remove(period_tag)

    def form_valid(self, form):
        period_tag = self.get_period_tag()
        related_users = form.cleaned_data['selected_items']
        self.remove_related_users(period_tag=period_tag, related_users=related_users)
        self.add_success_message(
            message=gettext_lazy(
                '%(number_users)d %(user_string)s removed successfully'
            ) % {
                'number_users': len(related_users),
                'user_string': self.relateduser_string
            }
        )
        return super(RemoveRelatedUserFromTagMultiSelectView, self).form_valid(form=form)
class SelectedRelatedExaminerForm(SelectedRelatedUsersForm):
    # NOTE(review): the base form binds ``error_messages`` at class-definition
    # time; verify this override actually reaches the field's error message.
    invalid_item_selected_message = gettext_lazy('Invalid examiner was selected.')


class SelectedRelatedStudentForm(SelectedRelatedUsersForm):
    # NOTE(review): same caveat as SelectedRelatedExaminerForm above.
    invalid_item_selected_message = gettext_lazy('Invalid student was selected.')


class ExaminerMultiSelectViewMixin(object):
    """Configures a multiselect view for :class:`.RelatedExaminer`s."""
    model = RelatedExaminer
    relateduser_string = gettext_lazy('examiner')
    form_class = SelectedRelatedExaminerForm


class StudentMultiSelectViewMixin(object):
    """Configures a multiselect view for :class:`.RelatedStudent`s."""
    model = RelatedStudent
    relateduser_string = gettext_lazy('student')
    form_class = SelectedRelatedStudentForm
class RelatedExaminerAddView(ExaminerMultiSelectViewMixin, AddRelatedUserToTagMultiSelectView):
    """
    Multi-select add view for :class:`~.devilry.apps.core.models.relateduser.RelatedExaminer`.
    """
    def get_filterlist_url(self, filters_string):
        # Filtered urls must keep the tag_id from the original request.
        return self.request.cradmin_app.reverse_appurl(
            'add_examiners_filter', kwargs={
                'tag_id': self.tag_id,
                'filters_string': filters_string
            })


class RelatedExaminerRemoveView(ExaminerMultiSelectViewMixin, RemoveRelatedUserFromTagMultiSelectView):
    """
    Multi-select remove view for :class:`~.devilry.apps.core.models.relateduser.RelatedExaminer`.
    """
    def get_filterlist_url(self, filters_string):
        # Filtered urls must keep the tag_id from the original request.
        return self.request.cradmin_app.reverse_appurl(
            'remove_examiners_filter', kwargs={
                'tag_id': self.tag_id,
                'filters_string': filters_string
            })


class RelatedStudentAddView(StudentMultiSelectViewMixin, AddRelatedUserToTagMultiSelectView):
    """
    Multi-select add view for :class:`~.devilry.apps.core.models.relateduser.RelatedStudent`.
    """
    def get_filterlist_url(self, filters_string):
        # Filtered urls must keep the tag_id from the original request.
        return self.request.cradmin_app.reverse_appurl(
            'add_students_filter', kwargs={
                'tag_id': self.tag_id,
                'filters_string': filters_string
            })


class RelatedStudentRemoveView(StudentMultiSelectViewMixin, RemoveRelatedUserFromTagMultiSelectView):
    """
    Multi-select remove view for :class:`~.devilry.apps.core.models.relateduser.RelatedStudent`.
    """
    def get_filterlist_url(self, filters_string):
        # Filtered urls must keep the tag_id from the original request.
        return self.request.cradmin_app.reverse_appurl(
            'remove_students_filter', kwargs={
                'tag_id': self.tag_id,
                'filters_string': filters_string
            })
class App(crapp.App):
    """URL routes for the period tag management app.

    Fix: the examiner/student routes used non-raw strings containing ``\\d``,
    an invalid escape sequence (DeprecationWarning today, SyntaxError in
    future Python versions) and inconsistent with the raw-string patterns
    above. All patterns are now raw strings.
    """
    appurls = [
        crapp.Url(r'^$',
                  TagListBuilderListView.as_view(),
                  name=crapp.INDEXVIEW_NAME),
        crapp.Url(r'^filter/(?P<filters_string>.+)?$',
                  TagListBuilderListView.as_view(),
                  name='filter'),
        crapp.Url(r'^add$',
                  AddTagsView.as_view(),
                  name='add_tag'),
        crapp.Url(r'^edit/(?P<pk>\d+)$',
                  EditTagView.as_view(),
                  name='edit'),
        crapp.Url(r'^delete/(?P<pk>\d+)$',
                  DeleteTagView.as_view(),
                  name='delete'),
        crapp.Url(r'^toggle-visibility$',
                  HideShowPeriodTag.as_view(),
                  name='toggle_visibility'),
        crapp.Url(r'^add-examiners/(?P<tag_id>\d+)$',
                  RelatedExaminerAddView.as_view(),
                  name='add_examiners'),
        crapp.Url(r'^add-examiners/(?P<tag_id>\d+)/(?P<filters_string>.+)?$',
                  RelatedExaminerAddView.as_view(),
                  name='add_examiners_filter'),
        crapp.Url(r'^remove-examiners/(?P<tag_id>\d+)$',
                  RelatedExaminerRemoveView.as_view(),
                  name='remove_examiners'),
        crapp.Url(r'^remove-examiners/(?P<tag_id>\d+)/(?P<filters_string>.+)?$',
                  RelatedExaminerRemoveView.as_view(),
                  name='remove_examiners_filter'),
        crapp.Url(r'^add-students/(?P<tag_id>\d+)$',
                  RelatedStudentAddView.as_view(),
                  name='add_students'),
        crapp.Url(r'^add-students/(?P<tag_id>\d+)/(?P<filters_string>.+)?$',
                  RelatedStudentAddView.as_view(),
                  name='add_students_filter'),
        crapp.Url(r'^remove-students/(?P<tag_id>\d+)$',
                  RelatedStudentRemoveView.as_view(),
                  name='remove_students'),
        crapp.Url(r'^remove-students/(?P<tag_id>\d+)/(?P<filters_string>.+)?$',
                  RelatedStudentRemoveView.as_view(),
                  name='remove_students_filter'),
    ]
| bsd-3-clause | c870d67fd9f357383c9c8d0f4fe80901 | 36.523883 | 111 | 0.628834 | 3.837535 | false | false | false | false |
mozilla/zamboni | mkt/site/tests/test_storage_utils.py | 7 | 5118 | from functools import partial
import os
import tempfile
import unittest
from django.core.files.base import ContentFile
from django.test.utils import override_settings
import mock
from nose.tools import eq_
from mkt.site.storage_utils import (copy_stored_file, get_private_storage,
get_public_storage, local_storage,
move_stored_file, private_storage,
storage_is_remote, walk_storage)
from mkt.site.tests import TestCase
from mkt.site.utils import rm_local_tmp_dir
def test_storage_walk():
    """Nose generator test: walk_storage must mirror os.walk semantics
    (sorted dirs/subdirs/files tuples), including non-ascii names."""
    tmp = tempfile.mkdtemp()
    jn = partial(os.path.join, tmp)
    try:
        # Build a small tree with nested dirs and a unicode (snowman) name.
        private_storage.save(jn('file1.txt'), ContentFile(''))
        private_storage.save(jn('one/file1.txt'), ContentFile(''))
        private_storage.save(jn('one/file2.txt'), ContentFile(''))
        private_storage.save(jn('one/two/file1.txt'), ContentFile(''))
        private_storage.save(jn('one/three/file1.txt'), ContentFile(''))
        private_storage.save(jn('four/five/file1.txt'), ContentFile(''))
        private_storage.save(jn(u'four/kristi\u2603/kristi\u2603.txt'),
                             ContentFile(''))

        # Normalize to (dir, set(subdirs), set(files)) so ordering within a
        # directory does not matter; directories themselves are sorted.
        results = [(dir, set(subdirs), set(files))
                   for dir, subdirs, files in
                   sorted(walk_storage(tmp, storage=private_storage))]

        # Each yield is one nose-executed assertion, consumed in walk order.
        yield (eq_, results.pop(0),
               (tmp, set(['four', 'one']), set(['file1.txt'])))

        yield (eq_, results.pop(0),
               (jn('four'), set(['five', 'kristi\xe2\x98\x83']), set([])))

        yield (eq_, results.pop(0),
               (jn('four/five'), set([]), set(['file1.txt'])))

        yield (eq_, results.pop(0),
               (jn('four/kristi\xe2\x98\x83'), set([]),
                set(['kristi\xe2\x98\x83.txt'])))

        yield (eq_, results.pop(0),
               (jn('one'), set(['three', 'two']),
                set(['file1.txt', 'file2.txt'])))

        yield (eq_, results.pop(0),
               (jn('one/three'), set([]), set(['file1.txt'])))

        yield (eq_, results.pop(0),
               (jn('one/two'), set([]), set(['file1.txt'])))

        # All walked entries must have been consumed.
        yield (eq_, len(results), 0)
    finally:
        rm_local_tmp_dir(tmp)
class TestFileOps(unittest.TestCase):
    """Tests for copy_stored_file/move_stored_file between storages."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        rm_local_tmp_dir(self.tmp)

    def path(self, path):
        """Return ``path`` rooted inside the per-test temp dir."""
        return os.path.join(self.tmp, path)

    def contents(self, path):
        """Read and return the raw bytes stored at ``path``."""
        with private_storage.open(path, 'rb') as fp:
            return fp.read()

    def newfile(self, name, contents):
        """Create a file with ``contents`` in private storage; return its path."""
        src = self.path(name)
        private_storage.save(src, ContentFile(contents))
        return src

    def test_copy(self):
        src = self.newfile('src.txt', '<contents>')
        dst = self.path('somedir/dst.txt')
        copy_stored_file(
            src, dst,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(dst), '<contents>')

    def test_self_copy(self):
        # Copying a file onto itself must not truncate it.
        src = self.newfile('src.txt', '<contents>')
        dst = self.path('src.txt')
        copy_stored_file(
            src, dst,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(dst), '<contents>')

    def test_move(self):
        # Move = copy to destination + source removed.
        src = self.newfile('src.txt', '<contents>')
        dst = self.path('somedir/dst.txt')
        move_stored_file(
            src, dst,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(dst), '<contents>')
        eq_(private_storage.exists(src), False)

    def test_non_ascii(self):
        # Unicode file names and utf-8 encoded contents must round-trip.
        src = self.newfile(u'kristi\u0107.txt',
                           u'ivan kristi\u0107'.encode('utf8'))
        dst = self.path(u'somedir/kristi\u0107.txt')
        copy_stored_file(
            src, dst,
            src_storage=private_storage, dst_storage=private_storage)
        eq_(self.contents(dst), 'ivan kristi\xc4\x87')
class TestStorageClasses(TestCase):
    """Tests that storage factory helpers honour DEFAULT_FILE_STORAGE."""

    @override_settings(
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.S3BotoPrivateStorage')
    def test_get_storage_remote(self):
        # S3-backed default storage => remote storages are returned.
        assert storage_is_remote()
        eq_(get_private_storage().__class__.__name__, 'S3BotoPrivateStorage')
        eq_(get_public_storage().__class__.__name__, 'S3BotoPublicStorage')

    @override_settings(
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.LocalFileStorage')
    def test_get_storage_local(self):
        # Local default storage => both helpers return the local class.
        assert not storage_is_remote()
        eq_(get_private_storage().__class__.__name__, 'LocalFileStorage')
        eq_(get_public_storage().__class__.__name__, 'LocalFileStorage')

    @override_settings(
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.LocalFileStorage')
    @mock.patch('mkt.site.storage_utils.shutil.copyfileobj')
    def test_copy_stored_file_when_local(self, mock):
        # Same path + both storages local => the copy must be a no-op.
        tmp = tempfile.mkstemp()[1]
        copy_stored_file(tmp, tmp, src_storage=local_storage,
                         dst_storage=private_storage)
        assert not mock.called
        local_storage.delete(tmp)
| bsd-3-clause | 47b80e47f0e3b9e258e17c15d63fed53 | 36.357664 | 77 | 0.579523 | 3.622081 | false | true | false | false |
mozilla/zamboni | mkt/reviewers/migrations/0001_initial.py | 13 | 4165 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the reviewers app.

    Creates the review-queue tables (additional_review, escalation_queue,
    rereview_queue), canned responses, editor subscriptions and the
    reviewer_scores point-tracking table. Auto-generated by Django's
    makemigrations; avoid hand-editing beyond keeping it in sync with
    the models.
    """

    dependencies = [
    ]

    operations = [
        # Extra review pass (e.g. a regional queue) for an app.
        migrations.CreateModel(
            name='AdditionalReview',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('queue', models.CharField(max_length=30)),
                ('passed', models.NullBooleanField()),
                ('review_completed', models.DateTimeField(null=True)),
                ('comment', models.CharField(max_length=255, null=True, blank=True)),
            ],
            options={
                'db_table': 'additional_review',
                'get_latest_by': 'created',
            },
            bases=(models.Model,),
        ),
        # Pre-written review responses, grouped for the reviewer UI.
        migrations.CreateModel(
            name='CannedResponse',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('sort_group', models.CharField(max_length=255)),
            ],
            options={
                'db_table': 'cannedresponses',
            },
            bases=(models.Model,),
        ),
        # Subscription of an editor to review-related notifications.
        migrations.CreateModel(
            name='EditorSubscription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'editor_subscriptions',
            },
            bases=(models.Model,),
        ),
        # Queue of apps escalated for senior review.
        migrations.CreateModel(
            name='EscalationQueue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'escalation_queue',
            },
            bases=(models.Model,),
        ),
        # Queue of apps flagged for re-review.
        migrations.CreateModel(
            name='RereviewQueue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'rereview_queue',
            },
            bases=(models.Model,),
        ),
        # Points awarded to reviewers; note_key encodes the review type.
        migrations.CreateModel(
            name='ReviewerScore',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('score', models.SmallIntegerField()),
                ('note_key', models.SmallIntegerField(default=0, choices=[(0, 'Manual Reviewer Points'), (70, 'Web App Review'), (71, 'Packaged App Review'), (72, 'Web App Re-review'), (73, 'Updated Packaged App Review'), (74, 'Privileged App Review'), (75, 'Updated Privileged App Review'), (81, 'Moderated App Review'), (82, 'App Review Moderation Reverted'), (90, 'Tarako App Review'), (100, 'App Abuse Report Read'), (101, 'Website Abuse Report Read')])),
                ('note', models.CharField(max_length=255, blank=True)),
            ],
            options={
                'ordering': ('-created',),
                'db_table': 'reviewer_scores',
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause | beb8c91c4dc063de776973c75937ea93 | 42.842105 | 459 | 0.527251 | 4.507576 | false | false | false | false |
mozilla/zamboni | mkt/ratings/helpers.py | 6 | 1160 | import jingo
from django.utils.translation import ugettext as _
from mkt.access import acl
@jingo.register.filter
def stars(num, large=False):
    """Template filter returning the accessible rating label for ``num``.

    ``num`` is the average rating (float or None); ``large`` is accepted for
    template compatibility but unused here.
    """
    # check for 0.0 incase None was cast to a float. Should
    # be safe since lowest rating you can give is 1.0
    if num is None or num == 0.0:
        return _('Not yet reviewed')
    else:
        num = min(5, int(round(num)))
        # i18n fix: translate the *format string*, then interpolate. The
        # original interpolated first, so the msgid could never match an
        # entry in the translation catalog.
        return _('Reviewed %s out of 5 stars') % num
def user_can_delete_review(request, review):
    """Return whether or not the request.user can delete reviews.

    People who can delete reviews:
    * The original review author.
    * Reviewers, but only if they aren't listed as an author of the add-on.
    * Users in a group with "Users:Edit" privileges.
    * Users in a group with "Apps:ModerateReview" privileges.
    """
    if review.user_id == request.user.id:
        # Authors may always delete their own reviews.
        return True
    if review.addon.has_author(request.user):
        # Add-on authors may not moderate reviews of their own add-on.
        return False
    return (acl.check_reviewer(request) or
            acl.action_allowed(request, 'Users', 'Edit') or
            acl.action_allowed(request, 'Apps', 'ModerateReview'))
| bsd-3-clause | cf764bcf42ea445b2a68799771febe64 | 32.142857 | 77 | 0.64569 | 3.625 | false | false | false | false |
mozilla/zamboni | mkt/zadmin/management/commands/removeuserfromgroup.py | 5 | 1468 | from django.core.management.base import BaseCommand, CommandError
import commonware.log
from mkt.access.models import Group, GroupUser
from mkt.users.models import UserProfile
class Command(BaseCommand):
    """Management command wrapper around :func:`do_removeuser`."""
    help = ('Remove a user from a group. Syntax: \n'
            '    ./manage.py removeuserfromgroup <user_id|email> <group_id>')

    log = commonware.log.getLogger('z.users')

    # Positional-args API predates argparse-based commands.
    use_argparse = False

    def handle(self, *args, **options):
        try:
            do_removeuser(args[0], args[1])

            msg = 'Removing %s from %s\n' % (args[0], args[1])
            self.log.info(msg)
            self.stdout.write(msg)
        except IndexError:
            # Missing positional arguments -> show usage.
            raise CommandError(self.help)
def do_removeuser(user, group):
    """Resolve ``user`` (email or pk) and ``group`` (pk), then remove the
    membership. Raises CommandError on bad input or missing objects."""
    try:
        if '@' in user:
            user_obj = UserProfile.objects.get(email=user)
        elif user.isdigit():
            user_obj = UserProfile.objects.get(pk=user)
        else:
            raise CommandError('Unknown input for user.')

        if not group.isdigit():
            raise CommandError('Group must be a valid ID.')
        group_obj = Group.objects.get(pk=group)

        # Doesn't actually check if the user was in the group or not.
        GroupUser.objects.filter(user=user_obj, group=group_obj).delete()
    except UserProfile.DoesNotExist:
        raise CommandError('User (%s) does not exist.' % user)
    except Group.DoesNotExist:
        raise CommandError('Group (%s) does not exist.' % group)
| bsd-3-clause | 04a8f7646f4e3b89b08331b1e22b6b98 | 30.234043 | 77 | 0.616485 | 4.021918 | false | false | false | false |
mozilla/zamboni | mkt/constants/ratingsbodies.py | 4 | 9472 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _lazy
NAME_GENERAL = _lazy('For all ages')
# L10n: %d is the age in years. For ages %d and higher.
NAME_LAZY = _lazy('For ages %d+') # Fill this in after accessing.
NAME_REJECTED = _lazy(u'Rating Rejected')
NAME_PENDING = _lazy(u'Rating Pending')
class RATING(object):
    """
    Content rating.

    iarc_name -- how IARC names the rating, to talk with IARC.
    age -- minimum age of the rating's age recommendation.
    name -- how we name the rating, for translated display on all pages.
    label -- for CSS classes, to create icons.
    adult -- whether this is an adults-only rating (set True on 18+ tiers).
    """
    age = None
    name = None
    label = None
    adult = False
class RATING_BODY(object):
    """
    Content rating body.

    iarc_name -- how IARC names the ratings body, to talk with IARC.
    ratings -- list of RATINGs associated with this body.
    name -- for general translated display on all pages.
    label -- for CSS classes, to create icons.
    description -- for general translated display on all pages.
    full_name -- in case we ever want to display the full translated name.
    url -- in case we ever want to link to the ratings body page for more info.
    """
    label = None
# CLASSIND rating tiers (Brazil). ``id`` is the stable database value,
# ``age`` the minimum recommended age, ``iarc_name`` the IARC wire name.
class CLASSIND_L(RATING):
    id = 0
    age = 0
    iarc_name = 'Livre'


class CLASSIND_10(RATING):
    id = 1
    age = 10
    iarc_name = '10+'


class CLASSIND_12(RATING):
    id = 2
    age = 12
    iarc_name = '12+'


class CLASSIND_14(RATING):
    id = 3
    age = 14
    iarc_name = '14+'


class CLASSIND_16(RATING):
    id = 4
    age = 16
    iarc_name = '16+'


class CLASSIND_18(RATING):
    id = 5
    age = 18
    iarc_name = '18+'
    adult = True


class CLASSIND(RATING_BODY):
    """
    The Brazilian game ratings body (aka. DEJUS, DJCTQ).
    """
    id = 0
    iarc_name = 'CLASSIND'
    ratings = (CLASSIND_L, CLASSIND_10, CLASSIND_12, CLASSIND_14, CLASSIND_16,
               CLASSIND_18)

    name = 'CLASSIND'
    description = _lazy(u'Brazil')
    full_name = _lazy(u'Department of Justice, Rating, Titles and '
                      u'Qualification')
    url = ('http://portal.mj.gov.br/classificacao/data/Pages/'
           'MJ6BC270E8PTBRNN.htm')
# Generic rating tiers, used where no regional body applies. GENERIC_RP is
# the special "Rating Pending" tier with no age attached.
class GENERIC_3(RATING):
    id = 0
    age = 3
    iarc_name = '3+'


class GENERIC_7(RATING):
    id = 1
    age = 7
    iarc_name = '7+'


class GENERIC_12(RATING):
    id = 2
    age = 12
    iarc_name = '12+'


class GENERIC_16(RATING):
    id = 3
    age = 16
    iarc_name = '16+'


class GENERIC_18(RATING):
    id = 4
    age = 18
    iarc_name = '18+'
    adult = True


class GENERIC_RP(RATING):
    id = 5
    iarc_name = 'RP'
    label = 'pending'
    name = NAME_PENDING


class GENERIC(RATING_BODY):
    """
    The generic game ratings body (used in Germany, for example).
    """
    id = 1
    iarc_name = 'Generic'
    ratings = (GENERIC_3, GENERIC_7, GENERIC_12, GENERIC_16, GENERIC_18,
               GENERIC_RP)

    name = _lazy('Generic')
    description = ''  # No comment.
    full_name = _lazy(u'Generic')
# USK (Germany) individual ratings.
class USK_0(RATING):
    id = 0
    age = 0
    iarc_name = '0+'
class USK_6(RATING):
    id = 1
    age = 6
    iarc_name = '6+'
class USK_12(RATING):
    id = 2
    age = 12
    iarc_name = '12+'
class USK_16(RATING):
    id = 3
    age = 16
    iarc_name = '16+'
class USK_18(RATING):
    id = 4
    age = 18
    iarc_name = '18+'
    adult = True
class USK_REJECTED(RATING):
    # No age -- label/name set explicitly (see GENERIC_RP for the pattern).
    id = 5
    iarc_name = 'Rating Refused'
    label = 'rating-refused'
    name = NAME_REJECTED
class USK(RATING_BODY):
    """
    The organization responsible for game ratings in Germany
    (aka. Unterhaltungssoftware Selbstkontrolle).
    """
    id = 2
    iarc_name = 'USK'
    ratings = (USK_0, USK_6, USK_12, USK_16, USK_18, USK_REJECTED)
    name = 'USK'
    description = _lazy(u'Germany')
    full_name = _lazy(u'Entertainment Software Self-Regulation Body')
    url = 'http://www.usk.de/en/'
# ESRB (North/South America except Brazil) individual ratings. These set
# `name` explicitly because the ESRB labels are words, not bare ages.
class ESRB_E(RATING):
    """Everybody."""
    id = 0
    age = 0
    iarc_name = 'Everyone'
    name = _lazy('Everyone')
class ESRB_10(RATING):
    id = 1
    age = 10
    iarc_name = 'Everyone 10+'
    name = _lazy('Everyone 10+') # L10n: `10+` is age ten and over.
class ESRB_T(RATING):
    id = 2
    age = 13
    iarc_name = 'Teen'
    name = _lazy('Teen')
class ESRB_M(RATING):
    id = 3
    age = 17
    iarc_name = 'Mature 17+'
    name = _lazy('Mature 17+') # L10n: `17+` is age seventeen and over.
class ESRB_A(RATING):
    id = 4
    age = 18
    iarc_name = 'Adults Only'
    name = _lazy('Adults Only 18+') # L10n: `18+` is age eighteen and over.
    adult = True
class ESRB(RATING_BODY):
    """
    The North American game ratings body (i.e. USA, Canada).
    """
    id = 3
    iarc_name = 'ESRB'
    ratings = (ESRB_E, ESRB_10, ESRB_T, ESRB_M, ESRB_A)
    name = 'ESRB'
    # L10n: North and South American, but not Brazil.
    description = _lazy(u'All Americas except Brazil')
    full_name = _lazy(u'Entertainment Software Rating Board')
    url = 'http://esrb.org'
# PEGI (Europe) individual ratings.
class PEGI_3(RATING):
    id = 0
    age = 3
    iarc_name = '3+'
class PEGI_7(RATING):
    id = 1
    age = 7
    iarc_name = '7+'
class PEGI_12(RATING):
    id = 2
    age = 12
    iarc_name = '12+'
class PEGI_16(RATING):
    id = 3
    age = 16
    iarc_name = '16+'
class PEGI_18(RATING):
    id = 4
    age = 18
    iarc_name = '18+'
    adult = True
class PEGI_PARENTAL_GUIDANCE(RATING):
    # No age -- label/name set explicitly.
    id = 5
    iarc_name = 'Parental Guidance'
    label = 'parental-guidance'
    name = _lazy(u'Parental Guidance')
class PEGI(RATING_BODY):
    """
    The European game ratings body (i.e. GBR, Poland, Spain).
    """
    id = 4
    iarc_name = 'PEGI'
    ratings = (PEGI_3, PEGI_7, PEGI_12, PEGI_16, PEGI_18,
               PEGI_PARENTAL_GUIDANCE)
    name = 'PEGI'
    description = _lazy(u'Europe')
    full_name = _lazy(u'Pan European Game Information')
    url = 'http://www.pegi.info'
# Registry of ratings bodies keyed by their integer id.
RATINGS_BODIES = {
    CLASSIND.id: CLASSIND,
    GENERIC.id: GENERIC,
    USK.id: USK,
    ESRB.id: ESRB,
    PEGI.id: PEGI,
}
# Attach ratings bodies to ratings.
# (Gives every RATING class a `ratingsbody` back-reference to its owner.)
for rb in RATINGS_BODIES.values():
    for r in rb.ratings:
        r.ratingsbody = rb
# Explicitly-ordered list of every body.
ALL_RATINGS_BODIES = [CLASSIND, GENERIC, USK, ESRB, PEGI]
def ALL_RATINGS():
    """
    Return a flat list of every rating across all (waffled) ratings
    bodies, in RATINGS_BODIES iteration order.
    """
    return [rating for body in RATINGS_BODIES.values()
            for rating in body.ratings]
def RATINGS_BY_NAME():
    """
    Create a list of tuples (choices) after we know the locale since this
    attempts to concatenate two lazy translations in constants file.
    """
    all_ratings = ALL_RATINGS()
    choices = []
    for body in RATINGS_BODIES.values():
        for rating in body.ratings:
            label = u'%s - %s' % (body.name, dehydrate_rating(rating).name)
            choices.append((all_ratings.index(rating), label))
    return choices
def slugify_iarc_name(obj):
    """
    Return a slug-like label derived from ``obj.iarc_name``: lowercased,
    with each space turned into a hyphen (e.g. "Rating Refused" ->
    "rating-refused").
    """
    return '-'.join(obj.iarc_name.lower().split(' '))
def dehydrate_rating(rating_class):
    """
    Returns a rating with translated fields attached and with fields that are
    easily created dynamically.
    """
    rating = rating_class()
    if rating.label is None:
        # str(rating.age) is truthy even for age 0 ('0'), so the slugified
        # iarc_name fallback looks unreachable in practice: every rating
        # class without an age sets `label` explicitly above.
        # NOTE(review): confirm the RATING base class defines `age`.
        rating.label = str(rating.age) or slugify_iarc_name(rating)
    if rating.name is None:
        if rating.age == 0:
            rating.name = unicode(NAME_GENERAL)
        else:
            # NAME_LAZY is a lazy template interpolated with the age.
            rating.name = unicode(NAME_LAZY) % rating.age
    # Force any lazy translation into a concrete unicode string.
    rating.name = unicode(rating.name)
    return rating
def dehydrate_ratings_body(body_class):
    """Returns a rating body with translated fields attached."""
    body = body_class()
    if body.label is None:
        # Derive the CSS/icon label from the IARC name (e.g. "USK" -> "usk").
        body.label = slugify_iarc_name(body)
    # Force lazy translations into concrete unicode strings.
    body.name = unicode(body.name)
    body.description = unicode(body.description)
    return body
def pth(path):
    """Return *path* prefixed with the root ratings-icon directory."""
    return ''.join(('img/icons/ratings/', path))
# Icon paths for each rating, keyed first by ratings-body label, then by
# the rating's label (an age string, or a named label like 'pending').
IARC_ICONS = {
    'ratings': {
        # The keys are ratings' labels.
        'classind': {
            '0': pth('CLASSIND_L.png'),
            '10': pth('CLASSIND_10.png'),
            '12': pth('CLASSIND_12.png'),
            '14': pth('CLASSIND_14.png'),
            '16': pth('CLASSIND_16.png'),
            '18': pth('CLASSIND_18.png'),
        },
        'esrb': {
            '0': pth('ESRB_e.png'),
            '10': pth('ESRB_e10.png'),
            '13': pth('ESRB_t.png'),
            '17': pth('ESRB_m.png'),
            '18': pth('ESRB_ao.png'),
        },
        'generic': {
            '3': pth('generic_3.png'),
            '7': pth('generic_7.png'),
            '12': pth('generic_12.png'),
            '16': pth('generic_16.png'),
            '18': pth('generic_18.png'),
            'pending': pth('generic_rp.png'),
        },
        'pegi': {
            '3': pth('pegi_3.png'),
            '7': pth('pegi_7.png'),
            '12': pth('pegi_12.png'),
            '16': pth('pegi_16.png'),
            '18': pth('pegi_18.png'),
            'parental-guidance': pth('pegi_exclamation.png'),
        },
        'usk': {
            '0': pth('USK_0.png'),
            '6': pth('USK_6.png'),
            '12': pth('USK_12.png'),
            '16': pth('USK_16.png'),
            '18': pth('USK_18.png'),
            'rating-refused': pth('USK_RR.png')
        }
    }
}
| bsd-3-clause | 74e8b2c128be5c903de2863beba3dd86 | 20.625571 | 79 | 0.561339 | 2.994625 | false | false | false | false |
mozilla/zamboni | mkt/account/serializers.py | 6 | 5566 | from functools import partial
from rest_framework import fields, serializers
import mkt
from mkt.access import acl
from mkt.access.models import Group
from mkt.api.serializers import PotatoCaptchaSerializer
from mkt.users.models import UserProfile
class AccountSerializer(serializers.ModelSerializer):
    """Serializer for a user's own editable account settings."""
    display_name = serializers.CharField(required=True)
    class Meta:
        model = UserProfile
        fields = ['display_name', 'enable_recommendations']
    def to_representation(self, instance):
        """Return obj.name instead of display_name to handle users without
        a valid display_name."""
        data = super(AccountSerializer, self).to_representation(instance)
        data["display_name"] = instance.name
        return data
class AccountInfoSerializer(serializers.ModelSerializer):
    """Read-only login source / verification info for an account."""
    ALLOWED_SOURCES = [mkt.LOGIN_SOURCE_FXA]
    source = serializers.CharField(read_only=True)
    verified = serializers.BooleanField(source='is_verified', read_only=True)
    class Meta:
        model = UserProfile
        fields = ['source', 'verified']
    def to_representation(self, obj):
        """Return the sources slug instead of the id."""
        data = super(AccountInfoSerializer, self).to_representation(obj)
        # Unsaved users are reported as "unknown"; any source outside
        # ALLOWED_SOURCES falls back to the browserid source.
        if obj.pk is None:
            source_id = mkt.LOGIN_SOURCE_UNKNOWN
        elif obj.source in self.ALLOWED_SOURCES:
            source_id = obj.source
        else:
            source_id = mkt.LOGIN_SOURCE_BROWSERID
        data["source"] = mkt.LOGIN_SOURCE_LOOKUP[source_id]
        return data
class FeedbackSerializer(PotatoCaptchaSerializer):
    """Free-form site feedback; spam protection comes from the
    PotatoCaptchaSerializer base."""
    feedback = fields.CharField(allow_blank=False)
    chromeless = fields.CharField(required=False)
    from_url = fields.CharField(required=False)
    user = fields.ReadOnlyField(required=False)
    platform = fields.CharField(required=False, allow_null=True)
    def to_representation(self, attrs):
        attrs = super(FeedbackSerializer, self).to_representation(attrs)
        # Fall back to the `dev` querystring param when no platform given.
        if not attrs.get('platform'):
            attrs['platform'] = self.request.GET.get('dev', '')
        # Attach the authenticated user, or None for anonymous feedback.
        if self.request.user.is_authenticated():
            attrs['user'] = unicode(self.request.user)
        else:
            attrs['user'] = None
        return attrs
class LoginSerializer(serializers.Serializer):
    """Browser ID-style login payload (assertion + audience)."""
    assertion = fields.CharField(required=True)
    audience = fields.CharField(required=False)
    is_mobile = fields.BooleanField(required=False, default=False)
class FxALoginSerializer(serializers.Serializer):
    """Firefox Accounts login payload (OAuth response + state token)."""
    auth_response = fields.CharField(required=True)
    state = fields.CharField(required=True)
class NewsletterSerializer(serializers.Serializer):
    """Newsletter signup payload; maps API-facing newsletter slugs onto
    the ids basket expects."""
    NEWSLETTER_CHOICES_API = {
        # string passed to the API : actual string passed to basket.
        'about:apps': 'mozilla-and-you,marketplace-desktop',
        'marketplace-firefoxos': 'marketplace',
        'marketplace-desktop': 'mozilla-and-you',
        'marketplace-android': 'mozilla-and-you'
    }
    email = fields.EmailField()
    newsletter = fields.ChoiceField(
        default='marketplace-firefoxos',
        required=False,
        choices=NEWSLETTER_CHOICES_API.items())
    lang = fields.CharField()
    def to_representation(self, obj):
        """Transform from the string the API receives to the one we need to
        pass to basket."""
        data = super(NewsletterSerializer, self).to_representation(obj)
        # Unknown choices map to the basket id of the field default.
        default = self.fields['newsletter'].default
        data['newsletter'] = self.NEWSLETTER_CHOICES_API.get(obj['newsletter'],
                                                             default)
        return data
class PermissionsSerializer(serializers.Serializer):
    """Computed map of ACL-derived permissions for the requesting user."""
    permissions = fields.SerializerMethodField()
    def get_permissions(self, obj):
        request = self.context['request']
        # Partially apply the request so each check below reads as
        # allowed(<group>, <permission>); '%' is passed where any
        # permission within the group suffices (see acl.action_allowed).
        allowed = partial(acl.action_allowed, request)
        permissions = {
            'admin': allowed('Admin', '%'),
            'developer': request.user.is_developer,
            'localizer': allowed('Localizers', '%'),
            'lookup': allowed('AccountLookup', '%'),
            'curator': (
                allowed('Collections', 'Curate') or
                allowed('Feed', 'Curate')
            ),
            'reviewer': allowed('Apps', 'Review'),
            'webpay': (allowed('Transaction', 'NotifyFailure') and
                       allowed('ProductIcon', 'Create')),
            'website_submitter': allowed('Websites', 'Submit'),
            'stats': allowed('Stats', 'View'),
            'revenue_stats': allowed('RevenueStats', 'View'),
            'content_tools_addon_review': allowed('ContentTools',
                                                  'AddonReview'),
        }
        return permissions
class UserSerializer(AccountSerializer):
    """
    A wacky serializer type that unserializes PK numbers and
    serializes user fields.
    """
    # Hyperlink back to the user's account-settings endpoint.
    resource_uri = serializers.HyperlinkedRelatedField(
        view_name='account-settings', source='pk',
        read_only=True)
    class Meta:
        model = UserProfile
        fields = ('display_name', 'resource_uri')
class GroupsSerializer(serializers.ModelSerializer):
    """Read-only serializer for access-control Groups."""
    class Meta:
        model = Group
        fields = ('id', 'name', 'restricted')
        read_only_fields = ('id', 'name', 'restricted')
class TOSSerializer(serializers.Serializer):
    """Whether the requesting user has signed the developer agreement."""
    has_signed = fields.SerializerMethodField()
    def get_has_signed(self, obj):
        # read_dev_agreement is a timestamp once signed, None otherwise.
        return (self.context['request'].user.read_dev_agreement is not None)
| bsd-3-clause | f4a8a96d3ae72c167ad0903950e97141 | 33.7875 | 79 | 0.642831 | 4.334891 | false | false | false | false |
mozilla/zamboni | mkt/comm/filters.py | 6 | 1440 | from django.db.models import Q
from rest_framework.filters import BaseFilterBackend
from mkt.users.models import UserProfile
class NoteContentFilter(BaseFilterBackend):
    """
    Filter that searches note content based on `q`.
    Query must be at least three characters; shorter (or empty) queries
    return the queryset unfiltered.
    """
    def filter_queryset(self, request, queryset, view):
        q = request.GET.get('q', '').lower()
        if not q or len(q) < 3:
            return queryset
        # Get notes where body matches search query.
        note_ids = list((queryset.filter(body__icontains=q)
                         .values_list('id', flat=True)))
        # Combine w/ notes where search query matches author user profile.
        # filter(None, ...) drops users without any associated notes.
        note_ids += filter(None, UserProfile.objects.filter(
            Q(email__icontains=q) | Q(display_name__icontains=q)
        ).values_list('comm_notes', flat=True))
        return queryset.filter(id__in=note_ids)
class NoteContentTypeFilter(BaseFilterBackend):
"""
Filters apps vs. add-ons based on `doc_type`.
"""
def filter_queryset(self, request, queryset, view):
doc_type = request.GET.get('doc_type', '').lower()
if not doc_type:
return queryset
if doc_type == 'extension':
queryset = queryset.filter(thread___extension__isnull=False)
if doc_type == 'webapp':
queryset = queryset.filter(thread___addon__isnull=False)
return queryset
| bsd-3-clause | 3d9c63a6a8048b24ffaa39163fecd159 | 35 | 74 | 0.623611 | 4.149856 | false | false | false | false |
mozilla/zamboni | mkt/site/management/commands/resave_purified_translations.py | 6 | 1651 | from django.core.management.base import BaseCommand
from mkt.feed.models import (FeedApp, FeedCollection, FeedCollectionMembership,
FeedShelf, FeedShelfMembership)
from mkt.versions.models import Version
from mkt.webapps.models import Webapp
from mkt.site.tasks import update_translations
from mkt.site.utils import chunked
class Command(BaseCommand):
    help = "Re-save PurifiedTranslation fields to remove outgoing URLs."

    def handle(self, *args, **kwargs):
        """Collect the translation ids of every URL-bearing field and queue
        them for re-saving in chunks of 100."""
        # (model, field) pairs whose values are translation ids. This
        # replaces eleven copy-pasted ids.extend(...) calls; order is
        # preserved from the original.
        sources = [
            (FeedCollectionMembership, 'group'),
            (FeedCollection, 'name'),
            (FeedCollection, 'description'),
            (FeedShelfMembership, 'group'),
            (FeedShelf, 'description'),
            (FeedShelf, 'name'),
            (FeedApp, 'description'),
            (FeedApp, 'pullquote_text'),
            (Version, 'releasenotes'),
            (Webapp, 'description'),
            (Webapp, 'privacy_policy'),
        ]
        ids = []
        for model, field in sources:
            ids.extend(model.objects.values_list(field, flat=True))
        # Filter out any None's.
        ids = filter(None, ids)
        for chunk in chunked(ids, 100):
            update_translations.delay(chunk)
| bsd-3-clause | e0957bc62a53df739fc5c874e226e538 | 35.688889 | 79 | 0.642035 | 4.148241 | false | false | false | false |
mozilla/zamboni | mkt/feed/indexers.py | 17 | 12728 | """
Indexers for FeedApp, FeedBrand, FeedCollection, FeedShelf, FeedItem for
feed homepage and curation tool search.
"""
import mkt.carriers
import mkt.feed.constants as feed
import mkt.regions
from mkt.search.indexers import BaseIndexer
from mkt.translations.models import attach_trans_dict
from mkt.webapps.models import Webapp
def get_slug_multifield():
    """Mapping fragment indexing a slug both analyzed (sub-field 'slug')
    and raw/not_analyzed (sub-field 'raw', for exact matching)."""
    # TODO: convert to new syntax on ES 1.0+.
    return {
        'type': 'multi_field',
        'fields': {
            'slug': {'type': 'string'},
            'raw': {'type': 'string', 'index': 'not_analyzed'},
        }
    }
class FeedAppIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedApp objects."""
    @classmethod
    def get_model(cls):
        """Returns the Django model this MappingType relates to"""
        from mkt.feed.models import FeedApp
        return FeedApp
    @classmethod
    def get_mapping(cls):
        """Returns an Elasticsearch mapping for this MappingType"""
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                'properties': {
                    'id': {'type': 'long'},
                    'app': {'type': 'long'},
                    'background_color': cls.string_not_analyzed(),
                    'color': cls.string_not_analyzed(),
                    'created': {'type': 'date', 'format': 'dateOptionalTime'},
                    'image_hash': cls.string_not_analyzed(),
                    'item_type': cls.string_not_analyzed(),
                    'preview': {'type': 'object', 'dynamic': 'true'},
                    'pullquote_attribution': cls.string_not_analyzed(),
                    'pullquote_rating': {'type': 'short'},
                    'pullquote_text': {'type': 'string',
                                       'analyzer': 'default_icu'},
                    'search_names': {'type': 'string',
                                     'analyzer': 'default_icu'},
                    'slug': get_slug_multifield(),
                    'type': cls.string_not_analyzed(),
                }
            }
        }
        return cls.attach_translation_mappings(mapping, ('description',))
    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document"""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)
        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])
        attach_trans_dict(Webapp, [obj.app])
        doc = {
            'id': obj.id,
            'app': obj.app_id,
            'background_color': obj.background_color,
            'color': obj.color,
            'created': obj.created,
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_APP,
            # Preview is optional; serialize its id/thumbnail info or None.
            'preview': {'id': obj.preview.id,
                        'thumbnail_size': obj.preview.thumbnail_size,
                        'thumbnail_url': obj.preview.thumbnail_url}
            if getattr(obj, 'preview') else None,
            'pullquote_attribution': obj.pullquote_attribution,
            'pullquote_rating': obj.pullquote_rating,
            # Deduplicated app names across every locale.
            'search_names': list(
                set(string for _, string
                    in obj.app.translations[obj.app.name_id])),
            'slug': obj.slug,
            'type': obj.type,
        }
        # Handle localized fields.
        for field in ('description', 'pullquote_text'):
            doc.update(cls.extract_field_translations(obj, field))
        return doc
class FeedBrandIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedBrand objects."""
    @classmethod
    def get_model(cls):
        from mkt.feed.models import FeedBrand
        return FeedBrand
    @classmethod
    def get_mapping(cls):
        properties = {
            'id': {'type': 'long'},
            'apps': {'type': 'long'},
            'created': {'type': 'date', 'format': 'dateOptionalTime'},
            'layout': cls.string_not_analyzed(),
            'item_type': cls.string_not_analyzed(),
            'slug': get_slug_multifield(),
            'type': {'type': 'string'},
        }
        return {cls.get_mapping_type_name(): {'properties': properties}}
    @classmethod
    def extract_document(cls, pk=None, obj=None):
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)
        app_ids = list(obj.apps().values_list('id', flat=True))
        return {
            'id': obj.id,
            'apps': app_ids,
            'created': obj.created,
            'layout': obj.layout,
            'item_type': feed.FEED_TYPE_BRAND,
            'slug': obj.slug,
            'type': obj.type,
        }
class FeedCollectionIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedCollection objects."""
    @classmethod
    def get_model(cls):
        from mkt.feed.models import FeedCollection
        return FeedCollection
    @classmethod
    def get_mapping(cls):
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                'properties': {
                    'id': {'type': 'long'},
                    'apps': {'type': 'long'},
                    'created': {'type': 'date', 'format': 'dateOptionalTime'},
                    'background_color': cls.string_not_analyzed(),
                    'color': cls.string_not_analyzed(),
                    'group_apps': {'type': 'object', 'dynamic': 'true'},
                    'group_names': {'type': 'object', 'dynamic': 'true'},
                    'image_hash': cls.string_not_analyzed(),
                    'item_type': cls.string_not_analyzed(),
                    'search_names': {'type': 'string',
                                     'analyzer': 'default_icu'},
                    'slug': get_slug_multifield(),
                    'type': cls.string_not_analyzed(),
                }
            }
        }
        return cls.attach_translation_mappings(mapping, ('description',
                                                         'name'))
    @classmethod
    def extract_document(cls, pk=None, obj=None):
        from mkt.feed.models import FeedCollectionMembership
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)
        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])
        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'background_color': obj.background_color,
            'color': obj.color,
            'created': obj.created,
            'group_apps': {},  # Map of app IDs to index in group_names below.
            'group_names': [],  # List of ES-serialized group names.
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_COLL,
            'search_names': list(
                set(string for _, string
                    in obj.translations[obj.name_id])),
            'slug': obj.slug,
            'type': obj.type,
        }
        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedcollectionmembership_set.all()
        attach_trans_dict(FeedCollectionMembership, memberships)
        for member in memberships:
            if member.group:
                group_translation = cls.extract_field_translations(member,
                                                                   'group')
                # Deduplicate group names; each app points at its group's
                # position within group_names.
                if group_translation not in doc['group_names']:
                    doc['group_names'].append(group_translation)
                doc['group_apps'][member.app_id] = (
                    doc['group_names'].index(group_translation))
        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(cls.extract_field_translations(obj, field))
        return doc
class FeedShelfIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedShelf (carrier/region-targeted shelf)
    objects."""
    @classmethod
    def get_model(cls):
        from mkt.feed.models import FeedShelf
        return FeedShelf
    @classmethod
    def get_mapping(cls):
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                'properties': {
                    'id': {'type': 'long'},
                    'apps': {'type': 'long'},
                    'carrier': cls.string_not_analyzed(),
                    'created': {'type': 'date', 'format': 'dateOptionalTime'},
                    'group_apps': {'type': 'object', 'dynamic': 'true'},
                    'group_names': {'type': 'object', 'dynamic': 'true'},
                    'image_hash': cls.string_not_analyzed(),
                    'image_landing_hash': cls.string_not_analyzed(),
                    'item_type': cls.string_not_analyzed(),
                    'region': cls.string_not_analyzed(),
                    'search_names': {'type': 'string',
                                     'analyzer': 'default_icu'},
                    'slug': get_slug_multifield(),
                }
            }
        }
        return cls.attach_translation_mappings(mapping, ('description',
                                                         'name'))
    @classmethod
    def extract_document(cls, pk=None, obj=None):
        from mkt.feed.models import FeedShelfMembership
        if obj is None:
            # Bug fix: previously `cls.get_model().get(pk=pk)` -- the model
            # class has no .get; the lookup belongs on the default manager,
            # matching every other indexer in this module.
            obj = cls.get_model().objects.get(pk=pk)
        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])
        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'carrier': mkt.carriers.CARRIER_CHOICE_DICT[obj.carrier].slug,
            'created': obj.created,
            'group_apps': {},  # Map of app IDs to index in group_names below.
            'group_names': [],  # List of ES-serialized group names.
            'image_hash': obj.image_hash,
            'image_landing_hash': obj.image_landing_hash,
            'item_type': feed.FEED_TYPE_SHELF,
            'region': mkt.regions.REGIONS_CHOICES_ID_DICT[obj.region].slug,
            'search_names': list(set(string for _, string
                                     in obj.translations[obj.name_id])),
            'slug': obj.slug,
        }
        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedshelfmembership_set.all()
        attach_trans_dict(FeedShelfMembership, memberships)
        for member in memberships:
            if member.group:
                group_translation = cls.extract_field_translations(member,
                                                                   'group')
                if group_translation not in doc['group_names']:
                    doc['group_names'].append(group_translation)
                doc['group_apps'][member.app_id] = (
                    doc['group_names'].index(group_translation))
        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(cls.extract_field_translations(obj, field))
        return doc
class FeedItemIndexer(BaseIndexer):
    """Elasticsearch indexer for FeedItem objects."""
    # Bulk-indexing batch size (presumably overriding a BaseIndexer
    # default -- confirm against BaseIndexer).
    chunk_size = 1000
    @classmethod
    def get_model(cls):
        from mkt.feed.models import FeedItem
        return FeedItem
    @classmethod
    def get_mapping(cls):
        doc_type = cls.get_mapping_type_name()
        return {
            doc_type: {
                'properties': {
                    'id': {'type': 'long'},
                    'app': {'type': 'long'},
                    'brand': {'type': 'long'},
                    'carrier': {'type': 'integer'},
                    'category': {'type': 'integer'},
                    'collection': {'type': 'long'},
                    'item_type': cls.string_not_analyzed(),
                    'order': {'type': 'integer'},
                    'region': {'type': 'integer'},
                    'shelf': {'type': 'long'},
                }
            }
        }
    @classmethod
    def extract_document(cls, pk=None, obj=None):
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)
        # Only the FK matching the item_type is indexed; the others are None.
        return {
            'id': obj.id,
            'app': (obj.app_id if obj.item_type == feed.FEED_TYPE_APP
                    else None),
            'brand': (obj.brand_id if obj.item_type == feed.FEED_TYPE_BRAND
                      else None),
            'carrier': obj.carrier,
            'category': obj.category,
            'collection': (obj.collection_id if
                           obj.item_type == feed.FEED_TYPE_COLL else None),
            'item_type': obj.item_type,
            # If no order, put it at end. Make sure order > 0 since we do a
            # ES reciprocal modifier query.
            'order': obj.order + 1 if obj.order is not None else 100,
            'region': obj.region,
            'shelf': (obj.shelf_id if obj.item_type == feed.FEED_TYPE_SHELF
                      else None),
        }
| bsd-3-clause | 29a594a40791083d53e1576fe8eec47a | 35.574713 | 78 | 0.494107 | 4.256856 | false | false | false | false |
mozilla/zamboni | mkt/developers/utils.py | 6 | 6499 | import os
import uuid
from datetime import datetime
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from appvalidator.constants import PRERELEASE_PERMISSIONS
import commonware.log
from PIL import Image
from django.utils.translation import ugettext as _
import mkt
from lib.video import library as video_library
from mkt.comm.utils import create_comm_note
from mkt.constants import APP_PREVIEW_MINIMUMS, comm
from mkt.constants.base import PROMO_IMG_MINIMUMS
from mkt.reviewers.models import EscalationQueue
from mkt.site.storage_utils import private_storage
from mkt.site.utils import ImageCheck
from mkt.users.models import UserProfile
log = commonware.log.getLogger('z.devhub')
def uri_to_pk(uri):
    """
    Convert a resource URI to the primary key of the resource
    (e.g. '/api/v1/apps/app/123/' -> '123').
    """
    return uri.rstrip('/').rsplit('/', 1)[-1]
def check_upload(file_obj, upload_type, content_type):
    """
    Validate an uploaded icon/preview/promo image/video and write it to
    temporary storage.
    Returns (errors, upload_hash): `errors` is a list of translated
    messages, `upload_hash` the temp filename written under
    settings.TMP_PATH/<upload_type>/.
    Raises ValueError for an unknown `upload_type`.
    """
    errors = []
    upload_hash = ''
    is_icon = upload_type == 'icon'
    is_preview = upload_type == 'preview'
    is_promo_img = upload_type == 'promo_img'
    is_video = content_type in mkt.VIDEO_TYPES
    if not any([is_icon, is_preview, is_promo_img, is_video]):
        raise ValueError('Unknown upload type.')
    # By pushing the type onto the instance hash, we can easily see what
    # to do with the file later.
    ext = content_type.replace('/', '-')
    upload_hash = '%s.%s' % (uuid.uuid4().hex, ext)
    loc = os.path.join(settings.TMP_PATH, upload_type, upload_hash)
    # Stream the upload into private temp storage chunk by chunk.
    with private_storage.open(loc, 'wb') as fd:
        for chunk in file_obj:
            fd.write(chunk)
    # A flag to prevent us from attempting to open the image with PIL.
    do_not_open = False
    if is_video:
        if not video_library:
            errors.append(_('Video support not enabled.'))
        else:
            video = video_library(loc)
            video.get_meta()
            if not video.is_valid():
                errors.extend(video.errors)
    else:
        # Image path: check type and animation before size/dimensions.
        check = ImageCheck(file_obj)
        if (not check.is_image() or
                content_type not in mkt.IMG_TYPES):
            do_not_open = True
            if is_icon:
                errors.append(_('Icons must be either PNG or JPG.'))
            else:
                errors.append(_('Images must be either PNG or JPG.'))
        if check.is_animated():
            do_not_open = True
            if is_icon:
                errors.append(_('Icons cannot be animated.'))
            else:
                errors.append(_('Images cannot be animated.'))
    # Per-type maximum size; promo images have no size cap here.
    max_size = (settings.MAX_ICON_UPLOAD_SIZE if is_icon else
                settings.MAX_VIDEO_UPLOAD_SIZE if is_video else
                settings.MAX_IMAGE_UPLOAD_SIZE if is_preview else None)
    if max_size and file_obj.size > max_size:
        do_not_open = True
        # NOTE(review): only icon/video oversize yields a user-visible
        # error; an oversized preview silently skips the PIL checks below.
        # Confirm that is intentional.
        if is_icon or is_video:
            errors.append(
                _('Please use files smaller than %s.') %
                filesizeformat(max_size))
    # Dimension checks via PIL, only for images that passed the above.
    if ((is_icon or is_preview or is_promo_img) and not
            is_video and not do_not_open):
        file_obj.seek(0)
        try:
            im = Image.open(file_obj)
            im.verify()
        except IOError:
            if is_icon:
                errors.append(_('Icon could not be opened.'))
            elif is_preview:
                errors.append(_('Preview could not be opened.'))
            elif is_promo_img:
                errors.append(_('Promo image could not be opened.'))
        else:
            size_x, size_y = im.size
            if is_icon:
                # TODO: This should go away when we allow uploads for
                # individual icon sizes.
                if size_x < 128 or size_y < 128:
                    errors.append(_('Icons must be at least 128px by 128px.'))
                if size_x != size_y:
                    errors.append(_('Icons must be square.'))
            elif is_preview:
                # Previews may be either portrait or landscape.
                if (size_x < APP_PREVIEW_MINIMUMS[0] or
                    size_y < APP_PREVIEW_MINIMUMS[1]) and (
                        size_x < APP_PREVIEW_MINIMUMS[1] or
                        size_y < APP_PREVIEW_MINIMUMS[0]):
                    errors.append(
                        # L10n: {0} and {1} are the height/width of the preview
                        # in px.
                        _('App previews must be at least {0}px by {1}px or '
                          '{1}px by {0}px.').format(*APP_PREVIEW_MINIMUMS))
            elif is_promo_img:
                if (size_x < PROMO_IMG_MINIMUMS[0] or
                        size_y < PROMO_IMG_MINIMUMS[1]):
                    # Currently, not l10n'ed because for curator eyes only.
                    errors.append(
                        'Promo images must be at least {0}px by {1}px'
                        .format(*PROMO_IMG_MINIMUMS))
    return errors, upload_hash
def escalate_app(app, version, user, msg, log_type):
    """Escalate `app`: queue it for escalation review, leave a comm note
    and record a log entry of `log_type` with `msg` as the comment."""
    # Add to escalation queue
    EscalationQueue.objects.get_or_create(addon=app)
    # Create comm note
    create_comm_note(app, version, user, msg,
                     note_type=comm.ACTION_MAP(log_type))
    # Log action
    mkt.log(log_type, app, version, created=datetime.now(),
            details={'comments': msg})
    log.info(u'[app:%s] escalated - %s' % (app.name, msg))
def handle_vip(addon, version, user):
    """Escalate an updated VIP app for review."""
    escalate_app(
        addon, version, user, u'VIP app updated',
        mkt.LOG.ESCALATION_VIP_APP)
def escalate_prerelease_permissions(app, validation, version):
    """Escalate the app if it uses prerelease permissions."""
    # When there are no permissions `validation['permissions']` is
    # `False` rather than a list, so fall back to an empty list.
    requested = validation.get('permissions') or []
    uses_prerelease = any(perm in PRERELEASE_PERMISSIONS
                          for perm in requested)
    if uses_prerelease:
        nobody = UserProfile.objects.get(email=settings.NOBODY_EMAIL_ADDRESS)
        escalate_app(
            app, version, nobody, 'App uses prerelease permissions',
            mkt.LOG.ESCALATION_PRERELEASE_APP)
def prioritize_app(app, user):
    """Flag `app` for priority review and record who asked for it."""
    app.update(priority_review=True)
    msg = u'Priority Review Requested'
    # Create notes and log entries.
    create_comm_note(app, app.latest_version, user, msg,
                     note_type=comm.PRIORITY_REVIEW_REQUESTED)
    mkt.log(mkt.LOG.PRIORITY_REVIEW_REQUESTED, app, app.latest_version,
            created=datetime.now(), details={'comments': msg})
| bsd-3-clause | f35f10acaa76a640436ef41f5402afd9 | 35.105556 | 79 | 0.58809 | 3.809496 | false | false | false | false |
mozilla/zamboni | mkt/submit/forms.py | 3 | 14839 | import datetime
import os
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
import basket
import happyforms
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
import mkt
from mkt.comm.utils import create_comm_note
from mkt.constants import APP_FEATURES, comm
from mkt.developers.forms import AppSupportFormMixin, verify_app_domain
from mkt.files.models import FileUpload
from mkt.files.utils import parse_addon
from mkt.reviewers.models import RereviewQueue
from mkt.site.utils import slug_validator
from mkt.tags.models import Tag
from mkt.tags.utils import clean_tags
from mkt.translations.fields import TransField
from mkt.translations.forms import TranslationFormMixin
from mkt.translations.widgets import TransInput, TransTextarea
from mkt.users.models import UserNotification
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AppFeatures, BlockedSlug, Webapp
def mark_for_rereview(addon, added_devices, removed_devices):
    """Flag `addon` for re-review because its supported devices changed."""
    msg = _(u'Device(s) changed: {0}').format(', '.join(
        [_(u'Added {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
         for d in added_devices] +
        [_(u'Removed {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
         for d in removed_devices]))
    RereviewQueue.flag(addon, mkt.LOG.REREVIEW_DEVICES_ADDED, msg)
def mark_for_rereview_features_change(addon, added_features, removed_features):
    """Flag `addon` for re-review because its feature requirements changed."""
    # L10n: {0} is the list of requirements changes.
    msg = _(u'Requirements changed: {0}').format(', '.join(
        [_(u'Added {0}').format(f) for f in added_features] +
        [_(u'Removed {0}').format(f) for f in removed_features]))
    RereviewQueue.flag(addon, mkt.LOG.REREVIEW_FEATURES_CHANGED, msg)
class DevAgreementForm(happyforms.Form):
    """Developer-agreement acceptance form, with an optional newsletter
    opt-in that subscribes the user via basket."""
    read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree and Continue'),
                                            widget=forms.HiddenInput)
    newsletter = forms.BooleanField(required=False, label=app_surveys.label,
                                    widget=forms.CheckboxInput)
    def __init__(self, *args, **kw):
        # `instance` is the UserProfile being updated; `request` supplies
        # region/language for the newsletter subscription.
        self.instance = kw.pop('instance')
        self.request = kw.pop('request')
        super(DevAgreementForm, self).__init__(*args, **kw)
    def save(self):
        # Record when the agreement was accepted.
        self.instance.read_dev_agreement = datetime.datetime.now()
        self.instance.save()
        if self.cleaned_data.get('newsletter'):
            UserNotification.update_or_create(
                user=self.instance,
                notification_id=app_surveys.id, update={'enabled': True})
            basket.subscribe(self.instance.email,
                             'app-dev',
                             format='H',
                             country=self.request.REGION.slug,
                             lang=self.request.LANG,
                             source_url=os.path.join(settings.SITE_URL,
                                                     'developers/submit'))
class NewWebappVersionForm(happyforms.Form):
    """Validates an uploaded (validated) manifest/package for a new app
    version, including origin/version/duplicate-domain checks."""
    upload_error = _lazy(u'There was an error with your upload. '
                         u'Please try again.')
    upload = forms.ModelChoiceField(
        widget=forms.HiddenInput,
        queryset=FileUpload.objects.filter(valid=True),
        error_messages={'invalid_choice': upload_error})
    def __init__(self, *args, **kw):
        kw.pop('request', None)
        self.addon = kw.pop('addon', None)
        self._is_packaged = kw.pop('is_packaged', False)
        # Set from the package's `role` field during clean().
        self.is_homescreen = False
        super(NewWebappVersionForm, self).__init__(*args, **kw)
    def clean(self):
        data = self.cleaned_data
        if 'upload' not in self.cleaned_data:
            self._errors['upload'] = self.upload_error
            return
        if self.is_packaged():
            # Now run the packaged app check, done in clean, because
            # clean_packaged needs to be processed first.
            try:
                pkg = parse_addon(data['upload'], self.addon)
            except forms.ValidationError, e:
                self._errors['upload'] = self.error_class(e.messages)
                return
            # Collect validation errors so we can display them at once.
            errors = []
            ver = pkg.get('version')
            if (ver and self.addon and
                    self.addon.versions.filter(version=ver).exists()):
                errors.append(_(u'Version %s already exists.') % ver)
            origin = pkg.get('origin')
            if origin:
                # The origin must be unique and, on updates, unchanged.
                try:
                    verify_app_domain(origin, packaged=True,
                                      exclude=self.addon)
                except forms.ValidationError, e:
                    errors.append(e.message)
                if self.addon and origin != self.addon.app_domain:
                    errors.append(_('Changes to "origin" are not allowed.'))
            self.is_homescreen = pkg.get('role') == 'homescreen'
            if errors:
                self._errors['upload'] = self.error_class(errors)
                return
        else:
            # Throw an error if this is a dupe.
            # (JS sets manifest as `upload.name`.)
            try:
                verify_app_domain(data['upload'].name)
            except forms.ValidationError, e:
                self._errors['upload'] = self.error_class(e.messages)
                return
        return data
    def is_packaged(self):
        return self._is_packaged
class NewWebappForm(NewWebappVersionForm):
    """Upload form for a brand-new app submission.

    Extends the version form with an ownership check: the FileUpload
    must belong to the user performing the submission.
    """
    ERRORS = {
        'user': _lazy('User submitting validation does not match.')
    }

    upload = forms.ModelChoiceField(
        widget=forms.HiddenInput,
        queryset=FileUpload.objects.filter(valid=True),
        error_messages={'invalid_choice': _lazy(
            u'There was an error with your upload. Please try again.')})
    packaged = forms.BooleanField(required=False)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(NewWebappForm, self).__init__(*args, **kwargs)

    def clean(self):
        data = super(NewWebappForm, self).clean()
        if not data:
            return
        upload = data.get('upload')
        if self.request and upload:
            if not (upload.user and upload.user.pk == self.request.user.pk):
                # Wrap in error_class for consistent rendering; a bare
                # lazy string here would not display like other errors.
                self._errors['upload'] = self.error_class(
                    [self.ERRORS['user']])
        return data

    def is_packaged(self):
        """Packaged either via constructor flag or the form checkbox."""
        return self._is_packaged or self.cleaned_data.get('packaged', False)
class AppDetailsBasicForm(AppSupportFormMixin, TranslationFormMixin,
                          happyforms.ModelForm):
    """Form for "Details" submission step."""

    PRIVACY_MDN_URL = (
        'https://developer.mozilla.org/Marketplace/'
        'Publishing/Policies_and_Guidelines/Privacy_policies')

    PUBLISH_CHOICES = (
        (mkt.PUBLISH_IMMEDIATE,
         _lazy(u'Publish my app and make it visible to everyone in the '
               u'Marketplace and include it in search results.')),
        (mkt.PUBLISH_PRIVATE,
         _lazy(u'Do not publish my app. Notify me and I will adjust app '
               u'visibility after it is approved.')),
    )

    app_slug = forms.CharField(max_length=30,
                               widget=forms.TextInput(attrs={'class': 'm'}))
    description = TransField(
        label=_lazy(u'Description:'),
        help_text=_lazy(u'The app description is one of the fields used to '
                        u'return search results in the Firefox Marketplace. '
                        u'The app description also appears on the app\'s '
                        u'detail page. Be sure to include a description that '
                        u'accurately represents your app.'),
        widget=TransTextarea(attrs={'rows': 4}))
    tags = forms.CharField(
        label=_lazy(u'Search Keywords:'), required=False,
        widget=forms.Textarea(attrs={'rows': 3}),
        help_text=_lazy(
            u'The search keywords are used to return search results in the '
            u'Firefox Marketplace. Be sure to include a keywords that '
            u'accurately reflect your app.'))
    privacy_policy = TransField(
        label=_lazy(u'Privacy Policy:'),
        widget=TransTextarea(attrs={'rows': 6}),
        help_text=_lazy(
            u'A privacy policy explains how you handle data received '
            u'through your app. For example: what data do you receive? '
            u'How do you use it? Who do you share it with? Do you '
            u'receive personal information? Do you take steps to make '
            u'it anonymous? What choices do users have to control what '
            u'data you and others receive? Enter your privacy policy '
            u'link or text above. If you don\'t have a privacy '
            u'policy, <a href="{url}" target="_blank">learn more on how to '
            u'write one.</a>'))
    homepage = TransField.adapt(forms.URLField)(
        label=_lazy(u'Homepage:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'If your app has another homepage, enter its address here.'))
    support_url = TransField.adapt(forms.URLField)(
        label=_lazy(u'Website:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'If your app has a support website or forum, enter its address '
            u'here.'))
    support_email = TransField.adapt(forms.EmailField)(
        label=_lazy(u'Email:'), required=False,
        widget=TransInput(attrs={'class': 'full'}),
        help_text=_lazy(
            u'This email address will be listed publicly on the Marketplace '
            u'and used by end users to contact you with support issues. This '
            u'email address will be listed publicly on your app details page.'
        ))
    notes = forms.CharField(
        label=_lazy(u'Your comments for reviewers:'), required=False,
        widget=forms.Textarea(attrs={'rows': 2}),
        help_text=_lazy(
            u'Your app will be reviewed by Mozilla before it becomes publicly '
            u'listed on the Marketplace. Enter any special instructions for '
            u'the app reviewers here.'))
    publish_type = forms.TypedChoiceField(
        label=_lazy(u'Once your app is approved, choose a publishing option:'),
        choices=PUBLISH_CHOICES, initial=mkt.PUBLISH_IMMEDIATE,
        widget=forms.RadioSelect())
    is_offline = forms.BooleanField(
        label=_lazy(u'My app works without an Internet connection.'),
        required=False)

    class Meta:
        model = Webapp
        # `tags` and `notes` are deliberately absent: they are handled
        # manually in save() rather than by the ModelForm machinery.
        fields = ('app_slug', 'description', 'privacy_policy', 'homepage',
                  'support_url', 'support_email', 'publish_type', 'is_offline')

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request')

        # TODO: remove this and put it in the field definition above.
        # See https://bugzilla.mozilla.org/show_bug.cgi?id=1072513
        privacy_field = self.base_fields['privacy_policy']
        privacy_field.help_text = mark_safe(privacy_field.help_text.format(
            url=self.PRIVACY_MDN_URL))

        if 'instance' in kwargs:
            instance = kwargs['instance']
            instance.is_offline = instance.guess_is_offline()

        super(AppDetailsBasicForm, self).__init__(*args, **kwargs)

    def clean_app_slug(self):
        """Validate uniqueness/blocklist of the slug; returned lowercased."""
        slug = self.cleaned_data['app_slug']
        slug_validator(slug, lower=False)

        if slug != self.instance.app_slug:
            if Webapp.objects.filter(app_slug=slug).exists():
                raise forms.ValidationError(
                    _('This slug is already in use. Please choose another.'))

            if BlockedSlug.blocked(slug):
                # Interpolate *after* the gettext lookup so the literal
                # msgid matches the translation catalog entry. The old
                # form interpolated first, which made translation fail.
                raise forms.ValidationError(
                    _('The slug cannot be "%s". Please choose another.')
                    % slug)

        return slug.lower()

    def clean_tags(self):
        return clean_tags(self.request, self.cleaned_data['tags'])

    def save(self, *args, **kw):
        # NOTE(review): reads raw self.data['notes'] rather than
        # cleaned_data — presumably intentional since `notes` is not in
        # Meta.fields, but worth confirming.
        if self.data['notes']:
            create_comm_note(self.instance, self.instance.versions.latest(),
                             self.request.user, self.data['notes'],
                             note_type=comm.SUBMISSION)
        self.instance = super(AppDetailsBasicForm, self).save(commit=True)

        for tag_text in self.cleaned_data['tags']:
            Tag(tag_text=tag_text).save_tag(self.instance)

        return self.instance
class AppFeaturesForm(happyforms.ModelForm):
    """ModelForm over AppFeatures: tracks feature changes per app version
    and flags approved apps for re-review when their features change."""

    class Meta:
        exclude = ['version']
        model = AppFeatures

    def __init__(self, *args, **kwargs):
        super(AppFeaturesForm, self).__init__(*args, **kwargs)
        # Snapshot the feature keys that were set at load time so we can
        # diff against them after a save.
        if self.instance:
            self.initial_feature_keys = sorted(self.instance.to_keys())
        else:
            self.initial_feature_keys = None

    def all_fields(self):
        """
        Degeneratorizes self.__iter__(), the list of fields on the form. This
        allows further manipulation of fields: to display a subset of fields or
        order them in a specific way.
        """
        return list(self)

    def required_api_fields(self):
        """
        All fields on the form, alphabetically sorted by help text.
        """
        return sorted(self.all_fields(), key=lambda bound: bound.help_text)

    def get_tooltip(self, field):
        """Return the human-readable description for a feature field,
        or None when the feature key is unknown."""
        feature_key = field.name.split('_', 1)[1].upper()
        if feature_key not in APP_FEATURES:
            return None
        return unicode(APP_FEATURES[feature_key].get('description') or '')

    def get_changed_features(self):
        """Diff the saved instance against the initial snapshot.

        :return: (added_features, removed_features) as sets of names.
        """
        before = dict.fromkeys(self.initial_feature_keys, True)
        previous_names = set(AppFeatures(**before).to_names())
        current_names = set(self.instance.to_names())
        return current_names - previous_names, previous_names - current_names

    def save(self, *args, **kwargs):
        """Persist features; touch the addon and trigger re-review if the
        feature set of an approved app changed."""
        rereview_wanted = kwargs.pop('mark_for_rereview', True)
        webapp = self.instance.version.addon
        saved = super(AppFeaturesForm, self).save(*args, **kwargs)
        # Also save the addon to update modified date and trigger a reindex.
        webapp.save(update_fields=['modified'])
        # Trigger a re-review if necessary.
        if (self.instance and rereview_wanted and
                webapp.status in mkt.WEBAPPS_APPROVED_STATUSES and
                self.changed_data):
            added, removed = self.get_changed_features()
            mark_for_rereview_features_change(webapp, added, removed)
        return saved
| bsd-3-clause | ab96e75fcfa1e9d190a2c0583eb6f622 | 40.105263 | 79 | 0.598827 | 4.11737 | false | false | false | false |
gitpython-developers/gitpython | git/objects/commit.py | 1 | 27232 | # commit.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import datetime
import re
from subprocess import Popen, PIPE
from gitdb import IStream
from git.util import hex_to_bin, Actor, Stats, finalize_process
from git.diff import Diffable
from git.cmd import Git
from .tree import Tree
from . import base
from .util import (
Serializable,
TraversableIterableObj,
parse_date,
altz_to_utctz_str,
parse_actor_and_date,
from_timestamp,
)
from time import time, daylight, altzone, timezone, localtime
import os
from io import BytesIO
import logging
# typing ------------------------------------------------------------------
from typing import (
Any,
IO,
Iterator,
List,
Sequence,
Tuple,
Union,
TYPE_CHECKING,
cast,
Dict,
)
from git.types import PathLike, Literal
if TYPE_CHECKING:
from git.repo import Repo
from git.refs import SymbolicReference
# ------------------------------------------------------------------------
log = logging.getLogger("git.objects.commit")
log.addHandler(logging.NullHandler())
__all__ = ("Commit",)
class Commit(base.Object, TraversableIterableObj, Diffable, Serializable):
"""Wraps a git Commit object.
This class will act lazily on some of its attributes and will query the
value on demand only if it involves calling the git binary."""
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_date = "GIT_AUTHOR_DATE"
env_committer_date = "GIT_COMMITTER_DATE"
# CONFIGURATION KEYS
conf_encoding = "i18n.commitencoding"
# INVARIANTS
default_encoding = "UTF-8"
# object configuration
type: Literal["commit"] = "commit"
__slots__ = (
"tree",
"author",
"authored_date",
"author_tz_offset",
"committer",
"committed_date",
"committer_tz_offset",
"message",
"parents",
"encoding",
"gpgsig",
)
_id_attribute_ = "hexsha"
def __init__(
self,
repo: "Repo",
binsha: bytes,
tree: Union[Tree, None] = None,
author: Union[Actor, None] = None,
authored_date: Union[int, None] = None,
author_tz_offset: Union[None, float] = None,
committer: Union[Actor, None] = None,
committed_date: Union[int, None] = None,
committer_tz_offset: Union[None, float] = None,
message: Union[str, bytes, None] = None,
parents: Union[Sequence["Commit"], None] = None,
encoding: Union[str, None] = None,
gpgsig: Union[str, None] = None,
) -> None:
"""Instantiate a new Commit. All keyword arguments taking None as default will
be implicitly set on first query.
:param binsha: 20 byte sha1
:param parents: tuple( Commit, ... )
is a tuple of commit ids or actual Commits
:param tree: Tree object
:param author: Actor
is the author Actor object
:param authored_date: int_seconds_since_epoch
is the authored DateTime - use time.gmtime() to convert it into a
different format
:param author_tz_offset: int_seconds_west_of_utc
is the timezone that the authored_date is in
:param committer: Actor
is the committer string
:param committed_date: int_seconds_since_epoch
is the committed DateTime - use time.gmtime() to convert it into a
different format
:param committer_tz_offset: int_seconds_west_of_utc
is the timezone that the committed_date is in
:param message: string
is the commit message
:param encoding: string
encoding of the message, defaults to UTF-8
:param parents:
List or tuple of Commit objects which are our parent(s) in the commit
dependency graph
:return: git.Commit
:note:
Timezone information is in the same format and in the same sign
as what time.altzone returns. The sign is inverted compared to git's
UTC timezone."""
super(Commit, self).__init__(repo, binsha)
self.binsha = binsha
if tree is not None:
assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
if tree is not None:
self.tree = tree
if author is not None:
self.author = author
if authored_date is not None:
self.authored_date = authored_date
if author_tz_offset is not None:
self.author_tz_offset = author_tz_offset
if committer is not None:
self.committer = committer
if committed_date is not None:
self.committed_date = committed_date
if committer_tz_offset is not None:
self.committer_tz_offset = committer_tz_offset
if message is not None:
self.message = message
if parents is not None:
self.parents = parents
if encoding is not None:
self.encoding = encoding
if gpgsig is not None:
self.gpgsig = gpgsig
@classmethod
def _get_intermediate_items(cls, commit: "Commit") -> Tuple["Commit", ...]:
return tuple(commit.parents)
@classmethod
def _calculate_sha_(cls, repo: "Repo", commit: "Commit") -> bytes:
"""Calculate the sha of a commit.
:param repo: Repo object the commit should be part of
:param commit: Commit object for which to generate the sha
"""
stream = BytesIO()
commit._serialize(stream)
streamlen = stream.tell()
stream.seek(0)
istream = repo.odb.store(IStream(cls.type, streamlen, stream))
return istream.binsha
def replace(self, **kwargs: Any) -> "Commit":
"""Create new commit object from existing commit object.
Any values provided as keyword arguments will replace the
corresponding attribute in the new object.
"""
attrs = {k: getattr(self, k) for k in self.__slots__}
for attrname in kwargs:
if attrname not in self.__slots__:
raise ValueError("invalid attribute name")
attrs.update(kwargs)
new_commit = self.__class__(self.repo, self.NULL_BIN_SHA, **attrs)
new_commit.binsha = self._calculate_sha_(self.repo, new_commit)
return new_commit
    def _set_cache_(self, attr: str) -> None:
        """Fill a lazily-loaded attribute on first access.

        All commit header attributes live in the same object-database blob,
        so one deserialization pass populates every slot at once.
        """
        if attr in Commit.__slots__:
            # read the data in a chunk, its faster - then provide a file wrapper
            _binsha, _typename, self.size, stream = self.repo.odb.stream(self.binsha)
            self._deserialize(BytesIO(stream.read()))
        else:
            # not one of ours - let the base class resolve it
            super(Commit, self)._set_cache_(attr)
        # END handle attrs
    @property
    def authored_datetime(self) -> datetime.datetime:
        """Author timestamp as a timezone-aware :class:`datetime.datetime`."""
        return from_timestamp(self.authored_date, self.author_tz_offset)
    @property
    def committed_datetime(self) -> datetime.datetime:
        """Commit timestamp as a timezone-aware :class:`datetime.datetime`."""
        return from_timestamp(self.committed_date, self.committer_tz_offset)
@property
def summary(self) -> Union[str, bytes]:
""":return: First line of the commit message"""
if isinstance(self.message, str):
return self.message.split("\n", 1)[0]
else:
return self.message.split(b"\n", 1)[0]
def count(self, paths: Union[PathLike, Sequence[PathLike]] = "", **kwargs: Any) -> int:
"""Count the number of commits reachable from this commit
:param paths:
is an optional path or a list of paths restricting the return value
to commits actually containing the paths
:param kwargs:
Additional options to be passed to git-rev-list. They must not alter
the output style of the command, or parsing will yield incorrect results
:return: int defining the number of reachable commits"""
# yes, it makes a difference whether empty paths are given or not in our case
# as the empty paths version will ignore merge commits for some reason.
if paths:
return len(self.repo.git.rev_list(self.hexsha, "--", paths, **kwargs).splitlines())
return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
    @property
    def name_rev(self) -> str:
        """
        :return:
            String describing the commits hex sha based on the closest Reference.
            Mostly useful for UI purposes"""
        # delegates entirely to `git name-rev`
        return self.repo.git.name_rev(self)
    @classmethod
    def iter_items(
        cls,
        repo: "Repo",
        rev: Union[str, "Commit", "SymbolicReference"],  # type: ignore
        paths: Union[PathLike, Sequence[PathLike]] = "",
        **kwargs: Any,
    ) -> Iterator["Commit"]:
        """Find all commits matching the given criteria.

        :param repo: is the Repo
        :param rev: revision specifier, see git-rev-parse for viable options
        :param paths:
            is an optional path or list of paths, if set only Commits that include the path
            or paths will be considered
        :param kwargs:
            optional keyword arguments to git rev-list where
            ``max_count`` is the maximum number of commits to fetch
            ``skip`` is the number of commits to skip
            ``since`` all commits since i.e. '1970-01-01'
        :return: iterator yielding Commit items"""
        if "pretty" in kwargs:
            # our parser expects one plain sha per output line
            raise ValueError("--pretty cannot be used as parsing expects single sha's only")
        # END handle pretty

        # use -- in any case, to prevent possibility of ambiguous arguments
        # see https://github.com/gitpython-developers/GitPython/issues/264
        args_list: List[PathLike] = ["--"]

        if paths:
            # normalize a single path into a tuple so extend() works uniformly
            paths_tup: Tuple[PathLike, ...]
            if isinstance(paths, (str, os.PathLike)):
                paths_tup = (paths,)
            else:
                paths_tup = tuple(paths)

            args_list.extend(paths_tup)
        # END if paths

        proc = repo.git.rev_list(rev, args_list, as_process=True, **kwargs)
        return cls._iter_from_process_or_stream(repo, proc)
def iter_parents(self, paths: Union[PathLike, Sequence[PathLike]] = "", **kwargs: Any) -> Iterator["Commit"]:
"""Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self"""
# skip ourselves
skip = kwargs.get("skip", 1)
if skip == 0: # skip ourselves
skip = 1
kwargs["skip"] = skip
return self.iter_items(self.repo, self, paths, **kwargs)
    @property
    def stats(self) -> Stats:
        """Create a git stat from changes between this commit and its first parent
        or from all changes done if this is the very first commit.

        :return: git.Stats"""
        if not self.parents:
            # root commit: diff against the empty tree; --root makes
            # diff-tree include the commit itself
            text = self.repo.git.diff_tree(self.hexsha, "--", numstat=True, root=True)
            # drop the first line (the commit sha header) and rebuild the
            # numstat body so it parses like a regular diff
            text2 = ""
            for line in text.splitlines()[1:]:
                (insertions, deletions, filename) = line.split("\t")
                text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
            text = text2
        else:
            # regular commit: numstat diff against the first parent only
            text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, "--", numstat=True)
        return Stats._list_from_string(self.repo, text)
    @property
    def trailers(self) -> Dict:
        """Get the trailers of the message as dictionary

        Git messages can contain trailer information that are similar to RFC 822
        e-mail headers (see: https://git-scm.com/docs/git-interpret-trailers).

        This functions calls ``git interpret-trailers --parse`` onto the message
        to extract the trailer information. The key value pairs are stripped of
        leading and trailing whitespaces before they get saved into a dictionary.

        NOTE: if the same trailer key appears more than once, later values
        overwrite earlier ones, since the result is a plain dict.

        Valid message with trailer:

        .. code-block::

            Subject line

            some body information

            another information

            key1: value1
            key2 : value 2 with inner spaces

        dictionary will look like this:

        .. code-block::

            {
                "key1": "value1",
                "key2": "value 2 with inner spaces"
            }

        :return: Dictionary containing whitespace stripped trailer information
        """
        d = {}
        # let git itself do the RFC822-style parsing
        cmd = ["git", "interpret-trailers", "--parse"]
        proc: Git.AutoInterrupt = self.repo.git.execute(cmd, as_process=True, istream=PIPE)  # type: ignore
        trailer: str = proc.communicate(str(self.message).encode())[0].decode()
        if trailer.endswith("\n"):
            trailer = trailer[0:-1]
        if trailer != "":
            for line in trailer.split("\n"):
                key, value = line.split(":", 1)
                d[key.strip()] = value.strip()
        return d
    @classmethod
    def _iter_from_process_or_stream(cls, repo: "Repo", proc_or_stream: Union[Popen, IO]) -> Iterator["Commit"]:
        """Parse out commit information into a list of Commit objects

        We expect one-line per commit, and parse the actual commit information directly
        from our lighting fast object database

        :param proc: git-rev-list process instance - one sha per line
        :return: iterator returning Commit objects"""

        # def is_proc(inp) -> TypeGuard[Popen]:
        #     return hasattr(proc_or_stream, 'wait') and not hasattr(proc_or_stream, 'readline')

        # def is_stream(inp) -> TypeGuard[IO]:
        #     return hasattr(proc_or_stream, 'readline')

        # NOTE(review): if a Popen is passed whose stdout is None, `stream`
        # is never bound and the `readline` access below raises NameError —
        # presumably callers always pass stdout=PIPE processes; confirm.
        if hasattr(proc_or_stream, "wait"):
            proc_or_stream = cast(Popen, proc_or_stream)
            if proc_or_stream.stdout is not None:
                stream = proc_or_stream.stdout
        elif hasattr(proc_or_stream, "readline"):
            proc_or_stream = cast(IO, proc_or_stream)
            stream = proc_or_stream

        readline = stream.readline
        while True:
            line = readline()
            if not line:
                break
            hexsha = line.strip()
            if len(hexsha) > 40:
                # split additional information, as returned by bisect for instance
                hexsha, _ = line.split(None, 1)
            # END handle extra info

            assert len(hexsha) == 40, "Invalid line: %s" % hexsha
            yield cls(repo, hex_to_bin(hexsha))
        # END for each line in stream

        # TODO: Review this - it seems process handling got a bit out of control
        # due to many developers trying to fix the open file handles issue
        if hasattr(proc_or_stream, "wait"):
            proc_or_stream = cast(Popen, proc_or_stream)
            finalize_process(proc_or_stream)
    @classmethod
    def create_from_tree(
        cls,
        repo: "Repo",
        tree: Union[Tree, str],
        message: str,
        parent_commits: Union[None, List["Commit"]] = None,
        head: bool = False,
        author: Union[None, Actor] = None,
        committer: Union[None, Actor] = None,
        author_date: Union[None, str] = None,
        commit_date: Union[None, str] = None,
    ) -> "Commit":
        """Commit the given tree, creating a commit object.

        :param repo: Repo object the commit should be part of
        :param tree: Tree object or hex or bin sha
            the tree of the new commit
        :param message: Commit message. It may be an empty string if no message is provided.
            It will be converted to a string , in any case.
        :param parent_commits:
            Optional Commit objects to use as parents for the new commit.
            If empty list, the commit will have no parents at all and become
            a root commit.
            If None , the current head commit will be the parent of the
            new commit object
        :param head:
            If True, the HEAD will be advanced to the new commit automatically.
            Else the HEAD will remain pointing on the previous commit. This could
            lead to undesired results when diffing files.
        :param author: The name of the author, optional. If unset, the repository
            configuration is used to obtain this value.
        :param committer: The name of the committer, optional. If unset, the
            repository configuration is used to obtain this value.
        :param author_date: The timestamp for the author field
        :param commit_date: The timestamp for the committer field

        :return: Commit object representing the new commit

        :note:
            Additional information about the committer and Author are taken from the
            environment or from the git configuration, see git-commit-tree for
            more information"""
        if parent_commits is None:
            try:
                parent_commits = [repo.head.commit]
            except ValueError:
                # empty repositories have no head commit
                parent_commits = []
            # END handle parent commits
        else:
            for p in parent_commits:
                if not isinstance(p, cls):
                    raise ValueError(f"Parent commit '{p!r}' must be of type {cls}")
            # end check parent commit types
        # END if parent commits are unset

        # retrieve all additional information, create a commit object, and
        # serialize it
        # Generally:
        # * Environment variables override configuration values
        # * Sensible defaults are set according to the git documentation

        # COMMITTER AND AUTHOR INFO
        cr = repo.config_reader()
        env = os.environ

        committer = committer or Actor.committer(cr)
        author = author or Actor.author(cr)

        # PARSE THE DATES
        # default: "now" in the local timezone (altzone during DST)
        unix_time = int(time())
        is_dst = daylight and localtime().tm_isdst > 0
        offset = altzone if is_dst else timezone

        # explicit argument beats GIT_AUTHOR_DATE beats "now"
        author_date_str = env.get(cls.env_author_date, "")
        if author_date:
            author_time, author_offset = parse_date(author_date)
        elif author_date_str:
            author_time, author_offset = parse_date(author_date_str)
        else:
            author_time, author_offset = unix_time, offset
        # END set author time

        # explicit argument beats GIT_COMMITTER_DATE beats "now"
        committer_date_str = env.get(cls.env_committer_date, "")
        if commit_date:
            committer_time, committer_offset = parse_date(commit_date)
        elif committer_date_str:
            committer_time, committer_offset = parse_date(committer_date_str)
        else:
            committer_time, committer_offset = unix_time, offset
        # END set committer time

        # assume utf8 encoding
        enc_section, enc_option = cls.conf_encoding.split(".")
        conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
        if not isinstance(conf_encoding, str):
            raise TypeError("conf_encoding could not be coerced to str")

        # if the tree is no object, make sure we create one - otherwise
        # the created commit object is invalid
        if isinstance(tree, str):
            tree = repo.tree(tree)
        # END tree conversion

        # CREATE NEW COMMIT
        new_commit = cls(
            repo,
            cls.NULL_BIN_SHA,
            tree,
            author,
            author_time,
            author_offset,
            committer,
            committer_time,
            committer_offset,
            message,
            parent_commits,
            conf_encoding,
        )

        new_commit.binsha = cls._calculate_sha_(repo, new_commit)

        if head:
            # need late import here, importing git at the very beginning throws
            # as well ...
            import git.refs

            try:
                repo.head.set_commit(new_commit, logmsg=message)
            except ValueError:
                # head is not yet set to the ref our HEAD points to
                # Happens on first commit
                master = git.refs.Head.create(
                    repo,
                    repo.head.ref,
                    new_commit,
                    logmsg="commit (initial): %s" % message,
                )
                repo.head.set_reference(master, logmsg="commit: Switching to %s" % master)
            # END handle empty repositories
        # END advance head handling

        return new_commit
# { Serializable Implementation
    def _serialize(self, stream: BytesIO) -> "Commit":
        """Write this commit in git's commit object wire format to *stream*.

        Header lines (tree, parents, author, committer, optional encoding
        and gpgsig) are followed by a blank line and the raw message. The
        exact byte layout matters: the sha is computed over this output.

        :return: self"""
        write = stream.write
        write(("tree %s\n" % self.tree).encode("ascii"))
        for p in self.parents:
            write(("parent %s\n" % p).encode("ascii"))

        a = self.author
        aname = a.name
        c = self.committer
        fmt = "%s %s <%s> %s %s\n"
        write(
            (
                fmt
                % (
                    "author",
                    aname,
                    a.email,
                    self.authored_date,
                    altz_to_utctz_str(self.author_tz_offset),
                )
            ).encode(self.encoding)
        )

        # encode committer
        aname = c.name
        write(
            (
                fmt
                % (
                    "committer",
                    aname,
                    c.email,
                    self.committed_date,
                    altz_to_utctz_str(self.committer_tz_offset),
                )
            ).encode(self.encoding)
        )

        # the encoding header is only written when it differs from the default
        if self.encoding != self.default_encoding:
            write(("encoding %s\n" % self.encoding).encode("ascii"))

        try:
            if self.__getattribute__("gpgsig"):
                # continuation lines of the signature are space-prefixed
                write(b"gpgsig")
                for sigline in self.gpgsig.rstrip("\n").split("\n"):
                    write((" " + sigline + "\n").encode("ascii"))
        except AttributeError:
            # gpgsig slot never set - commit is simply unsigned
            pass

        write(b"\n")

        # write plain bytes, be sure its encoded according to our encoding
        if isinstance(self.message, str):
            write(self.message.encode(self.encoding))
        else:
            write(self.message)
        # END handle encoding
        return self
def _deserialize(self, stream: BytesIO) -> "Commit":
"""
:param from_rev_list: if true, the stream format is coming from the rev-list command
Otherwise it is assumed to be a plain data stream from our object
"""
readline = stream.readline
self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, "")
self.parents = []
next_line = None
while True:
parent_line = readline()
if not parent_line.startswith(b"parent"):
next_line = parent_line
break
# END abort reading parents
self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1].decode("ascii"))))
# END for each parent line
self.parents = tuple(self.parents)
# we don't know actual author encoding before we have parsed it, so keep the lines around
author_line = next_line
committer_line = readline()
# we might run into one or more mergetag blocks, skip those for now
next_line = readline()
while next_line.startswith(b"mergetag "):
next_line = readline()
while next_line.startswith(b" "):
next_line = readline()
# end skip mergetags
# now we can have the encoding line, or an empty line followed by the optional
# message.
self.encoding = self.default_encoding
self.gpgsig = ""
# read headers
enc = next_line
buf = enc.strip()
while buf:
if buf[0:10] == b"encoding ":
self.encoding = buf[buf.find(b" ") + 1 :].decode(self.encoding, "ignore")
elif buf[0:7] == b"gpgsig ":
sig = buf[buf.find(b" ") + 1 :] + b"\n"
is_next_header = False
while True:
sigbuf = readline()
if not sigbuf:
break
if sigbuf[0:1] != b" ":
buf = sigbuf.strip()
is_next_header = True
break
sig += sigbuf[1:]
# end read all signature
self.gpgsig = sig.rstrip(b"\n").decode(self.encoding, "ignore")
if is_next_header:
continue
buf = readline().strip()
# decode the authors name
try:
(
self.author,
self.authored_date,
self.author_tz_offset,
) = parse_actor_and_date(author_line.decode(self.encoding, "replace"))
except UnicodeDecodeError:
log.error(
"Failed to decode author line '%s' using encoding %s",
author_line,
self.encoding,
exc_info=True,
)
try:
(
self.committer,
self.committed_date,
self.committer_tz_offset,
) = parse_actor_and_date(committer_line.decode(self.encoding, "replace"))
except UnicodeDecodeError:
log.error(
"Failed to decode committer line '%s' using encoding %s",
committer_line,
self.encoding,
exc_info=True,
)
# END handle author's encoding
# a stream from our data simply gives us the plain message
# The end of our message stream is marked with a newline that we strip
self.message = stream.read()
try:
self.message = self.message.decode(self.encoding, "replace")
except UnicodeDecodeError:
log.error(
"Failed to decode message '%s' using encoding %s",
self.message,
self.encoding,
exc_info=True,
)
# END exception handling
return self
# } END serializable implementation
@property
def co_authors(self) -> List[Actor]:
"""
Search the commit message for any co-authors of this commit.
Details on co-authors: https://github.blog/2018-01-29-commit-together-with-co-authors/
:return: List of co-authors for this commit (as Actor objects).
"""
co_authors = []
if self.message:
results = re.findall(
r"^Co-authored-by: (.*) <(.*?)>$",
self.message,
re.MULTILINE,
)
for author in results:
co_authors.append(Actor(*author))
return co_authors
| bsd-3-clause | 5729a23615d51f7cc899b303a7b0212c | 34.737533 | 113 | 0.567935 | 4.336996 | false | false | false | false |
cqlengine/cqlengine | cqlengine/management.py | 1 | 11044 | import json
import warnings
import six
from cqlengine import SizeTieredCompactionStrategy, LeveledCompactionStrategy
from cqlengine import ONE
from cqlengine.named import NamedTable
from cqlengine.connection import execute, get_cluster
from cqlengine.exceptions import CQLEngineException
import logging
from collections import namedtuple
Field = namedtuple('Field', ['name', 'type'])
logger = logging.getLogger(__name__)
from cqlengine.models import Model
# system keyspaces
schema_columnfamilies = NamedTable('system', 'schema_columnfamilies')
def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values):
    """
    creates a keyspace

    :param name: name of keyspace to create
    :param strategy_class: keyspace replication strategy class
    :param replication_factor: keyspace replication factor
    :param durable_writes: 1.2 only, write log is bypassed if set to False
    :param **replication_values: 1.2 only, additional values to add to the replication data map
    """
    cluster = get_cluster()

    if name not in cluster.metadata.keyspaces:
        # Normalize once so both checks below agree regardless of the
        # casing the caller used (previously one check was
        # case-insensitive and the other exact-match, so e.g.
        # 'simplestrategy' was handled inconsistently).
        is_simple_strategy = strategy_class.lower() == 'simplestrategy'

        # try the 1.2 method
        replication_map = {
            'class': strategy_class,
            'replication_factor': replication_factor
        }
        replication_map.update(replication_values)
        if not is_simple_strategy:
            # Although the Cassandra documentation states for `replication_factor`
            # that it is "Required if class is SimpleStrategy; otherwise,
            # not used." we get an error if it is present.
            replication_map.pop('replication_factor', None)

        # CQL map literals use single quotes; json.dumps gives us the right
        # structure and we swap the quote style afterwards.
        query = """
        CREATE KEYSPACE {}
        WITH REPLICATION = {}
        """.format(name, json.dumps(replication_map).replace('"', "'"))

        if not is_simple_strategy:
            query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false')

        execute(query)
def delete_keyspace(name):
    """Drop keyspace ``name`` if the cluster currently defines it."""
    existing_keyspaces = get_cluster().metadata.keyspaces
    if name in existing_keyspaces:
        execute("DROP KEYSPACE {}".format(name))
def create_table(model):
    # Kept only so callers of the old API fail loudly with a pointer to
    # the replacement instead of silently doing nothing.
    raise CQLEngineException("create_table is deprecated, please use sync_table")
def sync_table(model):
    """
    Inspects the model and creates / updates the corresponding table and columns.

    Note that the attributes removed from the model are not deleted on the database.
    They become effectively ignored by (will not show up on) the model.

    :param model: a concrete (non-abstract) subclass of cqlengine Model
    :raises CQLEngineException: if model is not a Model subclass or is abstract
    """
    if not issubclass(model, Model):
        raise CQLEngineException("Models must be derived from base Model.")

    if model.__abstract__:
        raise CQLEngineException("cannot create table from abstract model")

    # construct query string
    cf_name = model.column_family_name()
    raw_cf_name = model.column_family_name(include_keyspace=False)

    ks_name = model._get_keyspace()

    cluster = get_cluster()

    keyspace = cluster.metadata.keyspaces[ks_name]
    tables = keyspace.tables

    # check for an existing column family
    if raw_cf_name not in tables:
        qs = get_create_table(model)

        try:
            execute(qs)
        except CQLEngineException as ex:
            # 1.2 doesn't return cf names, so we have to examine the exception
            # and ignore if it says the column family already exists
            if "Cannot add already existing column family" not in unicode(ex):
                raise
    else:
        # see if we're missing any columns
        fields = get_fields(model)
        field_names = [x.name for x in fields]
        for name, col in model._columns.items():
            if col.primary_key or col.partition_key:
                continue  # we can't mess with the PK
            if col.db_field_name in field_names:
                continue  # skip columns already defined

            # add missing column using the column def
            query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def())
            logger.debug(query)
            execute(query)

        update_compaction(model)

    table = cluster.metadata.keyspaces[ks_name].tables[raw_cf_name]

    # create any missing secondary indexes declared on the model
    indexes = [c for n, c in model._columns.items() if c.index]

    for column in indexes:
        if table.columns[column.db_field_name].index:
            continue

        qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)]
        qs += ['ON {}'.format(cf_name)]
        qs += ['("{}")'.format(column.db_field_name)]
        qs = ' '.join(qs)
        execute(qs)
def get_create_table(model):
    """Build and return the CREATE TABLE statement for ``model`` as a string.

    The statement contains every column definition, a composite PRIMARY KEY
    clause, and an optional WITH clause collecting table properties,
    clustering order and compaction options.
    """
    cf_name = model.column_family_name()
    qs = ['CREATE TABLE {}'.format(cf_name)]
    #add column types
    pkeys = [] # primary keys
    ckeys = [] # clustering keys
    qtypes = [] # field types
    def add_column(col):
        # Record the column definition and, for key columns, its quoted
        # db name in either the partition-key or clustering-key list.
        s = col.get_column_def()
        if col.primary_key:
            keys = (pkeys if col.partition_key else ckeys)
            keys.append('"{}"'.format(col.db_field_name))
        qtypes.append(s)
    for name, col in model._columns.items():
        add_column(col)
    # Composite key: partition keys grouped in inner parens, clustering
    # keys (if any) appended after them.
    qtypes.append('PRIMARY KEY (({}){})'.format(', '.join(pkeys), ckeys and ', ' + ', '.join(ckeys) or ''))
    qs += ['({})'.format(', '.join(qtypes))]
    with_qs = []
    table_properties = ['bloom_filter_fp_chance', 'caching', 'comment',
        'dclocal_read_repair_chance', 'default_time_to_live', 'gc_grace_seconds',
        'index_interval', 'memtable_flush_period_in_ms', 'populate_io_cache_on_flush',
        'read_repair_chance', 'replicate_on_write']
    for prop_name in table_properties:
        # Each property is read from a dunder attribute on the model,
        # e.g. __comment__; None means "not set, use server default".
        prop_value = getattr(model, '__{}__'.format(prop_name), None)
        if prop_value is not None:
            # Strings needs to be single quoted
            if isinstance(prop_value, six.string_types):
                prop_value = "'{}'".format(prop_value)
            with_qs.append("{} = {}".format(prop_name, prop_value))
    _order = ['"{}" {}'.format(c.db_field_name, c.clustering_order or 'ASC') for c in model._clustering_keys.values()]
    if _order:
        with_qs.append('clustering order by ({})'.format(', '.join(_order)))
    compaction_options = get_compaction_options(model)
    if compaction_options:
        # CQL map literals use single quotes, so swap the JSON double quotes.
        compaction_options = json.dumps(compaction_options).replace('"', "'")
        with_qs.append("compaction = {}".format(compaction_options))
    # Add table properties.
    if with_qs:
        qs += ['WITH {}'.format(' AND '.join(with_qs))]
    qs = ' '.join(qs)
    return qs
def get_compaction_options(model):
    """
    Generates dictionary (later converted to a string) for creating and altering
    tables with compaction strategy

    :param model: model class whose ``__compaction__*`` attributes are read
    :return: dict of compaction options (all values stringified), or an
        empty dict when the model declares no compaction strategy
    """
    if not model.__compaction__:
        return {}
    result = {'class':model.__compaction__}
    def setter(key, limited_to_strategy = None):
        """
        sets key in result, checking if the key is limited to either SizeTiered or Leveled
        :param key: one of the compaction options, like "bucket_high"
        :param limited_to_strategy: SizeTieredCompactionStrategy, LeveledCompactionStrategy
        :return:
        """
        mkey = "__compaction_{}__".format(key)
        # NOTE(review): getattr has no default here, so the dunder attribute
        # is assumed to always exist on the model (presumably defined on the
        # base Model class) — confirm.
        tmp = getattr(model, mkey)
        if tmp and limited_to_strategy and limited_to_strategy != model.__compaction__:
            raise CQLEngineException("{} is limited to {}".format(key, limited_to_strategy))
        if tmp:
            # Explicitly cast the values to strings to be able to compare the
            # values against introspected values from Cassandra.
            result[key] = str(tmp)
    setter('tombstone_compaction_interval')
    setter('tombstone_threshold')
    setter('bucket_high', SizeTieredCompactionStrategy)
    setter('bucket_low', SizeTieredCompactionStrategy)
    setter('max_threshold', SizeTieredCompactionStrategy)
    setter('min_threshold', SizeTieredCompactionStrategy)
    setter('min_sstable_size', SizeTieredCompactionStrategy)
    setter('sstable_size_in_mb', LeveledCompactionStrategy)
    return result
def get_fields(model):
    # returns all fields that aren't part of the PK
    ks_name = model._get_keyspace()
    col_family = model.column_family_name(include_keyspace=False)
    field_types = ['regular', 'static']
    query = "select * from system.schema_columns where keyspace_name = %s and columnfamily_name = %s"
    tmp = execute(query, [ks_name, col_family])
    # Tables containing only primary keys do not appear to create
    # any entries in system.schema_columns, as only non-primary-key attributes
    # appear to be inserted into the schema_columns table
    try:
        # convert to Field named tuples, keeping only 'regular'/'static' rows
        return [Field(x['column_name'], x['validator']) for x in tmp if x['type'] in field_types]
    except KeyError:
        # No 'type' column in the result rows — presumably an older
        # Cassandra schema_columns layout; fall back to returning every row.
        return [Field(x['column_name'], x['validator']) for x in tmp]
def get_table_settings(model):
    """Return the native-driver table metadata object for ``model``."""
    keyspace_name = model._get_keyspace()
    table_name = model.column_family_name(include_keyspace=False)
    return get_cluster().metadata.keyspaces[keyspace_name].tables[table_name]
def update_compaction(model):
    """Updates the compaction options for the given model if necessary.

    :param model: The model to update.
    :return: `True`, if the compaction options were modified in Cassandra,
       `False` otherwise.
    :rtype: bool
    """
    logger.debug("Checking %s for compaction differences", model)
    table = get_table_settings(model)
    existing_options = table.options.copy()
    existing_compaction_strategy = existing_options['compaction_strategy_class']
    existing_options = json.loads(existing_options['compaction_strategy_options'])
    desired_options = get_compaction_options(model)
    desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy)
    desired_options.pop('class', None)
    do_update = False
    # Substring check: the existing strategy is presumably reported as a
    # fully-qualified class name while the desired one may be the short
    # name — confirm this assumption against the driver's metadata format.
    if desired_compact_strategy not in existing_compaction_strategy:
        do_update = True
    # Any per-option mismatch between desired and live values forces an ALTER.
    for k, v in desired_options.items():
        val = existing_options.pop(k, None)
        if val != v:
            do_update = True
    # check compaction_strategy_options
    if do_update:
        options = get_compaction_options(model)
        # jsonify
        options = json.dumps(options).replace('"', "'")
        cf_name = model.column_family_name()
        query = "ALTER TABLE {} with compaction = {}".format(cf_name, options)
        logger.debug(query)
        execute(query)
        return True
    return False
def delete_table(model):
    """Deprecated entry point; always raises and points callers at drop_table()."""
    raise CQLEngineException("delete_table has been deprecated in favor of drop_table()")
def drop_table(model):
    """Drop the table backing ``model``; a missing table is a silent no-op.

    Improvements over the previous version: the unused ``table`` local is
    gone, and only the metadata lookup is guarded by ``except KeyError`` —
    previously a KeyError escaping from ``execute`` would also have been
    silently swallowed.
    """
    # don't try to delete non existant tables
    meta = get_cluster().metadata
    ks_name = model._get_keyspace()
    raw_cf_name = model.column_family_name(include_keyspace=False)
    try:
        # KeyError means the keyspace or table does not exist.
        meta.keyspaces[ks_name].tables[raw_cf_name]
    except KeyError:
        return
    execute('drop table {};'.format(model.column_family_name(include_keyspace=True)))
| bsd-3-clause | 91e7ef014164d7313581bd48161aadf0 | 33.404984 | 118 | 0.650308 | 3.91215 | false | false | false | false |
altair-viz/altair | altair/examples/diverging_stacked_bar_chart.py | 2 | 8736 | """
Diverging Stacked Bar Chart
---------------------------
This example shows a diverging stacked bar chart for sentiments towards a set of eight questions, displayed as percentages with neutral responses straddling the 0% mark.
"""
# category: bar charts
import altair as alt

# Survey responses, one row per (question, response type).  Each row carries
# the raw answer count, its share of the question's answers in percent, and
# the precomputed [percentage_start, percentage_end] span that positions the
# bar on the diverging axis (neutral answers straddle the 0% mark).
_columns = ("question", "type", "value", "percentage",
            "percentage_start", "percentage_end")
_rows = [
    ("Question 1", "Strongly disagree", 24, 0.7, -19.1, -18.4),
    ("Question 1", "Disagree", 294, 9.1, -18.4, -9.2),
    ("Question 1", "Neither agree nor disagree", 594, 18.5, -9.2, 9.2),
    ("Question 1", "Agree", 1927, 59.9, 9.2, 69.2),
    ("Question 1", "Strongly agree", 376, 11.7, 69.2, 80.9),

    ("Question 2", "Strongly disagree", 2, 18.2, -36.4, -18.2),
    ("Question 2", "Disagree", 2, 18.2, -18.2, 0),
    ("Question 2", "Neither agree nor disagree", 0, 0, 0, 0),
    ("Question 2", "Agree", 7, 63.6, 0, 63.6),
    ("Question 2", "Strongly agree", 11, 0, 63.6, 63.6),

    ("Question 3", "Strongly disagree", 2, 20, -30, -10),
    ("Question 3", "Disagree", 0, 0, -10, -10),
    ("Question 3", "Neither agree nor disagree", 2, 20, -10, 10),
    ("Question 3", "Agree", 4, 40, 10, 50),
    ("Question 3", "Strongly agree", 2, 20, 50, 70),

    ("Question 4", "Strongly disagree", 0, 0, -15.6, -15.6),
    ("Question 4", "Disagree", 2, 12.5, -15.6, -3.1),
    ("Question 4", "Neither agree nor disagree", 1, 6.3, -3.1, 3.1),
    ("Question 4", "Agree", 7, 43.8, 3.1, 46.9),
    ("Question 4", "Strongly agree", 6, 37.5, 46.9, 84.4),

    ("Question 5", "Strongly disagree", 0, 0, -10.4, -10.4),
    ("Question 5", "Disagree", 1, 4.2, -10.4, -6.3),
    ("Question 5", "Neither agree nor disagree", 3, 12.5, -6.3, 6.3),
    ("Question 5", "Agree", 16, 66.7, 6.3, 72.9),
    ("Question 5", "Strongly agree", 4, 16.7, 72.9, 89.6),

    ("Question 6", "Strongly disagree", 1, 6.3, -18.8, -12.5),
    ("Question 6", "Disagree", 1, 6.3, -12.5, -6.3),
    ("Question 6", "Neither agree nor disagree", 2, 12.5, -6.3, 6.3),
    ("Question 6", "Agree", 9, 56.3, 6.3, 62.5),
    ("Question 6", "Strongly agree", 3, 18.8, 62.5, 81.3),

    ("Question 7", "Strongly disagree", 0, 0, -10, -10),
    ("Question 7", "Disagree", 0, 0, -10, -10),
    ("Question 7", "Neither agree nor disagree", 1, 20, -10, 10),
    ("Question 7", "Agree", 4, 80, 10, 90),
    ("Question 7", "Strongly agree", 0, 0, 90, 90),

    ("Question 8", "Strongly disagree", 0, 0, 0, 0),
    ("Question 8", "Disagree", 0, 0, 0, 0),
    ("Question 8", "Neither agree nor disagree", 0, 0, 0, 0),
    ("Question 8", "Agree", 0, 0, 0, 0),
    ("Question 8", "Strongly agree", 2, 100, 0, 100),
]
source = alt.pd.DataFrame([dict(zip(_columns, row)) for row in _rows])

# Fixed Likert ordering with a red-to-blue diverging palette.
color_scale = alt.Scale(
    domain=[
        "Strongly disagree",
        "Disagree",
        "Neither agree nor disagree",
        "Agree",
        "Strongly agree"
    ],
    range=["#c30d24", "#f3a583", "#cccccc", "#94c6da", "#1770ab"]
)

y_axis = alt.Axis(
    title='Question',
    offset=5,
    ticks=False,
    minExtent=60,
    domain=False
)

# Each bar spans [percentage_start, percentage_end] on the shared x axis.
alt.Chart(source).mark_bar().encode(
    x='percentage_start:Q',
    x2='percentage_end:Q',
    y=alt.Y('question:N', axis=y_axis),
    color=alt.Color(
        'type:N',
        legend=alt.Legend(title='Response'),
        scale=color_scale,
    )
)
| bsd-3-clause | 07f1e2b185d661aca35e556209d83bda | 22.803815 | 169 | 0.45158 | 3.590629 | false | false | false | false |
altair-viz/altair | altair/examples/normed_parallel_coordinates.py | 1 | 1293 | """
Normalized Parallel Coordinates
-------------------------------
A `Parallel Coordinates <https://en.wikipedia.org/wiki/Parallel_coordinates>`_
chart is a chart that lets you visualize the individual data points by drawing
a single line for each of them.
Such a chart can be created in Altair by first transforming the data into a
suitable representation.
This example shows a modified parallel coordinates chart with the Iris dataset,
where the y-axis shows the value after min-max rather than the raw value. It's a
simplified Altair version of `the VegaLite version <https://vega.github.io/vega-lite/examples/parallel_coordinate.html>`_
"""
# category: advanced calculations
import altair as alt
from vega_datasets import data
from altair import datum

source = data.iris()

# Measurement columns folded into long (key, value) form so every
# measurement shares one categorical x axis.
_measures = ['petalLength', 'petalWidth', 'sepalLength', 'sepalWidth']

base = alt.Chart(source).transform_window(
    # A unique running index per record, used as the line 'detail'.
    index='count()'
).transform_fold(
    _measures
).transform_joinaggregate(
    min='min(value)',
    max='max(value)',
    groupby=['key']
).transform_calculate(
    # Min-max normalise each measurement so all axes share the [0, 1] range.
    minmax_value=(datum.value - datum.min) / (datum.max - datum.min),
    mid=(datum.min + datum.max) / 2
)

base.mark_line().encode(
    x='key:N',
    y='minmax_value:Q',
    color='species:N',
    detail='index:N',
    opacity=alt.value(0.5)
).properties(width=500)
| bsd-3-clause | be78504c8932440dd3564a6f244df740 | 30.536585 | 121 | 0.71075 | 3.561983 | false | false | false | false |
altair-viz/altair | altair/expr/tests/test_expr.py | 2 | 2880 | import operator
import pytest
from ... import expr
from .. import datum
def test_unary_operations():
    """Unary +/- applied to an expression renders with the prefixed symbol."""
    for symbol, operation in {"-": operator.neg, "+": operator.pos}.items():
        result = operation(datum.xxx)
        assert repr(result) == "({}datum.xxx)".format(symbol)
def test_binary_operations():
    """Binary operators on expressions render infix with the JS symbol.

    When the expression sits on the right-hand side, Python dispatches to
    the reflected method, so comparisons come back with the expression
    moved to the left and the operator mirrored (e.g. ``2 < datum.xxx``
    renders as ``(datum.xxx > 2)``).
    """
    OP_MAP = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "%": operator.mod,
        "===": operator.eq,
        "<": operator.lt,
        "<=": operator.le,
        ">": operator.gt,
        ">=": operator.ge,
        "!==": operator.ne,
        "&&": operator.and_,
        "||": operator.or_,
    }
    # When these are on the RHS, the opposite is evaluated instead.
    INEQ_REVERSE = {
        ">": "<",
        "<": ">",
        "<=": ">=",
        ">=": "<=",
        "===": "===",
        "!==": "!==",
    }
    for op, func in OP_MAP.items():
        # Expression on the left: renders as-is.
        z1 = func(datum.xxx, 2)
        assert repr(z1) == "(datum.xxx {} 2)".format(op)
        # Expression on the right: comparisons are mirrored.
        z2 = func(2, datum.xxx)
        if op in INEQ_REVERSE:
            assert repr(z2) == "(datum.xxx {} 2)".format(INEQ_REVERSE[op])
        else:
            assert repr(z2) == "(2 {} datum.xxx)".format(op)
        # Expression on both sides.
        z3 = func(datum.xxx, datum.yyy)
        assert repr(z3) == "(datum.xxx {} datum.yyy)".format(op)
def test_abs():
    """abs() on an expression renders as a function call."""
    assert repr(abs(datum.xxx)) == "abs(datum.xxx)"
def test_expr_funcs():
    """test all functions defined in expr.funcs"""
    # Reverse the Python-name -> Vega-name mapping to predict the rendering.
    reverse_names = {v: k for k, v in expr.funcs.NAME_MAP.items()}
    for funcname in expr.funcs.__all__:
        wrapped = getattr(expr, funcname)(datum.xxx)
        rendered_name = reverse_names.get(funcname, funcname)
        assert repr(wrapped) == "{}(datum.xxx)".format(rendered_name)
def test_expr_consts():
    """Test all constants defined in expr.consts"""
    # Reverse the Python-name -> Vega-name mapping to predict the rendering.
    reverse_names = {v: k for k, v in expr.consts.NAME_MAP.items()}
    for constname in expr.consts.__all__:
        product = getattr(expr, constname) * datum.xxx
        rendered_name = reverse_names.get(constname, constname)
        assert repr(product) == "({} * datum.xxx)".format(rendered_name)
def test_json_reprs():
    """Test JSON representations of special values"""
    cases = [(None, "null"), (False, "false"), (True, "true")]
    for value, rendered in cases:
        expression = datum.xxx == value  # noqa: E711,E712
        assert repr(expression) == "(datum.xxx === {})".format(rendered)
def test_to_dict():
    """to_dict() of an expression is simply its repr string."""
    expression = (datum.xxx * 2) > datum.yyy
    assert expression.to_dict() == repr(expression)
def test_copy():
    """A copied expression serializes identically to the original."""
    expression = datum.xxx * 2 > abs(datum.yyy)
    duplicate = expression.copy()
    assert duplicate.to_dict() == expression.to_dict()
def test_datum_getattr():
    """Item access on datum renders as subscription; dunder attribute
    access raises AttributeError instead of building an expression."""
    assert repr(datum["foo"]) == "datum['foo']"
    with pytest.raises(AttributeError):
        datum.__magic__
def test_expression_getitem():
    """Indexing an expression renders as subscription on that expression."""
    assert repr(datum.foo[0]) == "datum.foo[0]"
| bsd-3-clause | af83204272c080cdad5bf0f4d623e4b8 | 26.169811 | 87 | 0.532986 | 3.283922 | false | true | false | false |
alphacsc/alphacsc | alphacsc/learn_d_z_multi.py | 1 | 17185 | # Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Tom Dupre La Tour <tom.duprelatour@telecom-paristech.fr>
# Umut Simsekli <umut.simsekli@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Thomas Moreau <thomas.moreau@inria.fr>
from __future__ import print_function
import time
import sys
import numpy as np
from .utils import check_dimension
from .utils import check_random_state
from .utils.convolution import sort_atoms_by_explained_variances
from ._z_encoder import get_z_encoder_for
from ._d_solver import get_solver_d
def learn_d_z_multi(X, n_atoms, n_times_atom, n_iter=60, n_jobs=1,
                    lmbd_max='fixed', reg=0.1,
                    rank1=True, uv_constraint='auto', eps=1e-10,
                    algorithm='batch', algorithm_params=dict(),
                    solver_z='l-bfgs', solver_z_kwargs=dict(),
                    solver_d='auto', solver_d_kwargs=dict(),
                    D_init=None,
                    unbiased_z_hat=False, stopping_pobj=None,
                    raise_on_increase=True, verbose=10, callback=None,
                    random_state=None, name="DL", window=False,
                    sort_atoms=False):
    """Multivariate Convolutional Sparse Coding with optional rank-1 constraint

    Parameters
    ----------
    X : array, shape (n_trials, n_channels, n_times)
        The data on which to perform CSC.
    n_atoms : int
        The number of atoms to learn.
    n_times_atom : int
        The support of the atom.
    reg : float
        The regularization parameter
    lmbd_max : 'fixed' | 'scaled' | 'per_atom' | 'shared'
        If not fixed, adapt the regularization rate as a ratio of lambda_max:
          - 'scaled': the regularization parameter is fixed as a ratio of its
            maximal value at init i.e. reg_used = reg * lmbd_max(uv_init)
          - 'shared': the regularization parameter is set at each iteration as
            a ratio of its maximal value for the current dictionary estimate
            i.e. reg_used = reg * lmbd_max(uv_hat)
          - 'per_atom': the regularization parameter is set per atom and at
            each iteration as a ratio of its maximal value for this atom i.e.
            reg_used[k] = reg * lmbd_max(uv_hat[k])
    n_iter : int
        The number of coordinate-descent iterations.
    n_jobs : int
        The number of parallel jobs.
    rank1 : boolean
        If set to True, learn rank 1 dictionary atoms.
        If solver_z is 'dicodile', then rank1 must be False.
    uv_constraint : str in {'joint' | 'separate' | 'auto'}
        The kind of norm constraint on the atoms if using rank1=True.
        If 'joint', the constraint is norm_2([u, v]) <= 1
        If 'separate', the constraint is norm_2(u) <= 1 and norm_2(v) <= 1
        If rank1 is False, then uv_constraint must be 'auto'.
    eps : float
        Stopping criterion. If the cost descent after a uv and a z update is
        smaller than eps, return.
    algorithm : 'batch' | 'greedy' | 'online' | 'stochastic'
        Dictionary learning algorithm.
    algorithm_params : dict
        Parameters for the global algorithm used to learn the dictionary:
          alpha : float
            Forgetting factor for online learning. If set to 0, the learning is
            stochastic and each D-step is independent from the previous steps.
            When set to 1, each the previous values z_hat - computed with
            different dictionary - have the same weight as the current one.
            This factor should be large enough to ensure convergence but to
            large factor can lead to sub-optimal minima.
          batch_selection : 'random' | 'cyclic'
            The batch selection strategy for online learning. The batch are
            either selected randomly among all samples (without replacement) or
            in a cyclic way.
          batch_size : int in [1, n_trials]
            Size of the batch used in online learning. Increasing it
            regularizes the dictionary learning as there is less variance for
            the successive estimates. But it also increases the computational
            cost as more coding signals z_hat must be estimate at each
            iteration.
    solver_z : str
        The solver to use for the z update. Options are
        'l-bfgs' (default) | 'lgcd' |
        'dicodile' (distributed LGCD, experimental)
    solver_z_kwargs : dict
        Additional keyword arguments to pass to update_z_multi
    solver_d : str
        The solver to use for the d update. If rank1 is False, only option is
        'fista'. Else, options are 'alternate', 'alternate_adaptive' (default)
        or 'joint'.
    solver_d_kwargs : dict
        Additional keyword arguments to provide to update_d
    D_init : str or array, shape (n_atoms, n_channels + n_times_atoms) or \
                          shape (n_atoms, n_channels, n_times_atom)
        The initial atoms or an initialization scheme in {'chunk' | 'random' |
        'greedy'}.
    unbiased_z_hat : boolean
        If set to True, the value of the non-zero coefficients in the returned
        z_hat are recomputed with reg=0 on the frozen support.
    verbose : int
        The verbosity level.
    callback : func
        A callback function called at the end of each loop of the
        coordinate descent, with z_encoder and pobj as its arguments.
    random_state : int | None
        The random state.
    raise_on_increase : boolean
        Raise an error if the objective function increase
    window : boolean
        If True, re-parametrizes the atoms with a temporal Tukey window
    sort_atoms : boolean
        If True, the atoms are sorted by explained variances.

    Returns
    -------
    pobj : list
        The objective function value at each step of the coordinate descent.
    times : list
        The cumulative time for each iteration of the coordinate descent.
    uv_hat : array, shape (n_atoms, n_channels + n_times_atom)
        The atoms to learn from the data.
    z_hat : array, shape (n_trials, n_atoms, n_times_valid)
        The sparse activation matrix.
    reg : float
        Regularization parameter used.
    """
    assert lmbd_max in ['fixed', 'scaled', 'per_atom', 'shared'], (
        "lmbd_max should be in {'fixed', 'scaled', 'per_atom', 'shared'}, "
        f"got '{lmbd_max}'"
    )
    _, n_channels, _ = check_dimension(X)
    # Rescale the problem to avoid underflow issues
    std_X = X.std()
    X = X / std_X
    if algorithm == "stochastic":
        # The typical stochastic algorithm samples one signal, compute the
        # associated value z and then perform one step of gradient descent
        # for D.
        # NOTE(review): the condition below looks inverted — passing the
        # forced value max_iter=1 explicitly trips the assert, while any
        # other explicit value passes and is then overwritten. Confirm the
        # intended check.
        assert (
            'max_iter' not in solver_d_kwargs or solver_d_kwargs['max_iter'] != 1  # noqa
        ), (
            "with algorithm='stochastic', max_iter is forced to 1."
        )
        solver_d_kwargs["max_iter"] = 1
    elif algorithm == 'greedy':
        # Initialize D with no atoms as they will be added sequentially.
        D_init = 'greedy'
    # initialization
    start = time.time()
    d_solver = get_solver_d(
        n_channels, n_atoms, n_times_atom, solver_d=solver_d, rank1=rank1,
        uv_constraint=uv_constraint, D_init=D_init, window=window,
        random_state=random_state, **solver_d_kwargs
    )
    # Draw/validate the initial dictionary before starting the descent.
    D_hat = d_solver.init_dictionary(X)
    init_duration = time.time() - start
    z_kwargs = dict(verbose=verbose, **solver_z_kwargs)
    with get_z_encoder_for(
            X, d_solver.D_hat, n_atoms, n_times_atom, n_jobs,
            solver_z, z_kwargs, reg
    ) as z_encoder:
        if callable(callback):
            callback(z_encoder, [])
        # Stopping-criterion closure shared by all learning loops.
        end_iter_func = get_iteration_func(
            eps, stopping_pobj, callback, lmbd_max,
            name, verbose, raise_on_increase
        )
        # common parameters
        kwargs = dict(
            z_encoder=z_encoder, d_solver=d_solver, n_iter=n_iter,
            end_iter_func=end_iter_func, lmbd_max=lmbd_max,
            verbose=verbose, random_state=random_state, name=name
        )
        kwargs.update(algorithm_params)
        if algorithm == 'batch':
            pobj, times = _batch_learn(greedy=False, **kwargs)
        elif algorithm == "greedy":
            pobj, times = _batch_learn(greedy=True, **kwargs)
        elif algorithm == "online":
            pobj, times = _online_learn(**kwargs)
        elif algorithm == "stochastic":
            # For stochastic learning, set forgetting factor alpha of the
            # online algorithm to 0, making each step independent of previous
            # steps and set D-update max_iter to a low value (typically 1).
            kwargs['alpha'] = 0
            pobj, times = _online_learn(**kwargs)
        else:
            raise NotImplementedError(
                "Algorithm '{}' is not implemented to learn dictionary atoms."
                .format(algorithm))
        D_hat = d_solver.D_hat
        z_hat = z_encoder.get_z_hat()
        if sort_atoms:
            D_hat, z_hat = sort_atoms_by_explained_variances(
                D_hat, z_hat, n_channels=n_channels)
        # recompute z_hat with no regularization and keeping the support fixed
        if unbiased_z_hat:
            start_unbiased_z_hat = time.time()
            z_encoder.compute_z(unbiased_z_hat=True)
            z_hat = z_encoder.get_z_hat()
            if verbose > 1:
                print(
                    "[{}] Compute the final z_hat with support freeze in "
                    "{:.2f}s".format(name, time.time() - start_unbiased_z_hat))
        # Fold the initialization time into the first timing entry.
        times[0] += init_duration
    if verbose > 0:
        print("[%s] Fit in %.1fs" % (name, time.time() - start))
    # Rescale the solution to match the given scale of the problem
    z_hat *= std_X
    reg = z_encoder.reg * std_X
    return pobj, times, D_hat, z_hat, reg
def _batch_learn(z_encoder, d_solver, end_iter_func, n_iter=100,
                 lmbd_max='fixed', reg=None, verbose=0, greedy=False,
                 random_state=None, name="batch"):
    """Batch (or greedy) alternating minimization over z and D.

    Alternates a full z update and a full D update at each iteration,
    recording the objective value and wall time after each half-step.
    With ``greedy=True``, atoms are introduced one at a time.

    Returns (pobj, times): objective values and per-half-step durations.
    """
    n_atoms = d_solver.n_atoms
    if greedy:
        n_iter_by_atom = 1
        if n_iter < n_atoms * n_iter_by_atom:
            raise ValueError('The greedy method needs at least %d iterations '
                             'to learn %d atoms. Got only n_iter=%d. Please '
                             'increase n_iter.' % (
                                 n_iter_by_atom * n_atoms, n_atoms, n_iter))
    # monitor cost function
    times = [0]
    pobj = [z_encoder.get_cost()]
    for ii in range(n_iter):  # outer loop of coordinate descent
        if verbose == 1:
            msg = '.' if ((ii + 1) % 50 != 0) else '+\n'
            print(msg, end='')
            sys.stdout.flush()
        if verbose > 1:
            print('[{}] CD iterations {} / {}'.format(name, ii, n_iter))
        if greedy and ii % n_iter_by_atom == 0 and \
                d_solver.D_hat.shape[0] < n_atoms:
            # add a new atom every n_iter_by_atom iterations
            d_solver.add_one_atom(z_encoder)
        # Refresh the regularization when it tracks lambda_max ('per_atom'
        # and 'shared' every iteration, 'scaled' only once at init).
        if lmbd_max in ['per_atom', 'shared'] or (
                lmbd_max == 'scaled' and ii == 0
        ):
            z_encoder.update_reg(lmbd_max == 'per_atom')
            if verbose > 5:
                print('[{}] lambda = {:.3e}'.format(name,
                                                    np.mean(z_encoder.reg)))
        # Compute z update
        start = time.time()
        z_encoder.compute_z()
        # monitor cost function
        times.append(time.time() - start)
        pobj.append(z_encoder.get_cost())
        z_nnz = z_encoder.get_z_nnz()
        if verbose > 5:
            print(
                '[{}] Objective (z) : {:.3e} (sparsity: {:.3e})'
                .format(name, pobj[-1], z_nnz.mean())
            )
        if np.all(z_nnz == 0):
            import warnings
            warnings.warn("Regularization parameter `reg` is too large "
                          "and all the activations are zero. No atom has"
                          " been learned.", UserWarning)
            break
        # Compute D update
        start = time.time()
        d_solver.update_D(z_encoder)
        # monitor cost function
        times.append(time.time() - start)
        pobj.append(z_encoder.get_cost())
        # Atoms with no activation are re-drawn to avoid dead atoms.
        null_atom_indices = np.where(z_nnz == 0)[0]
        if len(null_atom_indices) > 0:
            k0 = null_atom_indices[0]
            d_solver.resample_atom(k0, z_encoder)
            if verbose > 5:
                print('[{}] Resampled atom {}'.format(name, k0))
        if verbose > 5:
            print('[{}] Objective (d) : {:.3e}'.format(name, pobj[-1]))
        # Convergence is only tested once all atoms have been introduced.
        if ((not greedy or d_solver.D_hat.shape[0] == n_atoms)
                and end_iter_func(z_encoder, pobj, ii)):
            break
    return pobj, times
def _online_learn(z_encoder, d_solver, end_iter_func, n_iter=100,
                  verbose=0, random_state=None, lmbd_max='fixed', reg=None,
                  alpha=.8, batch_selection='random', batch_size=1,
                  name="online"):
    """Online alternating minimization: partial z updates on mini-batches.

    Like ``_batch_learn`` but each iteration only re-encodes a batch of
    ``batch_size`` trials (picked at random or cyclically), weighting past
    sufficient statistics by the forgetting factor ``alpha``.

    Returns (pobj, times): objective values and per-half-step durations.
    """
    n_trials = z_encoder.n_trials
    # monitor cost function
    times = [0]
    pobj = [z_encoder.get_cost()]
    rng = check_random_state(random_state)
    for ii in range(n_iter):  # outer loop of coordinate descent
        if verbose == 1:
            msg = '.' if (ii % 50 != 0) else '+\n'
            print(msg, end='')
            sys.stdout.flush()
        if verbose > 1:
            print('[{}] CD iterations {} / {}'.format(name, ii, n_iter))
        # Refresh the regularization when it tracks lambda_max ('per_atom'
        # and 'shared' every iteration, 'scaled' only once at init).
        if lmbd_max in ['per_atom', 'shared'] or (
                lmbd_max == 'scaled' and ii == 0
        ):
            z_encoder.update_reg(lmbd_max == 'per_atom')
            if verbose > 5:
                print('[{}] lambda = {:.3e}'.format(name,
                                                    np.mean(z_encoder.reg)))
        # Compute z update
        start = time.time()
        # Select the batch of trials to re-encode for this iteration.
        if batch_selection == 'random':
            i0 = rng.choice(n_trials, batch_size, replace=False)
        elif batch_selection == 'cyclic':
            i_slice = (ii * batch_size) % n_trials
            i0 = slice(i_slice, i_slice + batch_size)
        else:
            raise NotImplementedError(
                "the '{}' batch_selection strategy for the online learning is "
                "not implemented.".format(batch_selection))
        z_encoder.compute_z_partial(i0, alpha)
        # monitor cost function
        times.append(time.time() - start)
        pobj.append(z_encoder.get_cost())
        z_nnz = z_encoder.get_z_nnz()
        if verbose > 5:
            print(
                '[{}] Objective (z) : {:.3e} (sparsity: {:.3e})'
                .format(name, pobj[-1], z_nnz.mean())
            )
        if np.all(z_nnz == 0):
            import warnings
            warnings.warn("Regularization parameter `reg` is too large "
                          "and all the activations are zero. No atoms has"
                          " been learned.", UserWarning)
            break
        # Compute D update
        start = time.time()
        d_solver.update_D(z_encoder)
        # monitor cost function
        times.append(time.time() - start)
        pobj.append(z_encoder.get_cost())
        # Atoms with no activation are re-drawn to avoid dead atoms.
        null_atom_indices = np.where(z_nnz == 0)[0]
        if len(null_atom_indices) > 0:
            k0 = null_atom_indices[0]
            d_solver.resample_atom(k0, z_encoder)
            if verbose > 5:
                print('[{}] Resampled atom {}'.format(name, k0))
        if verbose > 5:
            print('[{}] Objective (d) : {:.3e}'.format(name, pobj[-1]))
        if end_iter_func(z_encoder, pobj, ii):
            break
    return pobj, times
def get_iteration_func(eps, stopping_pobj, callback, lmbd_max, name, verbose,
raise_on_increase):
def end_iteration(z_encoder, pobj, iteration):
if callable(callback):
callback(z_encoder, pobj)
# Only check that the cost is always going down when the regularization
# parameter is fixed.
dz = (pobj[-3] - pobj[-2]) / min(pobj[-3], pobj[-2])
du = (pobj[-2] - pobj[-1]) / min(pobj[-2], pobj[-1])
if ((dz < eps or du < eps) and lmbd_max in ['fixed', 'scaled']):
if dz < 0 and raise_on_increase:
raise RuntimeError(
"The z update have increased the objective value by {}."
.format(dz))
if du < -1e-10 and dz > 1e-12 and raise_on_increase:
raise RuntimeError(
"The d update have increased the objective value by {}."
"(dz={})".format(du, dz))
if dz < eps and du < eps:
if verbose == 1:
print("")
print("[{}] Converged after {} iteration, (dz, du) "
"= {:.3e}, {:.3e}".format(name, iteration + 1, dz, du))
return True
if stopping_pobj is not None and pobj[-1] < stopping_pobj:
return True
return False
return end_iteration
| bsd-3-clause | 9e336332007c19965cb7ac5731bfa34a | 37.359375 | 89 | 0.557288 | 3.813804 | false | false | false | false |
django-nonrel/django-nonrel | tests/modeltests/many_to_many/tests.py | 92 | 17756 | from django.test import TestCase
from models import Article, Publication
class ManyToManyTests(TestCase):
    def setUp(self):
        """Build the fixture graph shared by every test.

        Four publications and four articles: a1 appears in p1 only, a2 in
        all four publications, a3 and a4 in p2 (Science News) only.
        """
        # Create a couple of Publications.
        self.p1 = Publication.objects.create(id=None, title='The Python Journal')
        self.p2 = Publication.objects.create(id=None, title='Science News')
        self.p3 = Publication.objects.create(id=None, title='Science Weekly')
        self.p4 = Publication.objects.create(title='Highlights for Children')
        self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
        self.a1.publications.add(self.p1)
        self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
        self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
        self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
        self.a3.publications.add(self.p2)
        self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
        self.a4.publications.add(self.p2)
def test_add(self):
# Create an Article.
a5 = Article(id=None, headline='Django lets you reate Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
self.assertRaises(TypeError, a6.publications.add, a5)
# Add a Publication directly via publications.add by using keyword arguments.
p4 = a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
new_article = self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
def test_selects(self):
# We can perform kwarg queries across m2m relationships
self.assertQuerysetEqual(
Article.objects.filter(publications__id__exact=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__pk=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science"),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science").distinct(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# The count() function respects distinct() as well.
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id,self.p2.id]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id,self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1,self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id,self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id,self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1,self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
def test_bulk_delete(self):
# Bulk delete some Publications - references to deleted publications should go
Publication.objects.filter(title__startswith='Science').delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
# Bulk delete some articles - references to deleted objects should go
q = Article.objects.filter(headline__startswith='Django')
self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
q.delete()
# After the delete, the QuerySet cache needs to be cleared,
# and the referenced objects should be gone
self.assertQuerysetEqual(q, [])
self.assertQuerysetEqual(self.p1.article_set.all(),
['<Article: NASA uses Python>'])
def test_remove(self):
# Removing publication from an article:
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.a4.publications.remove(self.p2)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And from the other end
self.p2.article_set.remove(self.a3)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a3.publications.all(), [])
def test_assign(self):
# Relation sets can be assigned. Assignment clears any existing set members
self.p2.article_set = [self.a4, self.a3]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
# An alternate to calling clear() is to assign the empty set
self.p2.article_set = []
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications = []
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign_ids(self):
# Relation sets can also be set using primary key values
self.p2.article_set = [self.a4.id, self.a3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
def test_clear(self):
# Relation sets can be cleared:
self.p2.article_set.clear()
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And you can clear from the other end
self.p2.article_set.add(self.a3, self.a4)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
[
'<Publication: Science News>',
])
self.a4.publications.clear()
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
| bsd-3-clause | af5960c635ad819c08633ea313ff9503 | 45.239583 | 113 | 0.566625 | 4.595238 | false | false | false | false |
batiste/django-page-cms | pages/views.py | 1 | 8526 | """Default example views"""
from pages import settings
from pages.models import Page, PageAlias
from pages.phttp import get_language_from_request, remove_slug
from pages.urlconf_registry import get_urlconf
from pages.utils import get_placeholders
from django.http import Http404, HttpResponsePermanentRedirect
from django.contrib.sitemaps import Sitemap
from django.urls import resolve, Resolver404
from django.utils import translation
from django.shortcuts import render
# Flat list of the configured language codes; used by Details.__call__ to
# detect (and strip) a language prefix at the start of the requested path.
LANGUAGE_KEYS = [key for (key, value) in settings.PAGE_LANGUAGES]
class Details(object):
    """
    This class based view get the root pages for navigation
    and the current page to display if there is any.

    All is rendered with the current page's template.
    """

    def __call__(
            self, request, path=None, lang=None, delegation=True,
            **kwargs):
        """Resolve ``path`` to a page and render it.

        ``kwargs`` may carry URL-conf extras such as ``template_name`` or
        ``only_context``; they are also forwarded to :meth:`delegate`.
        """
        current_page = False

        if path is None:
            raise ValueError(
                "pages.views.Details class view requires the path argument. "
                "Check your urls.py file.")

        # for the ones that might have forgotten to pass the language
        # the language is now removed from the page path
        if settings.PAGE_USE_LANGUAGE_PREFIX and lang is None:
            maybe_lang = path.split("/")[0]
            if maybe_lang in LANGUAGE_KEYS:
                lang = maybe_lang
                path = path[(len(lang) + 1):]

        lang = self.choose_language(lang, request)
        pages_navigation = self.get_navigation(request, path, lang)

        context = {
            'path': path,
            'pages_navigation': pages_navigation,
            'lang': lang,
        }

        is_staff = self.is_user_staff(request)
        current_page = self.resolve_page(request, context, is_staff)

        # Do redirect to new page (if enabled)
        if settings.PAGE_REDIRECT_OLD_SLUG and current_page:
            url = current_page.get_absolute_url(language=lang)
            slug = current_page.get_complete_slug(language=lang)
            current_url = request.get_full_path()
            if url != path and url + '/' != current_url and slug != path:
                return HttpResponsePermanentRedirect(url)

        # if no pages has been found, we will try to find it via an Alias
        if not current_page:
            redirection = self.resolve_alias(request, path, lang)
            if redirection:
                return redirection
        else:
            context['current_page'] = current_page

            # If unauthorized to see the pages, raise a 404, That can
            # happen with expired pages.
            if not is_staff and not current_page.visible:
                raise Http404

            redirection = self.resolve_redirection(request, context)
            if redirection:
                return redirection

        self.extra_context(request, context)

        if delegation and current_page.delegate_to:
            answer = self.delegate(request, context, delegation, **kwargs)
            if answer:
                return answer

        if kwargs.get('only_context', False):
            return context
        template_name = kwargs.get(
            'template_name',
            self.get_template(request, context))
        context['template_name'] = template_name

        return render(request, template_name, context)

    def resolve_page(self, request, context, is_staff):
        """Return the appropriate page according to the path."""
        path = context['path']
        lang = context['lang']
        page = Page.objects.from_path(
            path, lang,
            exclude_drafts=(not is_staff))
        if page:
            return page
        # if the complete path didn't worked out properly
        # and if didn't used PAGE_USE_STRICT_URL setting we gonna
        # try to see if it might be a delegation page.
        # To do that we remove the right part of the url and try again
        # to find a page that match
        if not settings.PAGE_USE_STRICT_URL:
            path = remove_slug(path)
            while path is not None:
                page = Page.objects.from_path(
                    path, lang,
                    exclude_drafts=(not is_staff))
                # find a match. Is the page delegating?
                if page:
                    if page.delegate_to:
                        return page
                path = remove_slug(path)

        return None

    def resolve_alias(self, request, path, lang):
        """Redirect to the target of a matching PageAlias, or raise 404."""
        alias = PageAlias.objects.from_path(request, path, lang)
        if alias:
            url = alias.page.get_url_path(lang)
            return HttpResponsePermanentRedirect(url)
        raise Http404

    def resolve_redirection(self, request, context):
        """Check for redirections."""
        current_page = context['current_page']
        lang = context['lang']
        if current_page.redirect_to_url:
            return HttpResponsePermanentRedirect(current_page.redirect_to_url)

        if current_page.redirect_to:
            return HttpResponsePermanentRedirect(
                current_page.redirect_to.get_url_path(lang))

    def get_navigation(self, request, path, lang):
        """Get the pages that are at the root level."""
        return Page.objects.navigation().order_by("tree_id")

    def choose_language(self, lang, request):
        """Deal with the multiple corner case of choosing the language."""
        # Can be an empty string or None
        if not lang:
            lang = get_language_from_request(request)

        # Raise a 404 if the language is not in not in the list
        if lang not in [key for (key, value) in settings.PAGE_LANGUAGES]:
            raise Http404

        # We're going to serve CMS pages in language lang;
        # make django gettext use that language too
        if lang and translation.check_for_language(lang):
            translation.activate(lang)

        return lang

    def get_template(self, request, context):
        """Just there in case you have special business logic."""
        return context['current_page'].get_template()

    def is_user_staff(self, request):
        """Return True if the user is staff."""
        return request.user.is_authenticated and request.user.is_staff

    def extra_context(self, request, context):
        """Call the PAGE_EXTRA_CONTEXT function if there is one."""
        if settings.PAGE_EXTRA_CONTEXT:
            context.update(settings.PAGE_EXTRA_CONTEXT())

    def delegate(self, request, context, delegation=True, **kwargs):
        """Dispatch the rest of the URL to the delegated urlconf.

        BUGFIX: ``__call__`` forwards its extra keyword arguments here
        (e.g. ``template_name``/``only_context``); the previous signature
        rejected them with a TypeError. They are now accepted and ignored.
        """
        # if there is a delegation to another view,
        # call this view instead.
        current_page = context['current_page']
        path = context['path']
        delegate_path = path.replace(
            current_page.get_complete_slug(hideroot=False), "")
        # it seems that the urlconf path have to start with a slash
        if len(delegate_path) == 0:
            delegate_path = "/"
        if delegate_path.startswith("//"):
            delegate_path = delegate_path[1:]
        urlconf = get_urlconf(current_page.delegate_to)
        try:
            result = resolve(delegate_path, urlconf)
        except Resolver404:
            raise Http404
        if result:
            view, args, view_kwargs = result
            view_kwargs.update(context)
            # for now the view is called as is.
            return view(request, *args, **view_kwargs)
# Module-level Details view instance, shared by every request routed to it.
# Because it is a singleton, the handlers must stay stateless: never store
# request-specific data on the instance.
# ie: NO self.attribute = something
details = Details()
class PageSitemap(Sitemap):
    """Sitemap over published pages, exposed in the default language only."""

    changefreq = "weekly"
    priority = 0.5

    def items(self):
        # One sitemap entry per published page.
        published_pages = Page.objects.published()
        return published_pages

    def lastmod(self, page):
        # ``page`` is an item returned by items() above.
        return page.last_modification_date
class PageItemProxy(object):
    """Pair a page with one of its languages so a sitemap can emit one
    distinct URL per translation of the same page."""

    def __init__(self, page, lang):
        self.page = page
        self.lang = lang

    def get_absolute_url(self):
        # Delegate to the wrapped page, pinning the wrapped language.
        page_url = self.page.get_absolute_url(language=self.lang)
        return page_url
class MultiLanguagePageSitemap(Sitemap):
    """Sitemap exposing every published page once per language it has."""

    changefreq = "weekly"
    priority = 0.5

    def items(self):
        # Flatten (page, language) pairs into proxies so that each
        # translation contributes its own URL.
        return [PageItemProxy(page, lang)
                for page in Page.objects.published()
                for lang in page.get_languages()]

    def lastmod(self, item):
        # ``item`` is a PageItemProxy; the modification date lives on the
        # wrapped page.
        return item.page.last_modification_date
| bsd-3-clause | d2f4a2bffba458d944f765b35aac23da | 33.518219 | 78 | 0.612714 | 4.358896 | false | false | false | false |
django-nonrel/django-nonrel | django/utils/version.py | 320 | 1361 | import django
import os.path
import re
def get_svn_revision(path=None):
    """
    Returns the SVN revision in the form SVN-XXXX,
    where XXXX is the revision number.

    Returns SVN-unknown if anything goes wrong, such as an unexpected
    format of internal SVN files.

    If path is provided, it should be a directory whose SVN info you want to
    inspect. If it's not provided, this will use the root django/ package
    directory.
    """
    rev = None
    if path is None:
        path = django.__path__[0]
    entries_path = '%s/.svn/entries' % path

    try:
        # Close the handle explicitly: the previous version leaked an open
        # file object on every call.
        entries_file = open(entries_path, 'r')
        try:
            entries = entries_file.read()
        finally:
            entries_file.close()
    except IOError:
        # No .svn/entries file -> not an SVN checkout; fall through to
        # "SVN-unknown" below.
        pass
    else:
        # Versions >= 7 of the entries file are flat text. The first line is
        # the version number. The next set of digits after 'dir' is the revision.
        # (Raw strings avoid invalid-escape warnings for \d and \s.)
        if re.match(r'(\d+)', entries):
            rev_match = re.search(r'\d+\s+dir\s+(\d+)', entries)
            if rev_match:
                rev = rev_match.groups()[0]
        # Older XML versions of the file specify revision as an attribute of
        # the first entries node.
        else:
            from xml.dom import minidom
            dom = minidom.parse(entries_path)
            rev = dom.getElementsByTagName('entry')[0].getAttribute('revision')

    if rev:
        return u'SVN-%s' % rev
    return u'SVN-unknown'
| bsd-3-clause | 73c6944b52dd1320aa5e09161e6cddb3 | 31.404762 | 81 | 0.606907 | 4.07485 | false | false | false | false |
django-nonrel/django-nonrel | django/contrib/admin/widgets.py | 156 | 12061 | """
Form Widget classes specific to the Django admin site.
"""
import django.utils.copycompat as copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page
    """
    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
              settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")

    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        # verbose_name and is_stacked are embedded in the SelectFilter.init()
        # call that render() emits.
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None, choices=()):
        if attrs is None: attrs = {}
        # No separator: the stacked variant gets class "selectfilterstacked".
        attrs['class'] = 'selectfilter'
        if self.is_stacked: attrs['class'] += 'stacked'
        output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
        output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        # Double quotes in verbose_name are escaped before being inlined
        # into the generated JavaScript string.
        output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
            (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
        return mark_safe(u''.join(output))
class AdminDateWidget(forms.DateInput):
    """DateInput wired up with the admin's JavaScript calendar shortcuts."""

    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")

    def __init__(self, attrs=None, format=None):
        # FIX: default changed from the mutable ``{}`` to ``None`` (the
        # classic shared-mutable-default pitfall); behavior is unchanged
        # because the argument was never used.
        # NOTE(review): caller-supplied ``attrs`` have always been discarded
        # in favour of the fixed admin attrs below -- preserved as-is.
        super(AdminDateWidget, self).__init__(attrs={'class': 'vDateField', 'size': '10'}, format=format)
class AdminTimeWidget(forms.TimeInput):
    """TimeInput wired up with the admin's JavaScript clock shortcuts."""

    class Media:
        js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
              settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")

    def __init__(self, attrs=None, format=None):
        # FIX: default changed from the mutable ``{}`` to ``None`` (the
        # classic shared-mutable-default pitfall); behavior is unchanged
        # because the argument was never used.
        # NOTE(review): caller-supplied ``attrs`` have always been discarded
        # in favour of the fixed admin attrs below -- preserved as-is.
        super(AdminTimeWidget, self).__init__(attrs={'class': 'vTimeField', 'size': '8'}, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        # Call MultiWidget.__init__ directly (not SplitDateTimeWidget's)
        # because we want to supply the admin date/time widgets ourselves.
        forms.MultiWidget.__init__(
            self, [AdminDateWidget, AdminTimeWidget], attrs)

    def format_output(self, rendered_widgets):
        # Wrap the two sub-widgets in a labelled paragraph.
        date_widget, time_widget = rendered_widgets[0], rendered_widgets[1]
        html = u'<p class="datetime">%s %s<br />%s %s</p>' % (
            _('Date:'), date_widget, _('Time:'), time_widget)
        return mark_safe(html)
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """Renderer wrapping a field's radio inputs in an attribute-carrying <ul>."""

    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        items = [u'<li>%s</li>' % force_unicode(widget) for widget in self]
        markup = u'<ul%s>\n%s\n</ul>' % (flatatt(self.attrs),
                                         u'\n'.join(items))
        return mark_safe(markup)
class AdminRadioSelect(forms.RadioSelect):
    # Swap in the admin-specific <ul>-based renderer for the default one.
    renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
    # Wrap the stock ClearableFileInput markup in admin-specific containers
    # so the admin CSS can style the initial value and the clear checkbox.
    template_with_initial = (u'<p class="file-upload">%s</p>'
                            % forms.ClearableFileInput.template_with_initial)
    template_with_clear = (u'<span class="clearable-file-input">%s</span>'
                            % forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    params = {}
    # Anything falsy (None, {}) or without .items() yields no parameters.
    if lookups and hasattr(lookups, 'items'):
        for key, value in lookups.items():
            if isinstance(value, list):
                # Lists become a comma-separated string of their items.
                params[key] = u','.join([str(item) for item in value])
            elif isinstance(value, bool):
                # Booleans map to '0'/'1', mirroring
                # django.db.fields.BooleanField.get_prep_lookup.
                params[key] = ('0', '1')[value]
            else:
                params[key] = unicode(value)
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    def __init__(self, rel, attrs=None, using=None):
        # rel: the field's relation object (supplies .to, .limit_choices_to
        # and .get_related_field()); using: optional database alias.
        self.rel = rel
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        # Relative URL of the related model's changelist (popup target).
        related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
        params = self.url_parameters()
        if params:
            url = u'?' + u'&'.join([u'%s=%s' % (k, v) for k, v in params.items()])
        else:
            url = u''
        if "class" not in attrs:
            attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
        output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        output.append(u'<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
            (related_url, url, name))
        output.append(u'<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
        if value:
            # Show a human-readable label next to the raw id.
            output.append(self.label_for_value(value))
        return mark_safe(u''.join(output))

    def base_url_parameters(self):
        # Query parameters derived from the field's limit_choices_to, so the
        # popup changelist is filtered to valid choices.
        return url_params_from_lookup_dict(self.rel.limit_choices_to)

    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        # Tell the popup which field the lookup should return.
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params

    def label_for_value(self, value):
        # Render a truncated unicode() of the referenced object; fall back to
        # an empty string on an invalid id or a missing object.
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
            return '&nbsp;<strong>%s</strong>' % escape(truncate_words(obj, 14))
        except (ValueError, self.rel.to.DoesNotExist):
            return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        attrs['class'] = 'vManyToManyRawIdAdminField'
        # Collapse the list of ids into the single comma-separated string the
        # text input expects.
        if value:
            joined = ','.join([force_unicode(pk) for pk in value])
        else:
            joined = ''
        return super(ManyToManyRawIdWidget, self).render(name, joined, attrs)

    def url_parameters(self):
        # Unlike the FK widget, no TO_FIELD hint is added for m2m popups.
        return self.base_url_parameters()

    def label_for_value(self, value):
        return ''

    def value_from_datadict(self, data, files, name):
        # Absent or empty submissions yield None, matching the previous
        # implicit-return behavior.
        raw = data.get(name)
        return raw.split(',') if raw else None

    def _has_changed(self, initial, data):
        initial = [] if initial is None else initial
        data = [] if data is None else data
        if len(initial) != len(data):
            return True
        # Compare pairwise on the unicode form of each primary key.
        return any(force_unicode(pk1) != force_unicode(pk2)
                   for pk1, pk2 in zip(initial, data))
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.
    """
    def __init__(self, widget, rel, admin_site, can_add_related=None):
        # Mirror the wrapped widget's public attributes so the wrapper is a
        # drop-in replacement wherever the original widget was expected.
        self.is_hidden = widget.is_hidden
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.to in admin_site._registry
        self.can_add_related = can_add_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site

    def __deepcopy__(self, memo):
        # Copy the wrapper shallowly but deep-copy the wrapped widget, then
        # re-alias its attrs (mirrors the aliasing done in __init__).
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj

    def _media(self):
        return self.widget.media
    media = property(_media)

    def render(self, name, value, *args, **kwargs):
        # Render the wrapped widget, then append the "add another" popup link
        # when the related model is addable through this admin site.
        rel_to = self.rel.to
        info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())
        try:
            related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
        except NoReverseMatch:
            # Fall back to building the add URL from the site's root path.
            info = (self.admin_site.root_path, rel_to._meta.app_label, rel_to._meta.object_name.lower())
            related_url = '%s%s/%s/add/' % info
        self.widget.choices = self.choices
        output = [self.widget.render(name, value, *args, **kwargs)]
        if self.can_add_related:
            # TODO: "id_" is hard-coded here. This should instead use the correct
            # API to determine the ID dynamically.
            output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
                (related_url, name))
            output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
        return mark_safe(u''.join(output))

    def build_attrs(self, extra_attrs=None, **kwargs):
        "Helper function for building an attribute dictionary."
        # NOTE(review): the caller-supplied extra_attrs is discarded here
        # (None is forwarded instead). Looks unintentional, but preserved
        # as-is -- confirm against callers before changing.
        self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
        return self.attrs

    def value_from_datadict(self, data, files, name):
        # The remaining hooks delegate straight to the wrapped widget.
        return self.widget.value_from_datadict(data, files, name)

    def _has_changed(self, initial, data):
        return self.widget._has_changed(initial, data)

    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea carrying the admin's ``vLargeTextField`` CSS class."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vLargeTextField'}
        # Caller-supplied attrs are merged on top and may override the class.
        if attrs is not None:
            defaults.update(attrs)
        super(AdminTextareaWidget, self).__init__(attrs=defaults)
class AdminTextInputWidget(forms.TextInput):
    """Text input carrying the admin's ``vTextField`` CSS class."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vTextField'}
        # Caller-supplied attrs are merged on top and may override the class.
        if attrs is not None:
            defaults.update(attrs)
        super(AdminTextInputWidget, self).__init__(attrs=defaults)
class AdminURLFieldWidget(forms.TextInput):
    """Text input carrying the admin's ``vURLField`` CSS class."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vURLField'}
        # Caller-supplied attrs are merged on top and may override the class.
        if attrs is not None:
            defaults.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=defaults)
class AdminIntegerFieldWidget(forms.TextInput):
    """Text input carrying the admin's ``vIntegerField`` CSS class."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vIntegerField'}
        # Caller-supplied attrs are merged on top and may override the class.
        if attrs is not None:
            defaults.update(attrs)
        super(AdminIntegerFieldWidget, self).__init__(attrs=defaults)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input carrying the admin's ``vCommaSeparatedIntegerField`` class."""

    def __init__(self, attrs=None):
        defaults = {'class': 'vCommaSeparatedIntegerField'}
        # Caller-supplied attrs are merged on top and may override the class.
        if attrs is not None:
            defaults.update(attrs)
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=defaults)
| bsd-3-clause | 0cdcdfcfdff0024fe46575d0f797506d | 39.746622 | 157 | 0.620595 | 3.807134 | false | false | false | false |
django-nonrel/django-nonrel | django/db/models/fields/related.py | 20 | 54904 | from django.conf import settings
from django.db import connection, router, transaction, connections
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_unicode
from django.utils.translation import (ugettext_lazy as _, string_concat,
ungettext, ugettext)
from django.utils.functional import curry
from django.core import exceptions
from django import forms
# Sentinel string used to declare a recursive relation, e.g. ForeignKey("self").
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
# Maps (app_label, model_name) -> [(cls, field, operation), ...] for string
# relations whose target model has not been loaded yet (see add_lazy_relation).
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
    """
    Adds a lookup on ``cls`` when a related field is defined using a string,
    i.e.::

        class MyModel(Model):
            fk = ForeignKey("AnotherModel")

    This string can be:

        * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
          relation.

        * The name of a model (i.e "AnotherModel") to indicate another model in
          the same app.

        * An app-label and model name (i.e. "someapp.AnotherModel") to indicate
          another model in a different app.

    If the other model hasn't yet been loaded -- almost a given if you're using
    lazy relationships -- then the relation won't be set up until the
    class_prepared signal fires at the end of model initialization.

    operation is the work that must be performed once the relation can be resolved;
    it is called as ``operation(field, resolved_model, cls)``.
    """
    # Check for recursive relations
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        app_label = cls._meta.app_label
        model_name = cls.__name__
    else:
        # Look for an "app.Model" relation
        try:
            app_label, model_name = relation.split(".")
        except ValueError:
            # If we can't split, assume a model in current app
            # (a dotless string, or a string with several dots, both land here).
            app_label = cls._meta.app_label
            model_name = relation
        except AttributeError:
            # If it doesn't have a split it's actually a model class
            app_label = relation._meta.app_label
            model_name = relation._meta.object_name
    # Try to look up the related model, and if it's already loaded resolve the
    # string right away. If get_model returns None, it means that the related
    # model isn't loaded yet, so we need to pend the relation until the class
    # is prepared.
    model = get_model(app_label, model_name, False)
    if model:
        operation(field, model, cls)
    else:
        key = (app_label, model_name)
        value = (cls, field, operation)
        pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
    """
    Resolve any lazy relations queued for the sending model class.
    Connected to the ``class_prepared`` signal.
    """
    lookup_key = (sender._meta.app_label, sender.__name__)
    for model_class, field, operation in pending_lookups.pop(lookup_key, []):
        operation(field, sender, model_class)
# Resolve queued lazy relations as soon as each model class is fully prepared.
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
    """
    Mixin shared by all relation fields (ForeignKey, OneToOneField,
    ManyToManyField). Handles deferred resolution of string model references
    and conversion of lookup values (possibly model instances) down to raw
    primary-key values.
    """
    def contribute_to_class(self, cls, name):
        # Called by ModelBase when this field is added to the model class.
        sup = super(RelatedField, self)
        # Store the opts for related_query_name()
        self.opts = cls._meta
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name)
        if not cls._meta.abstract and self.rel.related_name:
            # Interpolate the '%(class)s' / '%(app_label)s' placeholders that
            # related_name declarations on abstract base classes may use.
            self.rel.related_name = self.rel.related_name % {
                    'class': cls.__name__.lower(),
                    'app_label': cls._meta.app_label.lower(),
                }
        other = self.rel.to
        if isinstance(other, basestring) or other._meta.pk is None:
            # Target given as a string (or not yet fully prepared): defer the
            # wiring until the target class fires class_prepared.
            def resolve_related_class(field, model, cls):
                field.rel.to = model
                field.do_related_class(model, cls)
            add_lazy_relation(cls, self, other, resolve_related_class)
        else:
            self.do_related_class(other, cls)
    def set_attributes_from_rel(self):
        # Derive name/verbose_name/field_name defaults from the target model.
        self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.rel.to._meta.verbose_name
        self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
    def do_related_class(self, other, cls):
        # Finish wiring once the target model class 'other' is known.
        self.set_attributes_from_rel()
        self.related = RelatedObject(other, cls, self)
        if not cls._meta.abstract:
            self.contribute_to_related_class(other, self.related)
    def get_prep_lookup(self, lookup_type, value):
        """Convert *value* (which may be a model instance) into a raw value
        suitable for the given lookup type, tracing down to the PK."""
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()
        # FIXME: lt and gt are explicitly allowed to make
        # get_(next/prev)_by_date work; other lookups are not allowed since that
        # gets messy pretty quick. This is a good candidate for some refactoring
        # in the future.
        if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
            return self._pk_trace(value, 'get_prep_lookup', lookup_type)
        if lookup_type in ('range', 'in'):
            return [self._pk_trace(v, 'get_prep_lookup', lookup_type) for v in value]
        elif lookup_type == 'isnull':
            return []
        raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        """Like get_prep_lookup(), but produces database-ready values and
        wraps subquery-like values in a QueryWrapper."""
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)
        # FIXME: lt and gt are explicitly allowed to make
        # get_(next/prev)_by_date work; other lookups are not allowed since that
        # gets messy pretty quick. This is a good candidate for some refactoring
        # in the future.
        if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
            return [self._pk_trace(value, 'get_db_prep_lookup', lookup_type,
                            connection=connection, prepared=prepared)]
        if lookup_type in ('range', 'in'):
            return [self._pk_trace(v, 'get_db_prep_lookup', lookup_type,
                            connection=connection, prepared=prepared)
                    for v in value]
        elif lookup_type == 'isnull':
            return []
        raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
    def _pk_trace(self, value, prep_func, lookup_type, **kwargs):
        # Value may be a primary key, or an object held in a relation.
        # If it is an object, then we need to get the primary key value for
        # that object. In certain conditions (especially one-to-one relations),
        # the primary key may itself be an object - so we need to keep drilling
        # down until we hit a value that can be used for a comparison.
        v = value
        # In the case of an FK to 'self', this check allows to_field to be used
        # for both forwards and reverse lookups across the FK. (For normal FKs,
        # it's only relevant for forward lookups).
        if isinstance(v, self.rel.to):
            field_name = getattr(self.rel, "field_name", None)
        else:
            field_name = None
        try:
            # Drill down through chained relations until getattr() stops
            # yielding model-like objects (AttributeError ends the loop).
            while True:
                if field_name is None:
                    field_name = v._meta.pk.name
                v = getattr(v, field_name)
                field_name = None
        except AttributeError:
            pass
        except exceptions.ObjectDoesNotExist:
            v = None
        # Find the ultimate (non-relational) field this FK chain points at,
        # so its prep function can be applied to the traced value.
        field = self
        while field.rel:
            if hasattr(field.rel, 'field_name'):
                field = field.rel.to._meta.get_field(field.rel.field_name)
            else:
                field = field.rel.to._meta.pk
        if lookup_type in ('range', 'in'):
            v = [v]
        v = getattr(field, prep_func)(lookup_type, v, **kwargs)
        if isinstance(v, list):
            v = v[0]
        return v
    def related_query_name(self):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_name or self.opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        self.cache_name = related.get_cache_name()
    def __get__(self, instance, instance_type=None):
        # Class access returns the descriptor itself; instance access returns
        # the related object, fetched once and cached on the instance.
        if instance is None:
            return self
        try:
            return getattr(instance, self.cache_name)
        except AttributeError:
            params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
            db = router.db_for_read(self.related.model, instance=instance)
            rel_obj = self.related.model._base_manager.using(db).get(**params)
            setattr(instance, self.cache_name, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name)
        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null == False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                                (instance._meta.object_name, self.related.get_accessor_name()))
        elif value is not None and not isinstance(value, self.related.model):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                                (value, instance._meta.object_name,
                                 self.related.get_accessor_name(), self.related.opts.object_name))
        elif value is not None:
            # Propagate the database binding between the two instances so the
            # router can veto cross-database relations.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
                                        (value, instance._state.db, value._state.db))
        # Set the value of the related field to the value of the related object's related field
        # NOTE(review): if value is None (allowed when null=True) this setattr
        # raises AttributeError on None — looks like a latent bug; confirm.
        setattr(value, self.related.field.attname, getattr(instance, self.related.field.rel.get_related_field().attname))
        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        self.field = field_with_rel
    def __get__(self, instance, instance_type=None):
        # Class access returns the descriptor; instance access returns the
        # related object, fetched once and cached on the instance.
        if instance is None:
            return self
        cache_name = self.field.get_cache_name()
        try:
            return getattr(instance, cache_name)
        except AttributeError:
            val = getattr(instance, self.field.attname)
            if val is None:
                # If NULL is an allowed value, return it.
                if self.field.null:
                    return None
                raise self.field.rel.to.DoesNotExist
            other_field = self.field.rel.get_related_field()
            if other_field.rel:
                params = {'%s__pk' % self.field.rel.field_name: val}
            else:
                params = {'%s__exact' % self.field.rel.field_name: val}
            # If the related manager indicates that it should be used for
            # related fields, respect that.
            rel_mgr = self.field.rel.to._default_manager
            db = router.db_for_read(self.field.rel.to, instance=instance)
            if getattr(rel_mgr, 'use_for_related_fields', False):
                rel_obj = rel_mgr.using(db).get(**params)
            else:
                rel_obj = QuerySet(self.field.rel.to).using(db).get(**params)
            setattr(instance, cache_name, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        if instance is None:
            # Bug fix: this previously read ``self._field.name`` — there is no
            # ``_field`` attribute on this class, so the intended message was
            # replaced by a misleading "no attribute '_field'" AttributeError.
            raise AttributeError("%s must be accessed via instance" % self.field.name)
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and not self.field.null:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                                (instance._meta.object_name, self.field.name))
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                                (value, instance._meta.object_name,
                                 self.field.name, self.field.rel.to._meta.object_name))
        elif value is not None:
            # Propagate the database binding between the two instances so the
            # router can veto cross-database relations.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
                                        (value, instance._state.db, value._state.db))
        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.field.get_cache_name(), None)
            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related:
                cache_name = self.field.related.get_cache_name()
                try:
                    delattr(related, cache_name)
                except AttributeError:
                    pass
        # Set the value of the related field
        try:
            val = getattr(value, self.field.rel.get_related_field().attname)
        except AttributeError:
            val = None
        setattr(instance, self.field.attname, val)
        # Since we already know what the related object is, seed the related
        # object cache now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related   # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        # Instance access yields a manager bound to this instance, built on
        # the related model's default manager class.
        if instance is None:
            return self
        return self.create_manager(instance,
                self.related.model._default_manager.__class__)
    def __set__(self, instance, value):
        # Assigning an iterable replaces (nullable FK) or extends the set.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)
    def delete_manager(self, instance):
        """
        Returns a queryset based on the related model's base manager (rather
        than the default manager, as returned by __get__). Used by
        Model.delete().
        """
        return self.create_manager(instance,
                self.related.model._base_manager.__class__)
    def create_manager(self, instance, superclass):
        """
        Creates the managers used by other methods (__get__() and delete()).
        """
        # rel_field/rel_model are closed over by the manager class below.
        rel_field = self.related.field
        rel_model = self.related.model
        class RelatedManager(superclass):
            def get_query_set(self):
                db = self._db or router.db_for_read(rel_model, instance=instance)
                return superclass.get_query_set(self).using(db).filter(**(self.core_filters))
            def add(self, *objs):
                # Point each object's FK at the bound instance and save it.
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    setattr(obj, rel_field.name, instance)
                    obj.save()
            add.alters_data = True
            def create(self, **kwargs):
                kwargs.update({rel_field.name: instance})
                db = router.db_for_write(rel_model, instance=instance)
                return super(RelatedManager, self.db_manager(db)).create(**kwargs)
            create.alters_data = True
            def get_or_create(self, **kwargs):
                # Update kwargs with the related object that this
                # ForeignRelatedObjectsDescriptor knows about.
                kwargs.update({rel_field.name: instance})
                db = router.db_for_write(rel_model, instance=instance)
                return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            get_or_create.alters_data = True
            # remove() and clear() are only provided if the ForeignKey can have a value of null.
            if rel_field.null:
                def remove(self, *objs):
                    val = getattr(instance, rel_field.rel.get_related_field().attname)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if getattr(obj, rel_field.attname) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, instance))
                remove.alters_data = True
                def clear(self):
                    for obj in self.all():
                        setattr(obj, rel_field.name, None)
                        obj.save()
                clear.alters_data = True
        manager = RelatedManager()
        attname = rel_field.rel.get_related_field().name
        manager.core_filters = {'%s__%s' % (rel_field.name, attname):
                getattr(instance, attname)}
        manager.model = self.related.model
        return manager
def create_many_related_manager(superclass, rel=False):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects.

    ``rel`` is the ManyToManyRel describing the relation; the factory closes
    over it (and its ``through`` model) in the generated manager class.
    NOTE(review): the ``rel=False`` default is never usable (``rel.through``
    would fail) — callers always pass a real rel; presumably historical.
    """
    through = rel.through
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                join_table=None, source_field_name=None, target_field_name=None,
                reverse=False):
            super(ManyRelatedManager, self).__init__()
            self.core_filters = core_filters
            self.model = model
            self.symmetrical = symmetrical
            self.instance = instance
            # Column names in the join table for the two ends of the relation.
            self.source_field_name = source_field_name
            self.target_field_name = target_field_name
            self.through = through
            self._pk_val = self.instance.pk
            self.reverse = reverse
            if self._pk_val is None:
                raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
        def get_query_set(self):
            db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
            return superclass.get_query_set(self).using(db)._next_is_sticky().filter(**(self.core_filters))
        # If the ManyToMany relation has an intermediary model,
        # the add and remove methods do not exist.
        if rel.through._meta.auto_created:
            def add(self, *objs):
                self._add_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
            add.alters_data = True
            def remove(self, *objs):
                self._remove_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
                if self.symmetrical:
                    self._remove_items(self.target_field_name, self.source_field_name, *objs)
            remove.alters_data = True
        def clear(self):
            self._clear_items(self.source_field_name)
            # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
            if self.symmetrical:
                self._clear_items(self.target_field_name)
        clear.alters_data = True
        def create(self, **kwargs):
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not rel.through._meta.auto_created:
                opts = through._meta
                raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = \
                super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True
        def _add_items(self, source_field_name, target_field_name, *objs):
            # join_table: name of the m2m link table
            # source_field_name: the PK fieldname in join_table for the source object
            # target_field_name: the PK fieldname in join_table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                           raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                               (obj, self.instance._state.db, obj._state.db))
                        new_ids.add(obj.pk)
                    elif isinstance(obj, Model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    else:
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                # Filter out ids already present in the join table so rows are
                # not duplicated.
                vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
                vals = vals.filter(**{
                    source_field_name: self._pk_val,
                    '%s__in' % target_field_name: new_ids,
                })
                new_ids = new_ids - set(vals)
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action='pre_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
                # Add the ones that aren't there already
                for obj_id in new_ids:
                    self.through._default_manager.using(db).create(**{
                        '%s_id' % source_field_name: self._pk_val,
                        '%s_id' % target_field_name: obj_id,
                    })
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action='post_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to remove
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        old_ids.add(obj.pk)
                    else:
                        old_ids.add(obj)
                # Work out what DB we're operating on
                db = router.db_for_write(self.through, instance=self.instance)
                # Send a signal to the other end if need be.
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action="pre_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
                # Remove the specified objects from the join table
                self.through._default_manager.using(db).filter(**{
                    source_field_name: self._pk_val,
                    '%s__in' % target_field_name: old_ids
                }).delete()
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=rel.through, action="post_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
        def _clear_items(self, source_field_name):
            db = router.db_for_write(self.through, instance=self.instance)
            # source_col_name: the PK colname in join_table for the source object
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=rel.through, action="pre_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
            self.through._default_manager.using(db).filter(**{
                source_field_name: self._pk_val
            }).delete()
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=rel.through, action="post_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField pointed at them by
    # some other model (rather than having a ManyToManyField themselves).
    # In the example "publication.article_set", the article_set attribute is a
    # ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related   # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model = self.related.model
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass, self.related.field.rel)
        # reverse=True: this is the end of the m2m that does NOT declare the
        # field, so source/target column names are swapped.
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
            instance=instance,
            symmetrical=False,
            source_field_name=self.related.field.m2m_reverse_field_name(),
            target_field_name=self.related.field.m2m_field_name(),
            reverse=True
        )
        return manager
    def __set__(self, instance, value):
        # Replace the whole related set; disallowed for explicit through models.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        if not self.related.field.rel.through._meta.auto_created:
            opts = self.related.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField defined in their
    # model (rather than having another model pointed *at* them).
    # In the example "article.publications", the publications attribute is a
    # ReverseManyRelatedObjectsDescriptor instance.
    def __init__(self, m2m_field):
        self.field = m2m_field
    def _through(self):
        # through is provided so that you have easy access to the through
        # model (Book.authors.through) for inlines, etc. This is done as
        # a property to ensure that the fully resolved value is returned.
        return self.field.rel.through
    through = property(_through)
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model=self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass, self.field.rel)
        # reverse=False: this is the end that declares the ManyToManyField.
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
            instance=instance,
            symmetrical=self.field.rel.symmetrical,
            source_field_name=self.field.m2m_field_name(),
            target_field_name=self.field.m2m_reverse_field_name(),
            reverse=False
        )
        return manager
    def __set__(self, instance, value):
        # Replace the whole related set; disallowed for explicit through models.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        if not self.field.rel.through._meta.auto_created:
            opts = self.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ManyToOneRel(object):
    """Relation metadata held by a ForeignKey (the field's ``rel``)."""
    def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
            parent_link=False, on_delete=None):
        try:
            to._meta
        except AttributeError:
            # No _meta attribute: 'to' must be a lazy reference (a model name
            # string, possibly RECURSIVE_RELATIONSHIP_CONSTANT).
            assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.to = to
        self.field_name = field_name
        self.related_name = related_name
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        self.multiple = True
        self.parent_link = parent_link
        self.on_delete = on_delete
    def is_hidden(self):
        "Should the related object be hidden?"
        # A trailing '+' in related_name suppresses the reverse accessor.
        return self.related_name and self.related_name.endswith('+')
    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        field, model, direct, m2m = self.to._meta.get_field_by_name(self.field_name)
        if not direct:
            raise FieldDoesNotExist("No related field named '%s'" %
                    self.field_name)
        return field
class OneToOneRel(ManyToOneRel):
    # Same as ManyToOneRel, except the reverse side holds at most one object
    # (used by OneToOneField).
    def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
            parent_link=False, on_delete=None):
        super(OneToOneRel, self).__init__(to, field_name,
                related_name=related_name, limit_choices_to=limit_choices_to,
                parent_link=parent_link, on_delete=on_delete
        )
        # Distinguishes one-to-one from many-to-one in shared code paths.
        self.multiple = False
class ManyToManyRel(object):
    """Relation metadata held by a ManyToManyField (the field's ``rel``)."""
    def __init__(self, to, related_name=None, limit_choices_to=None,
            symmetrical=True, through=None):
        self.to = to
        self.related_name = related_name
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        self.symmetrical = symmetrical
        self.multiple = True
        self.through = through
    def is_hidden(self):
        "Should the related object be hidden?"
        # A trailing '+' in related_name suppresses the reverse accessor.
        return self.related_name and self.related_name.endswith('+')
    def get_related_field(self):
        """
        Returns the field in the 'to' object to which this relationship is tied
        (this is always the primary key on the target model). Provided for
        symmetry with ManyToOneRel.
        """
        return self.to._meta.pk
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Model %(model)s with pk %(pk)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
kwargs['rel'] = rel_class(to, to_field,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
Field.__init__(self, **kwargs)
    def validate(self, value, model_instance):
        """Validate that ``value`` points at an existing related row,
        honouring limit_choices_to. Skipped for parent links and None."""
        if self.rel.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return
        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.rel.to._default_manager.using(using).filter(
                **{self.rel.field_name: value}
             )
        qs = qs.complex_filter(self.rel.limit_choices_to)
        if not qs.exists():
            raise exceptions.ValidationError(self.error_messages['invalid'] % {
                'model': self.rel.to._meta.verbose_name, 'pk': value})
    def get_attname(self):
        """Return the instance attribute that stores the raw FK value,
        e.g. 'poll' -> 'poll_id'."""
        return '%s_id' % self.name
    def get_validator_unique_lookup_type(self):
        """Return the lookup string used when checking uniqueness of this FK."""
        return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
    def get_default(self):
        """
        Return the field default. If the default value is a model instance,
        unwrap it to the target field's raw value (the to_field).
        """
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.rel.get_related_field().attname)
        return field_default
def get_db_prep_save(self, value, connection):
if value == '' or value == None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value,
connection=connections[router.db_for_read(self.rel.to)])
    def value_to_string(self, obj):
        """
        Serialize this field's value on ``obj`` to a unicode string.

        With no object, falls back to auto-selecting the single available
        choice of a required field, if there is one.
        """
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_unicode(choice_list[1][0])
        return Field.value_to_string(self, obj)
    def contribute_to_class(self, cls, name):
        """
        Hook the field into the model class: install the forward-relation
        descriptor and record the target for duplicate-target detection.
        """
        super(ForeignKey, self).contribute_to_class(cls, name)
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
        # The relation may still be a lazy string reference at this point;
        # use the string itself as the target key until it is resolved.
        if isinstance(self.rel.to, basestring):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "o2m")
    def contribute_to_related_class(self, cls, related):
        """
        Hook the reverse side of the relation into the related model class.
        """
        # Internal FK's - i.e., those with a related name ending with '+' -
        # don't get a related descriptor.
        if not self.rel.is_hidden():
            setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
            if self.rel.limit_choices_to:
                cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
        # Resolve the deferred to_field now that the target's pk is known
        # (see the note about #12190 in __init__).
        if self.rel.field_name is None:
            self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
    def db_type(self, connection):
        """
        Return the database column type for this foreign key.
        """
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.rel.get_related_field()
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                isinstance(rel_field, (PositiveIntegerField,
                                       PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that always carries a "unique" constraint with it and the reverse relation
    always returns the object pointed to (since there will only ever be one),
    rather than returning a list.
    """
    description = _("One-to-one relationship")
    def __init__(self, to, to_field=None, **kwargs):
        """Force the unique constraint and delegate to ForeignKey."""
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
    def contribute_to_related_class(self, cls, related):
        # Unlike ForeignKey, the reverse accessor yields a single object,
        # so a different descriptor is installed (and always installed).
        setattr(cls, related.get_accessor_name(),
                SingleRelatedObjectDescriptor(related))
    def formfield(self, **kwargs):
        # Parent links (model inheritance) are managed automatically and
        # must never appear as an editable form field.
        if self.rel.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)
    def save_form_data(self, instance, data):
        """Assign form data: a model instance goes to the descriptor, a raw key to the _id attribute."""
        if isinstance(data, self.rel.to):
            setattr(instance, self.name, data)
        else:
            setattr(instance, self.attname, data)
def create_many_to_many_intermediary_model(field, klass):
    """
    Build the automatic "through" model for a ManyToManyField without an
    explicit intermediary.

    ``field`` is the ManyToManyField; ``klass`` is the model that declares
    it. Returns a dynamically created Model subclass with two ForeignKeys.
    """
    from django.db import models
    managed = True
    if isinstance(field.rel.to, basestring) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
        # Lazy string reference to another model: resolve the "managed"
        # flag only once the target model class becomes available.
        to_model = field.rel.to
        to = to_model.split('.')[-1]
        def set_managed(field, model, cls):
            field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
        add_lazy_relation(klass, field, to_model, set_managed)
    elif isinstance(field.rel.to, basestring):
        # The string must be RECURSIVE_RELATIONSHIP_CONSTANT: self-relation.
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    else:
        to = field.rel.to._meta.object_name
        to_model = field.rel.to
        managed = klass._meta.managed or to_model._meta.managed
    name = '%s_%s' % (klass._meta.object_name, field.name)
    if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
        # Self-referential m2m: both FKs point at the same model, so the
        # column names need disambiguating prefixes.
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.object_name.lower()
        to = to.lower()
    meta = type('Meta', (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
    })
    # Construct and return the new class.
    return type(name, (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(klass, related_name='%s+' % name),
        to: models.ForeignKey(to_model, related_name='%s+' % name)
    })
class ManyToManyField(RelatedField, Field):
    """
    A many-to-many relation, stored via an intermediary ("through") model
    that is either supplied by the user or created automatically.
    """
    description = _("Many-to-many relationship")
    def __init__(self, to, **kwargs):
        """
        ``to`` is a model class, a model name string, or
        RECURSIVE_RELATIONSHIP_CONSTANT for a self-relation.
        """
        try:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(to,
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            symmetrical=kwargs.pop('symmetrical', to==RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None))
        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            # An explicit intermediary model fully determines the table.
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        Field.__init__(self, **kwargs)
        msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
        self.help_text = string_concat(self.help_text, ' ', msg)
    def get_choices_default(self):
        # Many-to-many choices never include a blank option.
        return Field.get_choices(self, include_blank=False)
    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Default name: "<model table>_<field name>", truncated to the
            # backend's identifier length limit.
            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                      connection.ops.max_name_length())
    def _get_m2m_attr(self, related, attr):
        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        # The source side is the first FK on the through model that points
        # back at the model declaring this field.
        for f in self.rel.through._meta.fields:
            if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)
    def _get_m2m_reverse_attr(self, related, attr):
        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        for f in self.rel.through._meta.fields:
            if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
                if related.model == related.parent_model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                else:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)
    def value_to_string(self, obj):
        """Serialize the related objects' primary keys for ``obj``."""
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_unicode(data)
    def contribute_to_class(self, cls, name):
        """Hook the field into the model: through model, descriptor, targets."""
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
            self.rel.related_name = "%s_rel_+" % name
        super(ManyToManyField, self).contribute_to_class(cls, name)
        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        if not self.rel.through and not cls._meta.abstract:
            self.rel.through = create_many_to_many_intermediary_model(self, cls)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, basestring):
            def resolve_through_model(field, model, cls):
                field.rel.through = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
        if isinstance(self.rel.to, basestring):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "m2m")
    def contribute_to_related_class(self, cls, related):
        """Hook the reverse side into the related class; curry m2m accessors."""
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # don't get a related descriptor.
        if not self.rel.is_hidden():
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
        get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
    def set_attributes_from_rel(self):
        # Unlike ForeignKey, an m2m field gets no name/verbose_name from the
        # relation; intentionally a no-op.
        pass
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()
    def save_form_data(self, instance, data):
        """Assign the list of related objects submitted on a form."""
        setattr(instance, self.attname, data)
    def formfield(self, **kwargs):
        """Return a ModelMultipleChoiceField limited to the allowed objects."""
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)
    def db_type(self, connection):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None
| bsd-3-clause | 0538fba385c60894ba19579e4ca134b5 | 45.926496 | 222 | 0.60236 | 4.152787 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/forms/localflavor/cl.py | 86 | 2290 | from django.contrib.localflavor.cl.forms import CLRutField, CLRegionSelect
from django.core.exceptions import ValidationError
from utils import LocalFlavorTestCase
class CLLocalFlavorTests(LocalFlavorTestCase):
    """Tests for the Chilean (CL) localflavor form fields."""
    def test_CLRegionSelect(self):
        # The select widget must render all Chilean regions in this order.
        f = CLRegionSelect()
        out = u'''<select name="foo">
<option value="RM">Regi\xf3n Metropolitana de Santiago</option>
<option value="I">Regi\xf3n de Tarapac\xe1</option>
<option value="II">Regi\xf3n de Antofagasta</option>
<option value="III">Regi\xf3n de Atacama</option>
<option value="IV">Regi\xf3n de Coquimbo</option>
<option value="V">Regi\xf3n de Valpara\xedso</option>
<option value="VI">Regi\xf3n del Libertador Bernardo O'Higgins</option>
<option value="VII">Regi\xf3n del Maule</option>
<option value="VIII">Regi\xf3n del B\xedo B\xedo</option>
<option value="IX">Regi\xf3n de la Araucan\xeda</option>
<option value="X">Regi\xf3n de los Lagos</option>
<option value="XI">Regi\xf3n de Ays\xe9n del General Carlos Ib\xe1\xf1ez del Campo</option>
<option value="XII">Regi\xf3n de Magallanes y la Ant\xe1rtica Chilena</option>
<option value="XIV">Regi\xf3n de Los R\xedos</option>
<option value="XV">Regi\xf3n de Arica-Parinacota</option>
</select>'''
        self.assertEqual(f.render('foo', 'bar'), out)
    def test_CLRutField(self):
        """RUT numbers normalize to XX.XXX.XXX-X and validate their check digit."""
        error_invalid = [u'The Chilean RUT is not valid.']
        error_format = [u'Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.']
        # Mapping of accepted inputs to their normalized output.
        valid = {
            '11-6': '11-6',
            '116': '11-6',
            '767484100': '76.748.410-0',
            '78.412.790-7': '78.412.790-7',
            '8.334.6043': '8.334.604-3',
            '76793310-K': '76.793.310-K',
            '76793310-k': '76.793.310-K',
        }
        invalid = {
            '11.111.111-0': error_invalid,
            '111': error_invalid,
        }
        self.assertFieldOutput(CLRutField, valid, invalid)
        # deal with special "Strict Mode": only fully punctuated input is
        # accepted, so the loosely formatted values above now fail.
        invalid = {
            '11-6': error_format,
            '767484100': error_format,
            '8.334.6043': error_format,
            '76793310-K': error_format,
            '11.111.111-0': error_invalid
        }
        self.assertFieldOutput(CLRutField,
            {}, invalid, field_kwargs={"strict": True}
        )
| bsd-3-clause | 299e2f60826a5ddbf9fe92a749e734f9 | 39.175439 | 91 | 0.627074 | 2.858926 | false | true | false | false |
django-nonrel/django-nonrel | django/contrib/comments/forms.py | 245 | 8730 | import time
import datetime
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from models import Comment
from django.utils.crypto import salted_hmac, constant_time_compare
from django.utils.encoding import force_unicode
from django.utils.hashcompat import sha_constructor
from django.utils.text import get_text_list
from django.utils.translation import ungettext, ugettext_lazy as _
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000)
class CommentSecurityForm(forms.Form):
    """
    Handles the security aspects (anti-spoofing) for comment forms.
    """
    content_type  = forms.CharField(widget=forms.HiddenInput)
    object_pk     = forms.CharField(widget=forms.HiddenInput)
    timestamp     = forms.IntegerField(widget=forms.HiddenInput)
    security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput)
    def __init__(self, target_object, data=None, initial=None):
        """Bind the form to ``target_object`` and seed the security fields."""
        self.target_object = target_object
        if initial is None:
            initial = {}
        initial.update(self.generate_security_data())
        super(CommentSecurityForm, self).__init__(data=data, initial=initial)
    def security_errors(self):
        """Return just those errors associated with security"""
        errors = ErrorDict()
        for f in ["honeypot", "timestamp", "security_hash"]:
            if f in self.errors:
                errors[f] = self.errors[f]
        return errors
    def clean_security_hash(self):
        """Check the security hash."""
        security_hash_dict = {
            'content_type' : self.data.get("content_type", ""),
            'object_pk' : self.data.get("object_pk", ""),
            'timestamp' : self.data.get("timestamp", ""),
        }
        expected_hash = self.generate_security_hash(**security_hash_dict)
        actual_hash = self.cleaned_data["security_hash"]
        # constant_time_compare avoids leaking hash prefixes via timing.
        if not constant_time_compare(expected_hash, actual_hash):
            # Fallback to Django 1.2 method for compatibility
            # PendingDeprecationWarning <- here to remind us to remove this
            # fallback in Django 1.5
            expected_hash_old = self._generate_security_hash_old(**security_hash_dict)
            if not constant_time_compare(expected_hash_old, actual_hash):
                raise forms.ValidationError("Security hash check failed.")
        return actual_hash
    def clean_timestamp(self):
        """Make sure the timestamp isn't too far (> 2 hours) in the past."""
        ts = self.cleaned_data["timestamp"]
        if time.time() - ts > (2 * 60 * 60):
            raise forms.ValidationError("Timestamp check failed")
        return ts
    def generate_security_data(self):
        """Generate a dict of security data for "initial" data."""
        timestamp = int(time.time())
        security_dict =   {
            'content_type'  : str(self.target_object._meta),
            'object_pk'     : str(self.target_object._get_pk_val()),
            'timestamp'     : str(timestamp),
            'security_hash' : self.initial_security_hash(timestamp),
        }
        return security_dict
    def initial_security_hash(self, timestamp):
        """
        Generate the initial security hash from self.content_object
        and a (unix) timestamp.
        """
        initial_security_dict = {
            'content_type' : str(self.target_object._meta),
            'object_pk' : str(self.target_object._get_pk_val()),
            'timestamp' : str(timestamp),
          }
        return self.generate_security_hash(**initial_security_dict)
    def generate_security_hash(self, content_type, object_pk, timestamp):
        """
        Generate a HMAC security hash from the provided info.
        """
        info = (content_type, object_pk, timestamp)
        key_salt = "django.contrib.forms.CommentSecurityForm"
        value = "-".join(info)
        return salted_hmac(key_salt, value).hexdigest()
    def _generate_security_hash_old(self, content_type, object_pk, timestamp):
        """Generate a (SHA1) security hash from the provided info."""
        # Django 1.2 compatibility
        info = (content_type, object_pk, timestamp, settings.SECRET_KEY)
        return sha_constructor("".join(info)).hexdigest()
class CommentDetailsForm(CommentSecurityForm):
    """
    Handles the specific details of the comment (name, comment, etc.).
    """
    name          = forms.CharField(label=_("Name"), max_length=50)
    email         = forms.EmailField(label=_("Email address"))
    url           = forms.URLField(label=_("URL"), required=False)
    comment       = forms.CharField(label=_('Comment'), widget=forms.Textarea,
                                    max_length=COMMENT_MAX_LENGTH)
    def get_comment_object(self):
        """
        Return a new (unsaved) comment object based on the information in this
        form. Assumes that the form is already validated and will throw a
        ValueError if not.
        Does not set any of the fields that would come from a Request object
        (i.e. ``user`` or ``ip_address``).
        """
        if not self.is_valid():
            raise ValueError("get_comment_object may only be called on valid forms")
        CommentModel = self.get_comment_model()
        new = CommentModel(**self.get_comment_create_data())
        new = self.check_for_duplicate_comment(new)
        return new
    def get_comment_model(self):
        """
        Get the comment model to create with this form. Subclasses in custom
        comment apps should override this, get_comment_create_data, and perhaps
        check_for_duplicate_comment to provide custom comment models.
        """
        return Comment
    def get_comment_create_data(self):
        """
        Returns the dict of data to be used to create a comment. Subclasses in
        custom comment apps that override get_comment_model can override this
        method to add extra fields onto a custom comment model.
        """
        return dict(
            content_type = ContentType.objects.get_for_model(self.target_object),
            object_pk    = force_unicode(self.target_object._get_pk_val()),
            user_name    = self.cleaned_data["name"],
            user_email   = self.cleaned_data["email"],
            user_url     = self.cleaned_data["url"],
            comment      = self.cleaned_data["comment"],
            submit_date  = datetime.datetime.now(),
            site_id      = settings.SITE_ID,
            is_public    = True,
            is_removed   = False,
        )
    def check_for_duplicate_comment(self, new):
        """
        Check that a submitted comment isn't a duplicate. This might be caused
        by someone posting a comment twice. If it is a dup, silently return the *previous* comment.
        """
        # Query the same database the target object lives on, so multi-db
        # setups compare against the right rows.
        possible_duplicates = self.get_comment_model()._default_manager.using(
            self.target_object._state.db
        ).filter(
            content_type = new.content_type,
            object_pk = new.object_pk,
            user_name = new.user_name,
            user_email = new.user_email,
            user_url = new.user_url,
        )
        for old in possible_duplicates:
            # Same day + identical text counts as a duplicate submission.
            if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
                return old
        return new
    def clean_comment(self):
        """
        If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
        contain anything in PROFANITIES_LIST.
        """
        comment = self.cleaned_data["comment"]
        if settings.COMMENTS_ALLOW_PROFANITIES == False:
            bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
            if bad_words:
                plural = len(bad_words) > 1
                raise forms.ValidationError(ungettext(
                    "Watch your mouth! The word %s is not allowed here.",
                    "Watch your mouth! The words %s are not allowed here.", plural) % \
                    # Each offending word is shown masked: first and last
                    # letter kept, interior replaced with dashes.
                    get_text_list(['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1]) for i in bad_words], 'and'))
        return comment
class CommentForm(CommentDetailsForm):
    """Comment form with an anti-spam honeypot field added."""
    honeypot = forms.CharField(required=False,
                               label=_('If you enter anything in this field '\
                                       'your comment will be treated as spam'))
    def clean_honeypot(self):
        """Check that nothing's been entered into the honeypot."""
        honeypot_value = self.cleaned_data["honeypot"]
        # Humans never see/fill this field; any content means a bot.
        if honeypot_value:
            raise forms.ValidationError(self.fields["honeypot"].label)
        return honeypot_value
| bsd-3-clause | f39a6a2c4c7294e841260bcee9764005 | 41.378641 | 106 | 0.609393 | 4.225557 | false | false | false | false |
django-nonrel/django-nonrel | django/contrib/admin/filterspecs.py | 5 | 12482 | """
FilterSpec encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
from django.db import models
from django.utils.encoding import smart_unicode, iri_to_uri
from django.utils.translation import ugettext as _
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.contrib.admin.util import get_model_from_relation, \
reverse_field_path, get_limit_choices_to_from_path
import datetime
class FilterSpec(object):
    """
    Base class for admin list filters. Subclasses register themselves with
    a predicate via ``register()``; ``create()`` picks the first match.
    """
    # Registry of (test, factory) pairs, in registration order.
    filter_specs = []
    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        self.field = f
        self.params = params
        self.field_path = field_path
        # Resolve a default lookup path from the field itself when the
        # caller didn't supply one.
        if field_path is None:
            if isinstance(f, models.related.RelatedObject):
                self.field_path = f.var_name
            else:
                self.field_path = f.name
    def register(cls, test, factory):
        """Register ``factory`` for fields matching the ``test`` predicate."""
        cls.filter_specs.append((test, factory))
    register = classmethod(register)
    def create(cls, f, request, params, model, model_admin, field_path=None):
        """Return a filter spec for ``f`` from the first matching factory."""
        for test, factory in cls.filter_specs:
            if test(f):
                return factory(f, request, params, model, model_admin,
                               field_path=field_path)
    create = classmethod(create)
    def has_output(self):
        """Whether the filter should be rendered at all."""
        return True
    def choices(self, cl):
        """Yield choice dicts (selected/query_string/display); subclasses must implement."""
        raise NotImplementedError()
    def title(self):
        return self.field.verbose_name
    def output(self, cl):
        """Render the filter as an HTML <ul> of choice links."""
        t = []
        if self.has_output():
            t.append(_(u'<h3>By %s:</h3>\n<ul>\n') % escape(self.title()))
            for choice in self.choices(cl):
                t.append(u'<li%s><a href="%s">%s</a></li>\n' % \
                    ((choice['selected'] and ' class="selected"' or ''),
                     iri_to_uri(choice['query_string']),
                     choice['display']))
            t.append('</ul>\n\n')
        return mark_safe("".join(t))
class RelatedFilterSpec(FilterSpec):
    """
    Filter for ForeignKey, ManyToManyField and reverse relations: one
    choice per related object, plus "All" and (for nullable) empty.
    """
    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(RelatedFilterSpec, self).__init__(
            f, request, params, model, model_admin, field_path=field_path)
        other_model = get_model_from_relation(f)
        if isinstance(f, (models.ManyToManyField,
                          models.related.RelatedObject)):
            # no direct field on this model, get name from other model
            self.lookup_title = other_model._meta.verbose_name
        else:
            self.lookup_title = f.verbose_name # use field name
        if hasattr(f, 'rel'):
            rel_name = f.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg = '%s__%s__exact' % (self.field_path, rel_name)
        self.lookup_kwarg_isnull = '%s__isnull' % self.field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(
            self.lookup_kwarg_isnull, None)
        self.lookup_choices = f.get_choices(include_blank=False)
    def has_output(self):
        # NOTE: precedence groups this as (RelatedObject and nullable) or
        # (has rel and nullable) -- nullable relations get one extra
        # "empty" choice.
        if isinstance(self.field, models.related.RelatedObject) \
           and self.field.field.null or hasattr(self.field, 'rel') \
           and self.field.null:
            extra = 1
        else:
            extra = 0
        # Only worth showing when there is more than one possible choice.
        return len(self.lookup_choices) + extra > 1
    def title(self):
        return self.lookup_title
    def choices(self, cl):
        # Imported here to avoid a circular import with the admin views.
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {'selected': self.lookup_val is None
                           and not self.lookup_val_isnull,
               'query_string': cl.get_query_string(
                               {},
                               [self.lookup_kwarg, self.lookup_kwarg_isnull]),
               'display': _('All')}
        for pk_val, val in self.lookup_choices:
            yield {'selected': self.lookup_val == smart_unicode(pk_val),
                   'query_string': cl.get_query_string(
                                   {self.lookup_kwarg: pk_val},
                                   [self.lookup_kwarg_isnull]),
                   'display': val}
        if isinstance(self.field, models.related.RelatedObject) \
           and self.field.field.null or hasattr(self.field, 'rel') \
           and self.field.null:
            yield {'selected': bool(self.lookup_val_isnull),
                   'query_string': cl.get_query_string(
                                   {self.lookup_kwarg_isnull: 'True'},
                                   [self.lookup_kwarg]),
                   'display': EMPTY_CHANGELIST_VALUE}

FilterSpec.register(lambda f: (
        hasattr(f, 'rel') and bool(f.rel) or
        isinstance(f, models.related.RelatedObject)), RelatedFilterSpec)
class BooleanFieldFilterSpec(FilterSpec):
    """
    Filter for BooleanField/NullBooleanField: All / Yes / No, plus
    "Unknown" for nullable booleans.
    """
    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(BooleanFieldFilterSpec, self).__init__(f, request, params, model,
                                                     model_admin,
                                                     field_path=field_path)
        self.lookup_kwarg = '%s__exact' % self.field_path
        self.lookup_kwarg2 = '%s__isnull' % self.field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
    def title(self):
        return self.field.verbose_name
    def choices(self, cl):
        for k, v in ((_('All'), None), (_('Yes'), '1'), (_('No'), '0')):
            yield {'selected': self.lookup_val == v and not self.lookup_val2,
                   'query_string': cl.get_query_string(
                                   {self.lookup_kwarg: v},
                                   [self.lookup_kwarg2]),
                   'display': k}
        # NULL is a distinct state only for NullBooleanField.
        if isinstance(self.field, models.NullBooleanField):
            yield {'selected': self.lookup_val2 == 'True',
                   'query_string': cl.get_query_string(
                                   {self.lookup_kwarg2: 'True'},
                                   [self.lookup_kwarg]),
                   'display': _('Unknown')}

FilterSpec.register(lambda f: isinstance(f, models.BooleanField)
                              or isinstance(f, models.NullBooleanField),
                    BooleanFieldFilterSpec)
class ChoicesFilterSpec(FilterSpec):
    """
    Filter for any field declared with ``choices``: one entry per choice.
    """
    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(ChoicesFilterSpec, self).__init__(f, request, params, model,
                                                model_admin,
                                                field_path=field_path)
        self.lookup_kwarg = '%s__exact' % self.field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
    def choices(self, cl):
        yield {'selected': self.lookup_val is None,
               'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
               'display': _('All')}
        # flatchoices flattens any optgroup-style nested choices.
        for k, v in self.field.flatchoices:
            yield {'selected': smart_unicode(k) == self.lookup_val,
                   'query_string': cl.get_query_string(
                                   {self.lookup_kwarg: k}),
                   'display': v}

FilterSpec.register(lambda f: bool(f.choices), ChoicesFilterSpec)
class DateFieldFilterSpec(FilterSpec):
    """
    Filter for DateField/DateTimeField with fixed ranges: any date, today,
    past 7 days, this month, this year.
    """
    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(DateFieldFilterSpec, self).__init__(f, request, params, model,
                                                  model_admin,
                                                  field_path=field_path)
        self.field_generic = '%s__' % self.field_path
        # Collect all GET params targeting this field so the active range
        # can be recognized in choices().
        self.date_params = dict([(k, v) for k, v in params.items()
                                 if k.startswith(self.field_generic)])
        today = datetime.date.today()
        one_week_ago = today - datetime.timedelta(days=7)
        # DateTimeField needs the end-of-day time so "today" is inclusive;
        # the and/or pair is the pre-ternary Python idiom.
        today_str = isinstance(self.field, models.DateTimeField) \
                    and today.strftime('%Y-%m-%d 23:59:59') \
                    or today.strftime('%Y-%m-%d')
        self.links = (
            (_('Any date'), {}),
            (_('Today'), {'%s__year' % self.field_path: str(today.year),
                       '%s__month' % self.field_path: str(today.month),
                       '%s__day' % self.field_path: str(today.day)}),
            (_('Past 7 days'), {'%s__gte' % self.field_path:
                                    one_week_ago.strftime('%Y-%m-%d'),
                                '%s__lte' % self.field_path: today_str}),
            (_('This month'), {'%s__year' % self.field_path: str(today.year),
                               '%s__month' % self.field_path: str(today.month)}),
            (_('This year'), {'%s__year' % self.field_path: str(today.year)})
        )
    def title(self):
        return self.field.verbose_name
    def choices(self, cl):
        for title, param_dict in self.links:
            yield {'selected': self.date_params == param_dict,
                   'query_string': cl.get_query_string(
                                   param_dict,
                                   [self.field_generic]),
                   'display': title}

FilterSpec.register(lambda f: isinstance(f, models.DateField),
                    DateFieldFilterSpec)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldFilterSpec, that'd be much
# more appropriate, and the AllValuesFilterSpec won't get used for it.
class AllValuesFilterSpec(FilterSpec):
    """
    Fallback filter: one choice per distinct value of the field, plus
    "All" and an empty choice when NULLs exist.
    """
    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(AllValuesFilterSpec, self).__init__(f, request, params, model,
                                                  model_admin,
                                                  field_path=field_path)
        self.lookup_kwarg = self.field_path
        self.lookup_kwarg_isnull = '%s__isnull' % self.field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
                                                 None)
        parent_model, reverse_path = reverse_field_path(model, self.field_path)
        queryset = parent_model._default_manager.all()
        # optional feature: limit choices base on existing relationships
        # queryset = queryset.complex_filter(
        #    {'%s__isnull' % reverse_path: False})
        # Use self.field_path -- resolved by the base __init__ when the
        # caller passed field_path=None -- rather than the raw argument,
        # which may still be None at this point.
        limit_choices_to = get_limit_choices_to_from_path(model,
                                                          self.field_path)
        queryset = queryset.filter(limit_choices_to)
        self.lookup_choices = \
            queryset.distinct().order_by(f.name).values_list(f.name, flat=True)
    def title(self):
        return self.field.verbose_name
    def choices(self, cl):
        # Imported here to avoid a circular import with the admin views.
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {'selected': self.lookup_val is None
                           and self.lookup_val_isnull is None,
               'query_string': cl.get_query_string(
                               {},
                               [self.lookup_kwarg, self.lookup_kwarg_isnull]),
               'display': _('All')}
        include_none = False
        for val in self.lookup_choices:
            # NULL gets a single dedicated choice appended at the end.
            if val is None:
                include_none = True
                continue
            val = smart_unicode(val)
            yield {'selected': self.lookup_val == val,
                   'query_string': cl.get_query_string(
                                   {self.lookup_kwarg: val},
                                   [self.lookup_kwarg_isnull]),
                   'display': val}
        if include_none:
            yield {'selected': bool(self.lookup_val_isnull),
                   'query_string': cl.get_query_string(
                                   {self.lookup_kwarg_isnull: 'True'},
                                   [self.lookup_kwarg]),
                   'display': EMPTY_CHANGELIST_VALUE}

FilterSpec.register(lambda f: True, AllValuesFilterSpec)
| bsd-3-clause | d30b438449abbd4972dfda25835ff9f4 | 43.262411 | 79 | 0.532046 | 4.184378 | false | false | false | false |
django-nonrel/django-nonrel | django/views/generic/base.py | 56 | 6110 | from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template import RequestContext, loader
from django.template.response import TemplateResponse
from django.utils.functional import update_wrapper
from django.utils.log import getLogger
from django.utils.decorators import classonlymethod
logger = getLogger('django.request')
class View(object):
    """
    Intentionally simple parent class for all views. Only implements
    dispatch-by-method and simple sanity checking.
    """
    http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace']
    def __init__(self, **kwargs):
        """
        Constructor. Called in the URLconf; can contain helpful extra
        keyword arguments, and other things.
        """
        # Go through keyword arguments, and either save their values to our
        # instance, or raise an error.
        for key, value in kwargs.iteritems():
            setattr(self, key, value)
    @classonlymethod
    def as_view(cls, **initkwargs):
        """
        Main entry point for a request-response process.

        Returns a plain function that instantiates the class per request
        and calls dispatch().
        """
        # sanitize keyword arguments
        for key in initkwargs:
            if key in cls.http_method_names:
                raise TypeError(u"You tried to pass in the %s method name as a "
                                u"keyword argument to %s(). Don't do that."
                                % (key, cls.__name__))
            if not hasattr(cls, key):
                raise TypeError(u"%s() received an invalid keyword %r" % (
                    cls.__name__, key))
        def view(request, *args, **kwargs):
            self = cls(**initkwargs)
            return self.dispatch(request, *args, **kwargs)
        # take name and docstring from class
        update_wrapper(view, cls, updated=())
        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())
        return view
    def dispatch(self, request, *args, **kwargs):
        """Route the request to the handler matching its HTTP method."""
        # Try to dispatch to the right method; if a method doesn't exist,
        # defer to the error handler. Also defer to the error handler if the
        # request method isn't on the approved list.
        if request.method.lower() in self.http_method_names:
            handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        # Stash the request details so handlers/mixins can use them.
        self.request = request
        self.args = args
        self.kwargs = kwargs
        return handler(request, *args, **kwargs)
    def http_method_not_allowed(self, request, *args, **kwargs):
        """Log and return a 405 listing the methods this view implements."""
        allowed_methods = [m for m in self.http_method_names if hasattr(self, m)]
        logger.warning('Method Not Allowed (%s): %s' % (request.method, request.path),
            extra={
                'status_code': 405,
                'request': self.request
            }
        )
        return http.HttpResponseNotAllowed(allowed_methods)
class TemplateResponseMixin(object):
    """
    Mixin that equips a view with template rendering.
    """
    template_name = None  # name of the template to render
    response_class = TemplateResponse
    def render_to_response(self, context, **response_kwargs):
        """
        Build and return a response whose content is the rendered template
        for the given context.  Extra keyword arguments are forwarded to
        the response class.
        """
        return self.response_class(
            request=self.request,
            template=self.get_template_names(),
            context=context,
            **response_kwargs
        )
    def get_template_names(self):
        """
        Return the list of candidate template names for this request.  May
        not be called if render_to_response is overridden.
        """
        # Fail loudly when neither template_name nor an override exists.
        if self.template_name is None:
            raise ImproperlyConfigured(
                "TemplateResponseMixin requires either a definition of "
                "'template_name' or an implementation of 'get_template_names()'")
        return [self.template_name]
class TemplateView(TemplateResponseMixin, View):
    """
    Render a template on GET, exposing the URLconf keyword arguments to
    the template under the ``params`` key.
    """
    def get_context_data(self, **kwargs):
        """Return the template context; kwargs come from the URL pattern."""
        return {'params': kwargs}
    def get(self, request, *args, **kwargs):
        return self.render_to_response(self.get_context_data(**kwargs))
class RedirectView(View):
    """
    Redirect any GET (and, by delegation, HEAD/POST/OPTIONS/DELETE/PUT)
    request to the configured URL, or answer 410 Gone when none is set.
    """
    permanent = True       # issue a 301 rather than a 302
    url = None             # target URL; may contain %(name)s placeholders
    query_string = False   # append the incoming query string to the target?
    def get_redirect_url(self, **kwargs):
        """
        Return the URL to redirect to, or None when no target is
        configured.  Keyword arguments from the URL pattern match are
        interpolated into the URL via %-formatting.
        """
        if not self.url:
            return None
        qs = self.request.META["QUERY_STRING"]
        if qs and self.query_string:
            target = "%s?%s" % (self.url, qs)
        else:
            target = self.url
        return target % kwargs
    def get(self, request, *args, **kwargs):
        target = self.get_redirect_url(**kwargs)
        if not target:
            # No destination configured: treat the resource as gone.
            logger.warning('Gone: %s' % self.request.path,
                extra={
                    'status_code': 410,
                    'request': self.request
                })
            return http.HttpResponseGone()
        if self.permanent:
            return http.HttpResponsePermanentRedirect(target)
        return http.HttpResponseRedirect(target)
    def head(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def options(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def delete(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
| bsd-3-clause | c7ae8407caaf7bc95e8445289344373d | 33.325843 | 89 | 0.584452 | 4.472914 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/modeladmin/tests.py | 49 | 41260 | from datetime import date
from django import forms
from django.conf import settings
from django.contrib.admin.options import ModelAdmin, TabularInline, \
HORIZONTAL, VERTICAL
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.validation import validate
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase
from django.utils import unittest
from models import Band, Concert, ValidationTestModel, \
ValidationTestInlineModel
# None of the following tests really depend on the content of the request,
# so we'll just pass in None.
request = None
class ModelAdminTests(TestCase):
    """Tests for ModelAdmin form building: default fields/fieldsets, the
    ``fields``/``fieldsets``/``exclude`` options, custom forms, queryset
    overrides, and ``radio_fields`` widget selection."""
    def setUp(self):
        # Every test gets one Band instance and a fresh AdminSite.
        self.band = Band.objects.create(
            name='The Doors',
            bio='',
            sign_date=date(1965, 1, 1),
        )
        self.site = AdminSite()
    # form/fields/fieldsets interaction ##############################
    def test_default_fields(self):
        # With no options set, the form contains every editable model field.
        ma = ModelAdmin(Band, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(),
            ['name', 'bio', 'sign_date'])
    def test_default_fieldsets(self):
        # fieldsets_add and fieldsets_change should return a special data structure that
        # is used in the templates. They should generate the "right thing" whether we
        # have specified a custom form, the fields argument, or nothing at all.
        #
        # Here's the default case. There are no custom form_add/form_change methods,
        # no fields argument, and no fieldsets argument.
        ma = ModelAdmin(Band, self.site)
        self.assertEqual(ma.get_fieldsets(request),
            [(None, {'fields': ['name', 'bio', 'sign_date']})])
        self.assertEqual(ma.get_fieldsets(request, self.band),
            [(None, {'fields': ['name', 'bio', 'sign_date']})])
    def test_field_arguments(self):
        # If we specify the fields argument, fieldsets_add and fielsets_change should
        # just stick the fields into a formsets structure and return it.
        class BandAdmin(ModelAdmin):
            fields = ['name']
        ma = BandAdmin(Band, self.site)
        self.assertEqual( ma.get_fieldsets(request),
            [(None, {'fields': ['name']})])
        self.assertEqual(ma.get_fieldsets(request, self.band),
            [(None, {'fields': ['name']})])
    def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, it should exclude fields on the Form class
        # to the fields specified. This may cause errors to be raised in the db layer if
        # required model fields arent in fields/fieldsets, but that's preferable to
        # ghost errors where you have a field in your Form class that isn't being
        # displayed because you forgot to add it to fields/fieldsets
        # Using `fields`.
        class BandAdmin(ModelAdmin):
            fields = ['name']
        ma = BandAdmin(Band, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(), ['name'])
        self.assertEqual(ma.get_form(request, self.band).base_fields.keys(),
            ['name'])
        # Using `fieldsets`.
        class BandAdmin(ModelAdmin):
            fieldsets = [(None, {'fields': ['name']})]
        ma = BandAdmin(Band, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(), ['name'])
        self.assertEqual(ma.get_form(request, self.band).base_fields.keys(),
            ['name'])
        # Using `exclude`.
        class BandAdmin(ModelAdmin):
            exclude = ['bio']
        ma = BandAdmin(Band, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(),
            ['name', 'sign_date'])
        # You can also pass a tuple to `exclude`.
        class BandAdmin(ModelAdmin):
            exclude = ('bio',)
        ma = BandAdmin(Band, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(),
            ['name', 'sign_date'])
        # Using `fields` and `exclude`.
        class BandAdmin(ModelAdmin):
            fields = ['name', 'bio']
            exclude = ['bio']
        ma = BandAdmin(Band, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(),
            ['name'])
    def test_custom_form_validation(self):
        # If we specify a form, it should use it allowing custom validation to work
        # properly. This won't, however, break any of the admin widgets or media.
        class AdminBandForm(forms.ModelForm):
            delete = forms.BooleanField()
            class Meta:
                model = Band
        class BandAdmin(ModelAdmin):
            form = AdminBandForm
        ma = BandAdmin(Band, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(),
            ['name', 'bio', 'sign_date', 'delete'])
        # The extra declared field must not disturb admin widget selection.
        self.assertEqual(
            type(ma.get_form(request).base_fields['sign_date'].widget),
            AdminDateWidget)
    def test_queryset_override(self):
        # If we need to override the queryset of a ModelChoiceField in our custom form
        # make sure that RelatedFieldWidgetWrapper doesn't mess that up.
        band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
        band2.save()
        class ConcertAdmin(ModelAdmin):
            pass
        ma = ConcertAdmin(Concert, self.site)
        form = ma.get_form(request)()
        # Default queryset: both bands are offered.
        self.assertEqual(str(form["main_band"]),
            '<select name="main_band" id="id_main_band">\n'
            '<option value="" selected="selected">---------</option>\n'
            '<option value="%d">The Beatles</option>\n'
            '<option value="%d">The Doors</option>\n'
            '</select>' % (band2.id, self.band.id))
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
            def __init__(self, *args, **kwargs):
                super(AdminConcertForm, self).__init__(*args, **kwargs)
                self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm
        ma = ConcertAdmin(Concert, self.site)
        form = ma.get_form(request)()
        # Overridden queryset: only The Doors remains selectable.
        self.assertEqual(str(form["main_band"]),
            '<select name="main_band" id="id_main_band">\n'
            '<option value="" selected="selected">---------</option>\n'
            '<option value="%d">The Doors</option>\n'
            '</select>' % self.band.id)
    # radio_fields behavior ###########################################
    def test_default_foreign_key_widget(self):
        # First, without any radio_fields specified, the widgets for ForeignKey
        # and fields with choices specified ought to be a basic Select widget.
        # ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
        # they need to be handled properly when type checking. For Select fields, all of
        # the choices lists have a first entry of dashes.
        cma = ModelAdmin(Concert, self.site)
        cmafa = cma.get_form(request)
        self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
            Select)
        self.assertEqual(
            list(cmafa.base_fields['main_band'].widget.choices),
            [(u'', u'---------'), (self.band.id, u'The Doors')])
        self.assertEqual(
            type(cmafa.base_fields['opening_band'].widget.widget), Select)
        self.assertEqual(
            list(cmafa.base_fields['opening_band'].widget.choices),
            [(u'', u'---------'), (self.band.id, u'The Doors')])
        self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
        self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
            [('', '---------'), (1, 'Fri'), (2, 'Sat')])
        self.assertEqual(type(cmafa.base_fields['transport'].widget),
            Select)
        self.assertEqual(
            list(cmafa.base_fields['transport'].widget.choices),
            [('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
    def test_foreign_key_as_radio_field(self):
        # Now specify all the fields as radio_fields. Widgets should now be
        # RadioSelect, and the choices list should have a first entry of 'None' if
        # blank=True for the model field. Finally, the widget should have the
        # 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
        class ConcertAdmin(ModelAdmin):
            radio_fields = {
                'main_band': HORIZONTAL,
                'opening_band': VERTICAL,
                'day': VERTICAL,
                'transport': HORIZONTAL,
            }
        cma = ConcertAdmin(Concert, self.site)
        cmafa = cma.get_form(request)
        self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
            {'class': 'radiolist inline'})
        self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
            [(self.band.id, u'The Doors')])
        self.assertEqual(
            type(cmafa.base_fields['opening_band'].widget.widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
            {'class': 'radiolist'})
        self.assertEqual(
            list(cmafa.base_fields['opening_band'].widget.choices),
            [(u'', u'None'), (self.band.id, u'The Doors')])
        self.assertEqual(type(cmafa.base_fields['day'].widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['day'].widget.attrs,
            {'class': 'radiolist'})
        self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
            [(1, 'Fri'), (2, 'Sat')])
        self.assertEqual(type(cmafa.base_fields['transport'].widget),
            AdminRadioSelect)
        self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
            {'class': 'radiolist inline'})
        self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
            [('', u'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
        # A custom form's Meta.exclude is honoured when building the admin form.
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ('transport',)
        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm
        ma = ConcertAdmin(Concert, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(),
            ['main_band', 'opening_band', 'day'])
        # Extra declared fields plus Meta.fields restrict/extend the form.
        class AdminConcertForm(forms.ModelForm):
            extra = forms.CharField()
            class Meta:
                model = Concert
                fields = ['extra', 'transport']
        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm
        ma = ConcertAdmin(Concert, self.site)
        self.assertEqual(ma.get_form(request).base_fields.keys(),
            ['extra', 'transport'])
        # The same custom form works for an inline; the formset appends the
        # bookkeeping fields (id, DELETE, fk) after the declared ones.
        class ConcertInline(TabularInline):
            form = AdminConcertForm
            model = Concert
            fk_name = 'main_band'
            can_delete = True
        class BandAdmin(ModelAdmin):
            inlines = [
                ConcertInline
            ]
        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
            ['extra', 'transport', 'id', 'DELETE', 'main_band'])
class ValidationTests(unittest.TestCase):
def test_validation_only_runs_in_debug(self):
# Ensure validation only runs when DEBUG = True
try:
settings.DEBUG = True
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
site = AdminSite()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
site.register,
ValidationTestModel,
ValidationTestModelAdmin,
)
finally:
settings.DEBUG = False
site = AdminSite()
site.register(ValidationTestModel, ValidationTestModelAdmin)
def test_raw_id_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields\[0\]', 'name' must be either a ForeignKey or ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fieldsets_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' does not have exactly two elements.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'fields' key is required in ValidationTestModelAdmin.fieldsets\[0\]\[1\] field options dict.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("non_existent_field",)}),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
fields = ["name",]
self.assertRaisesRegexp(
ImproperlyConfigured,
"Both fieldsets and fields are specified in ValidationTestModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertRaisesRegexp(
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fieldsets",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fields = ["name", "name"]
self.assertRaisesRegexp(
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fields",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_form_validation(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertRaisesRegexp(
ImproperlyConfigured,
"ValidationTestModelAdmin.form does not inherit from BaseModelForm.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
validate(BandAdmin, Band)
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
validate(BandAdmin, Band)
def test_filter_vertical_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("non_existent_field",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("name",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_filter_horizontal_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("non_existent_field",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("name",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_radio_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"non_existent_field": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"name": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['name'\]' is neither an instance of ForeignKey nor does have choices set.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['state'\]' is neither admin.HORIZONTAL nor admin.VERTICAL.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_prepopulated_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"non_existent_field": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("non_existent_field",)}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['slug'\]\[0\]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ("name",)}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['users'\]' is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"ValidationTestModelAdmin.list_display\[0\], 'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display\[0\]', 'users' is a ManyToManyField which is not supported.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_links_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'non_existent_field' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'name' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' refers to 'non_existent_field' which does not refer to a Field.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_per_page_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_per_page' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_search_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.search_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_date_hierarchy_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_ordering_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering\[0\]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' has the random ordering marker '\?', but contains other fields as well. Please either remove '\?' or the other fields.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_select_related_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_select_related' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_as_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_as' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_on_top_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_on_top' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_inlines_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\]' does not inherit from BaseModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'model' is a required attribute of 'ValidationTestModelAdmin.inlines\[0\]'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\].model' does not inherit from models.Model.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fields_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = ("non_existent_field",)
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fields' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fk_name_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "non_existent_field"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'ValidationTestInlineModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_extra_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.extra' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_max_num_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.max_num' should be an integer or None \(default\).",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_formset_validation(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.formset' does not inherit from BaseModelFormSet.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
| bsd-3-clause | 7ced025350d63c734c94c90d26921ef7 | 32.57201 | 188 | 0.604823 | 5.178863 | false | true | false | false |
django-nonrel/django-nonrel | django/middleware/common.py | 157 | 6658 | import re
from django.conf import settings
from django import http
from django.core.mail import mail_managers
from django.utils.http import urlquote
from django.core import urlresolvers
from django.utils.hashcompat import md5_constructor
from django.utils.log import getLogger
logger = getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
logger.warning('Forbidden (User agent): %s' % request.path,
extra={
'status_code': 403,
'request': request
}
)
return http.HttpResponseForbidden('<h1>Forbidden</h1>')
# Check for a redirect based on settings.APPEND_SLASH
# and settings.PREPEND_WWW
host = request.get_host()
old_url = [host, request.path]
new_url = old_url[:]
if (settings.PREPEND_WWW and old_url[0] and
not old_url[0].startswith('www.')):
new_url[0] = 'www.' + old_url[0]
# Append a slash if APPEND_SLASH is set and the URL doesn't have a
# trailing slash and there is no pattern for the current path
if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
urlconf = getattr(request, 'urlconf', None)
if (not _is_valid_path(request.path_info, urlconf) and
_is_valid_path("%s/" % request.path_info, urlconf)):
new_url[1] = new_url[1] + '/'
if settings.DEBUG and request.method == 'POST':
raise RuntimeError, (""
"You called this URL via POST, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining POST data. "
"Change your form to point to %s%s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django "
"settings.") % (new_url[0], new_url[1])
if new_url == old_url:
# No redirects required.
return
if new_url[0]:
newurl = "%s://%s%s" % (
request.is_secure() and 'https' or 'http',
new_url[0], urlquote(new_url[1]))
else:
newurl = urlquote(new_url[1])
if request.GET:
newurl += '?' + request.META['QUERY_STRING']
return http.HttpResponsePermanentRedirect(newurl)
def process_response(self, request, response):
"Send broken link emails and calculate the Etag, if needed."
if response.status_code == 404:
if settings.SEND_BROKEN_LINK_EMAILS and not settings.DEBUG:
# If the referrer was from an internal link or a non-search-engine site,
# send a note to the managers.
domain = request.get_host()
referer = request.META.get('HTTP_REFERER', None)
is_internal = _is_internal_request(domain, referer)
path = request.get_full_path()
if referer and not _is_ignorable_404(path) and (is_internal or '?' not in referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers("Broken %slink on %s" % ((is_internal and 'INTERNAL ' or ''), domain),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\nIP address: %s\n" \
% (referer, request.get_full_path(), ua, ip),
fail_silently=True)
return response
# Use ETags, if requested.
if settings.USE_ETAGS:
if response.has_header('ETag'):
etag = response['ETag']
else:
etag = '"%s"' % md5_constructor(response.content).hexdigest()
if response.status_code >= 200 and response.status_code < 300 and request.META.get('HTTP_IF_NONE_MATCH') == etag:
cookies = response.cookies
response = http.HttpResponseNotModified()
response.cookies = cookies
else:
response['ETag'] = etag
return response
def _is_ignorable_404(uri):
"""
Returns True if a 404 at the given URL *shouldn't* notify the site managers.
"""
for start in settings.IGNORABLE_404_STARTS:
if uri.startswith(start):
return True
for end in settings.IGNORABLE_404_ENDS:
if uri.endswith(end):
return True
return False
def _is_internal_request(domain, referer):
"""
Returns true if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return referer is not None and re.match("^https?://%s/" % re.escape(domain), referer)
def _is_valid_path(path, urlconf=None):
"""
Returns True if the given path resolves against the default URL resolver,
False otherwise.
This is a convenience method to make working with "is this a match?" cases
easier, avoiding unnecessarily indented try...except blocks.
"""
try:
urlresolvers.resolve(path, urlconf)
return True
except urlresolvers.Resolver404:
return False
| bsd-3-clause | 4a2a42105ab8d58eb2439e81d4ce647f | 41.139241 | 125 | 0.579303 | 4.295484 | false | false | false | false |
django-nonrel/django-nonrel | django/views/generic/date_based.py | 246 | 14025 | import datetime
import time
from django.template import loader, RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.xheaders import populate_xheaders
from django.db.models.fields import DateTimeField
from django.http import Http404, HttpResponse
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
PendingDeprecationWarning
)
def archive_index(request, queryset, date_field, num_latest=15,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=True, context_processors=None,
mimetype=None, allow_future=False, template_object_name='latest'):
"""
Generic top-level archive of date-based objects.
Templates: ``<app_label>/<model_name>_archive.html``
Context:
date_list
List of years
latest
Latest N (defaults to 15) objects by date
"""
if extra_context is None: extra_context = {}
model = queryset.model
if not allow_future:
queryset = queryset.filter(**{'%s__lte' % date_field: datetime.datetime.now()})
date_list = queryset.dates(date_field, 'year')[::-1]
if not date_list and not allow_empty:
raise Http404("No %s available" % model._meta.verbose_name)
if date_list and num_latest:
latest = queryset.order_by('-'+date_field)[:num_latest]
else:
latest = None
if not template_name:
template_name = "%s/%s_archive.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list' : date_list,
template_object_name : latest,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_year(request, year, queryset, date_field, template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object', mimetype=None,
make_object_list=False, allow_future=False):
"""
Generic yearly archive view.
Templates: ``<app_label>/<model_name>_archive_year.html``
Context:
date_list
List of months in this year with objects
year
This year
object_list
List of objects published in the given month
(Only available if make_object_list argument is True)
"""
if extra_context is None: extra_context = {}
model = queryset.model
now = datetime.datetime.now()
lookup_kwargs = {'%s__year' % date_field: year}
# Only bother to check current date if the year isn't in the past and future objects aren't requested.
if int(year) >= now.year and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
date_list = queryset.filter(**lookup_kwargs).dates(date_field, 'month')
if not date_list and not allow_empty:
raise Http404
if make_object_list:
object_list = queryset.filter(**lookup_kwargs)
else:
object_list = []
if not template_name:
template_name = "%s/%s_archive_year.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list': date_list,
'year': year,
'%s_list' % template_object_name: object_list,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_month(request, year, month, queryset, date_field,
month_format='%b', template_name=None, template_loader=loader,
extra_context=None, allow_empty=False, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic monthly archive view.
Templates: ``<app_label>/<model_name>_archive_month.html``
Context:
date_list:
List of days in this month with objects
month:
(date) this month
next_month:
(date) the first day of the next month, or None if the next month is in the future
previous_month:
(date) the first day of the previous month
object_list:
list of objects published in the given month
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of month, for use in a date-range lookup.
first_day = date.replace(day=1)
if first_day.month == 12:
last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
last_day = first_day.replace(month=first_day.month + 1)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
# Only bother to check current date if the month isn't in the past and future objects are requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
date_list = object_list.dates(date_field, 'day')
if not object_list and not allow_empty:
raise Http404
# Calculate the next month, if applicable.
if allow_future:
next_month = last_day
elif last_day <= datetime.date.today():
next_month = last_day
else:
next_month = None
# Calculate the previous month
if first_day.month == 1:
previous_month = first_day.replace(year=first_day.year-1,month=12)
else:
previous_month = first_day.replace(month=first_day.month-1)
if not template_name:
template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list': date_list,
'%s_list' % template_object_name: object_list,
'month': date,
'next_month': next_month,
'previous_month': previous_month,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_week(request, year, week, queryset, date_field,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=True, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic weekly archive view.
Templates: ``<app_label>/<model_name>_archive_week.html``
Context:
week:
(date) this week
object_list:
list of objects published in the given week
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime(year+'-0-'+week, '%Y-%w-%U')
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of week, for use in a date-range lookup.
first_day = date
last_day = date + datetime.timedelta(days=7)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
# Only bother to check current date if the week isn't in the past and future objects aren't requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not object_list and not allow_empty:
raise Http404
if not template_name:
template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'week': date,
})
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_day(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object',
mimetype=None, allow_future=False):
"""
Generic daily archive view.
Templates: ``<app_label>/<model_name>_archive_day.html``
Context:
object_list:
list of objects published that day
day:
(datetime) the day
previous_day
(datetime) the previous day
next_day
(datetime) the next day, or None if the current day is today
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not allow_empty and not object_list:
raise Http404
# Calculate the next day, if applicable.
if allow_future:
next_day = date + datetime.timedelta(days=1)
elif date < datetime.date.today():
next_day = date + datetime.timedelta(days=1)
else:
next_day = None
if not template_name:
template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'day': date,
'previous_day': date - datetime.timedelta(days=1),
'next_day': next_day,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_today(request, **kwargs):
"""
Generic daily archive view for today. Same as archive_day view.
"""
today = datetime.date.today()
kwargs.update({
'year': str(today.year),
'month': today.strftime('%b').lower(),
'day': str(today.day),
})
return archive_day(request, **kwargs)
def object_detail(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', object_id=None, slug=None,
slug_field='slug', template_name=None, template_name_field=None,
template_loader=loader, extra_context=None, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic detail view from year/month/day/slug or year/month/day/id structure.
Templates: ``<app_label>/<model_name>_detail.html``
Context:
object:
the object to be detailed
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
if object_id:
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
raise AttributeError("Generic detail view must be called with either an object_id or a slug/slugfield")
try:
obj = queryset.get(**lookup_kwargs)
except ObjectDoesNotExist:
raise Http404("No %s found for" % model._meta.verbose_name)
if not template_name:
template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
if template_name_field:
template_name_list = [getattr(obj, template_name_field), template_name]
t = template_loader.select_template(template_name_list)
else:
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
response = HttpResponse(t.render(c), mimetype=mimetype)
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
return response
| bsd-3-clause | 2cf51060bef57f0912f99bb1e2534df0 | 36.4 | 156 | 0.624171 | 3.772189 | false | false | false | false |
django-nonrel/django-nonrel | tests/modeltests/delete/models.py | 51 | 3216 | from django.db import models, IntegrityError
class R(models.Model):
is_default = models.BooleanField(default=False)
def __str__(self):
return "%s" % self.pk
get_default_r = lambda: R.objects.get_or_create(is_default=True)[0]
class S(models.Model):
r = models.ForeignKey(R)
class T(models.Model):
s = models.ForeignKey(S)
class U(models.Model):
t = models.ForeignKey(T)
class RChild(R):
pass
class A(models.Model):
name = models.CharField(max_length=30)
auto = models.ForeignKey(R, related_name="auto_set")
auto_nullable = models.ForeignKey(R, null=True,
related_name='auto_nullable_set')
setvalue = models.ForeignKey(R, on_delete=models.SET(get_default_r),
related_name='setvalue')
setnull = models.ForeignKey(R, on_delete=models.SET_NULL, null=True,
related_name='setnull_set')
setdefault = models.ForeignKey(R, on_delete=models.SET_DEFAULT,
default=get_default_r, related_name='setdefault_set')
setdefault_none = models.ForeignKey(R, on_delete=models.SET_DEFAULT,
default=None, null=True, related_name='setnull_nullable_set')
cascade = models.ForeignKey(R, on_delete=models.CASCADE,
related_name='cascade_set')
cascade_nullable = models.ForeignKey(R, on_delete=models.CASCADE, null=True,
related_name='cascade_nullable_set')
protect = models.ForeignKey(R, on_delete=models.PROTECT, null=True)
donothing = models.ForeignKey(R, on_delete=models.DO_NOTHING, null=True,
related_name='donothing_set')
child = models.ForeignKey(RChild, related_name="child")
child_setnull = models.ForeignKey(RChild, on_delete=models.SET_NULL, null=True,
related_name="child_setnull")
# A OneToOneField is just a ForeignKey unique=True, so we don't duplicate
# all the tests; just one smoke test to ensure on_delete works for it as
# well.
o2o_setnull = models.ForeignKey(R, null=True,
on_delete=models.SET_NULL, related_name="o2o_nullable_set")
def create_a(name):
a = A(name=name)
for name in ('auto', 'auto_nullable', 'setvalue', 'setnull', 'setdefault',
'setdefault_none', 'cascade', 'cascade_nullable', 'protect',
'donothing', 'o2o_setnull'):
r = R.objects.create()
setattr(a, name, r)
a.child = RChild.objects.create()
a.child_setnull = RChild.objects.create()
a.save()
return a
class M(models.Model):
m2m = models.ManyToManyField(R, related_name="m_set")
m2m_through = models.ManyToManyField(R, through="MR",
related_name="m_through_set")
m2m_through_null = models.ManyToManyField(R, through="MRNull",
related_name="m_through_null_set")
class MR(models.Model):
m = models.ForeignKey(M)
r = models.ForeignKey(R)
class MRNull(models.Model):
m = models.ForeignKey(M)
r = models.ForeignKey(R, null=True, on_delete=models.SET_NULL)
class Avatar(models.Model):
pass
class User(models.Model):
avatar = models.ForeignKey(Avatar, null=True)
class HiddenUser(models.Model):
r = models.ForeignKey(R, related_name="+")
class HiddenUserProfile(models.Model):
user = models.ForeignKey(HiddenUser)
| bsd-3-clause | 84f57c6eec2f6a94210d0b69b15d07ea | 29.339623 | 83 | 0.672886 | 3.332642 | false | false | false | false |
django-nonrel/django-nonrel | django/core/serializers/pyyaml.py | 204 | 1948 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
from StringIO import StringIO
import decimal
import yaml
from django.db import models
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
class DjangoSafeDumper(yaml.SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
for obj in PythonDeserializer(yaml.load(stream), **options):
yield obj
| bsd-3-clause | bc8800443751803de321771607a2ee26 | 33.785714 | 88 | 0.698152 | 4.041494 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/utils/http.py | 1 | 2647 | from django.utils import http
from django.utils import unittest
from django.http import HttpResponse, utils
from django.test import RequestFactory
class TestUtilsHttp(unittest.TestCase):
def test_same_origin_true(self):
# Identical
self.assertTrue(http.same_origin('http://foo.com/', 'http://foo.com/'))
# One with trailing slash - see #15617
self.assertTrue(http.same_origin('http://foo.com', 'http://foo.com/'))
self.assertTrue(http.same_origin('http://foo.com/', 'http://foo.com'))
# With port
self.assertTrue(http.same_origin('https://foo.com:8000', 'https://foo.com:8000/'))
def test_same_origin_false(self):
# Different scheme
self.assertFalse(http.same_origin('http://foo.com', 'https://foo.com'))
# Different host
self.assertFalse(http.same_origin('http://foo.com', 'http://goo.com'))
# Different host again
self.assertFalse(http.same_origin('http://foo.com', 'http://foo.com.evil.com'))
# Different port
self.assertFalse(http.same_origin('http://foo.com:8000', 'http://foo.com:8001'))
def test_fix_IE_for_vary(self):
"""
Regression for #16632.
`fix_IE_for_vary` shouldn't crash when there's no Content-Type header.
"""
# functions to generate responses
def response_with_unsafe_content_type():
r = HttpResponse(content_type="text/unsafe")
r['Vary'] = 'Cookie'
return r
def no_content_response_with_unsafe_content_type():
# 'Content-Type' always defaulted, so delete it
r = response_with_unsafe_content_type()
del r['Content-Type']
return r
# request with & without IE user agent
rf = RequestFactory()
request = rf.get('/')
ie_request = rf.get('/', HTTP_USER_AGENT='MSIE')
# not IE, unsafe_content_type
response = response_with_unsafe_content_type()
utils.fix_IE_for_vary(request, response)
self.assertTrue('Vary' in response)
# IE, unsafe_content_type
response = response_with_unsafe_content_type()
utils.fix_IE_for_vary(ie_request, response)
self.assertFalse('Vary' in response)
# not IE, no_content
response = no_content_response_with_unsafe_content_type()
utils.fix_IE_for_vary(request, response)
self.assertTrue('Vary' in response)
# IE, no_content
response = no_content_response_with_unsafe_content_type()
utils.fix_IE_for_vary(ie_request, response)
self.assertFalse('Vary' in response)
| bsd-3-clause | d6ac8924abff01db10d0dfdc1049cc63 | 36.28169 | 90 | 0.615791 | 3.765292 | false | true | false | false |
django-nonrel/django-nonrel | tests/modeltests/one_to_one/tests.py | 92 | 5714 | from django.test import TestCase
from django.db import transaction, IntegrityError
from models import Place, Restaurant, Waiter, ManualPrimaryKey, RelatedModel, MultiModel
class OneToOneTests(TestCase):
def setUp(self):
self.p1 = Place(name='Demon Dogs', address='944 W. Fullerton')
self.p1.save()
self.p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
self.p2.save()
self.r = Restaurant(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.r.save()
def test_getter(self):
# A Restaurant can access its place.
self.assertEqual(repr(self.r.place), '<Place: Demon Dogs the place>')
# A Place can access its restaurant, if available.
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
# p2 doesn't have an associated restaurant.
self.assertRaises(Restaurant.DoesNotExist, getattr, self.p2, 'restaurant')
def test_setter(self):
# Set the place using assignment notation. Because place is the primary
# key on Restaurant, the save will create a new restaurant
self.r.place = self.p2
self.r.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r.pk)
# Set the place back again, using assignment in the reverse direction.
self.p1.restaurant = self.r
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r.waiter_set.create(name='Joe')
w.save()
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1)
assert_filter_waiters(restaurant__pk=self.p1.pk)
assert_filter_waiters(restaurant=self.p1.pk)
assert_filter_waiters(restaurant=self.r)
assert_filter_waiters(id__exact=self.p1.pk)
assert_filter_waiters(pk=self.p1.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.p1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
    """One-to-one fields work with a manually assigned primary key, and
    each one-to-one column enforces uniqueness."""
    # One-to-one fields still work if you create your own primary key.
    primary = ManualPrimaryKey(primary_key="abc123", name="primary")
    primary.save()
    secondary = RelatedModel(link=primary, name="secondary")
    secondary.save()

    # A model may carry several one-to-one fields at once.
    multi = MultiModel(link1=self.p1, link2=primary, name="x1")
    multi.save()
    self.assertEqual(repr(primary.multimodel), '<MultiModel: Multimodel x1>')

    # Reusing link2=primary must fail: each one-to-one field is unique,
    # and that slot was already consumed by `multi` above.
    sid = transaction.savepoint()
    duplicate = MultiModel(link1=self.p2, link2=primary, name="x1")
    self.assertRaises(IntegrityError, duplicate.save)
    transaction.savepoint_rollback(sid)
| bsd-3-clause | ddd6eaaa1a27ea71b4fe829175ee209b | 47.016807 | 95 | 0.644207 | 3.533704 | false | true | false | false |
django-nonrel/django-nonrel | django/core/management/commands/loaddata.py | 250 | 11042 | import sys
import os
import gzip
import zipfile
from optparse import make_option
from django.conf import settings
from django.core import serializers
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connections, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import get_apps
from django.utils.itercompat import product
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
    # Management command: `manage.py loaddata fixture [fixture ...]`.
    help = 'Installs the named fixture(s) in the database.'
    args = "fixture [fixture ...]"

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
                'fixtures into. Defaults to the "default" database.'),
    )

    def handle(self, *fixture_labels, **options):
        """
        Locate each named fixture (searching app fixtures/ dirs,
        settings.FIXTURE_DIRS and absolute paths), deserialize it and save
        its objects into the selected database inside one transaction.
        """
        using = options.get('database', DEFAULT_DB_ALIAS)

        connection = connections[using]
        self.style = no_style()

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)

        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)

        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        loaded_object_count = 0
        fixture_object_count = 0
        models = set()

        # dirname is '' for the "current directory" search entry below.
        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed(using=using)
            transaction.enter_transaction_management(using=using)
            transaction.managed(True, using=using)

        class SingleZipReader(zipfile.ZipFile):
            """Reader for .zip fixtures; the archive must hold one file."""
            def __init__(self, *args, **kwargs):
                zipfile.ZipFile.__init__(self, *args, **kwargs)
                if settings.DEBUG:
                    assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
            def read(self):
                # Read the single member rather than the raw archive bytes.
                return zipfile.ZipFile.read(self, self.namelist()[0])

        # Maps fixture-name suffix -> open callable; None means uncompressed.
        compression_types = {
            None: file,
            'gz': gzip.GzipFile,
            'zip': SingleZipReader
        }
        if has_bz2:
            compression_types['bz2'] = bz2.BZ2File

        app_module_paths = []
        for app in get_apps():
            if hasattr(app, '__path__'):
                # It's a 'models/' subpackage
                for path in app.__path__:
                    app_module_paths.append(path)
            else:
                # It's a models.py module
                app_module_paths.append(app.__file__)

        # Each app contributes a sibling 'fixtures' directory to the search path.
        app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
        for fixture_label in fixture_labels:
            parts = fixture_label.split('.')

            # Peel off a trailing compression suffix (gz/zip/bz2) if present;
            # otherwise try every known compression format.
            if len(parts) > 1 and parts[-1] in compression_types:
                compression_formats = [parts[-1]]
                parts = parts[:-1]
            else:
                compression_formats = compression_types.keys()

            # Remaining label is either bare name (try all serializers) or
            # name.format (use that serializer only, if it is known).
            if len(parts) == 1:
                fixture_name = parts[0]
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []

            if formats:
                if verbosity >= 2:
                    self.stdout.write("Loading '%s' fixtures...\n" % fixture_name)
            else:
                # Unknown serialization format: abort and roll back.
                self.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format.\n" %
                        (fixture_name, format)))
                if commit:
                    transaction.rollback(using=using)
                    transaction.leave_transaction_management(using=using)
                return

            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                # '' makes the search also consider paths relative to cwd.
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

            for fixture_dir in fixture_dirs:
                if verbosity >= 2:
                    self.stdout.write("Checking %s for fixtures...\n" % humanize(fixture_dir))

                label_found = False
                # Candidate file names: name[.database][.format][.compression]
                for combo in product([using, None], formats, compression_formats):
                    database, format, compression_format = combo
                    file_name = '.'.join(
                        p for p in [
                            fixture_name, database, format, compression_format
                        ]
                        if p
                    )

                    if verbosity >= 3:
                        self.stdout.write("Trying %s for %s fixture '%s'...\n" % \
                            (humanize(fixture_dir), file_name, fixture_name))
                    full_path = os.path.join(fixture_dir, file_name)
                    open_method = compression_types[compression_format]
                    try:
                        fixture = open_method(full_path, 'r')
                        if label_found:
                            # A fixture with this label was already loaded from
                            # this directory -- ambiguous, so abort.
                            fixture.close()
                            self.stderr.write(self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting.\n" %
                                (fixture_name, humanize(fixture_dir))))
                            if commit:
                                transaction.rollback(using=using)
                                transaction.leave_transaction_management(using=using)
                            return
                        else:
                            fixture_count += 1
                            objects_in_fixture = 0
                            loaded_objects_in_fixture = 0
                            if verbosity >= 2:
                                self.stdout.write("Installing %s fixture '%s' from %s.\n" % \
                                    (format, fixture_name, humanize(fixture_dir)))
                            try:
                                objects = serializers.deserialize(format, fixture, using=using)
                                for obj in objects:
                                    objects_in_fixture += 1
                                    # Only save objects the router allows on
                                    # this database.
                                    if router.allow_syncdb(using, obj.object.__class__):
                                        loaded_objects_in_fixture += 1
                                        models.add(obj.object.__class__)
                                        obj.save(using=using)
                                loaded_object_count += loaded_objects_in_fixture
                                fixture_object_count += objects_in_fixture
                                label_found = True
                            except (SystemExit, KeyboardInterrupt):
                                raise
                            except Exception:
                                import traceback
                                fixture.close()
                                if commit:
                                    transaction.rollback(using=using)
                                    transaction.leave_transaction_management(using=using)
                                if show_traceback:
                                    traceback.print_exc()
                                else:
                                    self.stderr.write(
                                        self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                             (full_path, ''.join(traceback.format_exception(sys.exc_type,
                                                 sys.exc_value, sys.exc_traceback)))))
                                return
                            fixture.close()

                            # If the fixture we loaded contains 0 objects, assume that an
                            # error was encountered during fixture loading.
                            if objects_in_fixture == 0:
                                self.stderr.write(
                                    self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)\n" %
                                        (fixture_name)))
                                if commit:
                                    transaction.rollback(using=using)
                                    transaction.leave_transaction_management(using=using)
                                return
                    except Exception, e:
                        # Candidate file did not open (typically missing);
                        # silently try the next name/dir combination.
                        if verbosity >= 2:
                            self.stdout.write("No %s fixture '%s' in %s.\n" % \
                                (format, fixture_name, humanize(fixture_dir)))

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if loaded_object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                for line in sequence_sql:
                    cursor.execute(line)

        if commit:
            transaction.commit(using=using)
            transaction.leave_transaction_management(using=using)

        if fixture_object_count == 0:
            if verbosity >= 1:
                self.stdout.write("No fixtures found.\n")
        else:
            if verbosity >= 1:
                if fixture_object_count == loaded_object_count:
                    self.stdout.write("Installed %d object(s) from %d fixture(s)\n" % (
                        loaded_object_count, fixture_count))
                else:
                    # Some objects were skipped by the database router.
                    self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)\n" % (
                        loaded_object_count, fixture_object_count, fixture_count))

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()
| bsd-3-clause | 844e9250485b541c9d5d36ade418b6b5 | 44.440329 | 120 | 0.494747 | 5.112037 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/forms/tests/error_messages.py | 50 | 10684 | # -*- coding: utf-8 -*-
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import *
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.utils import unittest
class AssertFormErrorsMixin(object):
def assertFormErrors(self, expected, the_callable, *args, **kwargs):
try:
the_callable(*args, **kwargs)
self.fail("Testing the 'clean' method on %s failed to raise a ValidationError.")
except ValidationError, e:
self.assertEqual(e.messages, expected)
class FormsErrorMessagesTestCase(unittest.TestCase, AssertFormErrorsMixin):
    """
    Each test builds a form field with a custom `error_messages` dict and
    verifies, via assertFormErrors, that clean() raises exactly the custom
    message (with %(show_value)s / %(limit_value)s interpolated) for each
    kind of invalid input.
    """

    def test_charfield(self):
        e = {
            'required': 'REQUIRED',
            'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
            'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
        }
        f = CharField(min_length=5, max_length=10, error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
        self.assertFormErrors([u'LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')

    def test_integerfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_value': 'MIN VALUE IS %(limit_value)s',
            'max_value': 'MAX VALUE IS %(limit_value)s',
        }
        f = IntegerField(min_value=5, max_value=10, error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc')
        self.assertFormErrors([u'MIN VALUE IS 5'], f.clean, '4')
        self.assertFormErrors([u'MAX VALUE IS 10'], f.clean, '11')

    def test_floatfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_value': 'MIN VALUE IS %(limit_value)s',
            'max_value': 'MAX VALUE IS %(limit_value)s',
        }
        f = FloatField(min_value=5, max_value=10, error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc')
        self.assertFormErrors([u'MIN VALUE IS 5'], f.clean, '4')
        self.assertFormErrors([u'MAX VALUE IS 10'], f.clean, '11')

    def test_decimalfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_value': 'MIN VALUE IS %(limit_value)s',
            'max_value': 'MAX VALUE IS %(limit_value)s',
            'max_digits': 'MAX DIGITS IS %s',
            'max_decimal_places': 'MAX DP IS %s',
            'max_whole_digits': 'MAX DIGITS BEFORE DP IS %s',
        }
        f = DecimalField(min_value=5, max_value=10, error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc')
        self.assertFormErrors([u'MIN VALUE IS 5'], f.clean, '4')
        self.assertFormErrors([u'MAX VALUE IS 10'], f.clean, '11')

        # Digit-count constraints use a second field instance.
        f2 = DecimalField(max_digits=4, decimal_places=2, error_messages=e)
        self.assertFormErrors([u'MAX DIGITS IS 4'], f2.clean, '123.45')
        self.assertFormErrors([u'MAX DP IS 2'], f2.clean, '1.234')
        self.assertFormErrors([u'MAX DIGITS BEFORE DP IS 2'], f2.clean, '123.4')

    def test_datefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
        }
        f = DateField(error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc')

    def test_timefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
        }
        f = TimeField(error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc')

    def test_datetimefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
        }
        f = DateTimeField(error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc')

    def test_regexfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
            'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
        }
        f = RegexField(r'^\d+$', min_length=5, max_length=10, error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abcde')
        self.assertFormErrors([u'LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
        self.assertFormErrors([u'LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')

    def test_emailfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
            'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
        }
        f = EmailField(min_length=8, max_length=10, error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abcdefgh')
        self.assertFormErrors([u'LENGTH 7, MIN LENGTH 8'], f.clean, 'a@b.com')
        self.assertFormErrors([u'LENGTH 11, MAX LENGTH 10'], f.clean, 'aye@bee.com')

    def test_filefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'missing': 'MISSING',
            'empty': 'EMPTY FILE',
        }
        f = FileField(error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc')
        # Both a None payload and an empty string count as an empty file.
        self.assertFormErrors([u'EMPTY FILE'], f.clean, SimpleUploadedFile('name', None))
        self.assertFormErrors([u'EMPTY FILE'], f.clean, SimpleUploadedFile('name', ''))

    def test_urlfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID',
            'invalid_link': 'INVALID LINK',
        }
        # verify_exists=True makes clean() also check the URL is reachable.
        f = URLField(verify_exists=True, error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID'], f.clean, 'abc.c')
        self.assertFormErrors([u'INVALID LINK'], f.clean, 'http://www.broken.djangoproject.com')

    def test_booleanfield(self):
        e = {
            'required': 'REQUIRED',
        }
        f = BooleanField(error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')

    def test_choicefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid_choice': '%(value)s IS INVALID CHOICE',
        }
        f = ChoiceField(choices=[('a', 'aye')], error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'b IS INVALID CHOICE'], f.clean, 'b')

    def test_multiplechoicefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid_choice': '%(value)s IS INVALID CHOICE',
            'invalid_list': 'NOT A LIST',
        }
        f = MultipleChoiceField(choices=[('a', 'aye')], error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'NOT A LIST'], f.clean, 'b')
        self.assertFormErrors([u'b IS INVALID CHOICE'], f.clean, ['b'])

    def test_splitdatetimefield(self):
        e = {
            'required': 'REQUIRED',
            'invalid_date': 'INVALID DATE',
            'invalid_time': 'INVALID TIME',
        }
        f = SplitDateTimeField(error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID DATE', u'INVALID TIME'], f.clean, ['a', 'b'])

    def test_ipaddressfield(self):
        e = {
            'required': 'REQUIRED',
            'invalid': 'INVALID IP ADDRESS',
        }
        f = IPAddressField(error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'INVALID IP ADDRESS'], f.clean, '127.0.0')

    def test_subclassing_errorlist(self):
        class TestForm(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()

            def clean(self):
                raise ValidationError("I like to be awkward.")

        # Custom error list renderer: wraps errors in <div>/<p> markup
        # instead of the default <ul class="errorlist">.
        class CustomErrorList(util.ErrorList):
            def __unicode__(self):
                return self.as_divs()

            def as_divs(self):
                if not self: return u''
                return mark_safe(u'<div class="error">%s</div>' % ''.join([u'<p>%s</p>' % e for e in self]))

        # This form should print errors the default way.
        form1 = TestForm({'first_name': 'John'})
        self.assertEqual(str(form1['last_name'].errors), '<ul class="errorlist"><li>This field is required.</li></ul>')
        self.assertEqual(str(form1.errors['__all__']), '<ul class="errorlist"><li>I like to be awkward.</li></ul>')

        # This one should wrap error groups in the customized way.
        form2 = TestForm({'first_name': 'John'}, error_class=CustomErrorList)
        self.assertEqual(str(form2['last_name'].errors), '<div class="error"><p>This field is required.</p></div>')
        self.assertEqual(str(form2.errors['__all__']), '<div class="error"><p>I like to be awkward.</p></div>')
class ModelChoiceFieldErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
    """Custom error_messages for the model-backed choice fields."""

    def test_modelchoicefield(self):
        # Create choices for the model choice field tests below.
        from regressiontests.forms.models import ChoiceModel
        c1 = ChoiceModel.objects.create(pk=1, name='a')
        c2 = ChoiceModel.objects.create(pk=2, name='b')
        c3 = ChoiceModel.objects.create(pk=3, name='c')

        # ModelChoiceField
        e = {
            'required': 'REQUIRED',
            'invalid_choice': 'INVALID CHOICE',
        }
        f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        # pk 4 does not exist in the queryset created above.
        self.assertFormErrors([u'INVALID CHOICE'], f.clean, '4')

        # ModelMultipleChoiceField
        e = {
            'required': 'REQUIRED',
            'invalid_choice': '%s IS INVALID CHOICE',
            'list': 'NOT A LIST OF VALUES',
        }
        f = ModelMultipleChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
        self.assertFormErrors([u'REQUIRED'], f.clean, '')
        self.assertFormErrors([u'NOT A LIST OF VALUES'], f.clean, '3')
        self.assertFormErrors([u'4 IS INVALID CHOICE'], f.clean, ['4'])
| bsd-3-clause | 2cd30a1a1940b4e6b2335883f0fd94fc | 41.229249 | 119 | 0.576376 | 3.707148 | false | true | false | false |
django-nonrel/django-nonrel | django/contrib/sites/models.py | 387 | 2867 | from django.db import models
from django.utils.translation import ugettext_lazy as _
# Module-level cache mapping SITE_ID -> Site instance; filled by
# SiteManager.get_current() and invalidated by Site.save()/delete().
SITE_CACHE = {}
class SiteManager(models.Manager):
    def get_current(self):
        """
        Returns the current ``Site`` based on the SITE_ID in the
        project's settings. The ``Site`` object is cached the first
        time it's retrieved from the database.
        """
        from django.conf import settings
        try:
            sid = settings.SITE_ID
        except AttributeError:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured("You're using the Django \"sites framework\" without having set the SITE_ID setting. Create a site in your database and set the SITE_ID setting to fix this error.")
        # Populate the module-level cache on first access.
        if sid not in SITE_CACHE:
            SITE_CACHE[sid] = self.get(pk=sid)
        return SITE_CACHE[sid]

    def clear_cache(self):
        """Clears the ``Site`` object cache."""
        global SITE_CACHE
        SITE_CACHE = {}
class Site(models.Model):
    """A site (domain + display name) served by this Django installation."""
    domain = models.CharField(_('domain name'), max_length=100)
    name = models.CharField(_('display name'), max_length=50)
    objects = SiteManager()

    class Meta:
        db_table = 'django_site'
        verbose_name = _('site')
        verbose_name_plural = _('sites')
        ordering = ('domain',)

    def __unicode__(self):
        return self.domain

    def save(self, *args, **kwargs):
        super(Site, self).save(*args, **kwargs)
        # Any cached copy of this site is now stale; drop it.
        SITE_CACHE.pop(self.id, None)

    def delete(self):
        # Remember the pk before the row disappears, then evict the cache.
        pk = self.pk
        super(Site, self).delete()
        SITE_CACHE.pop(pk, None)
class RequestSite(object):
    """
    Duck-types the primary ``Site`` interface (``domain`` and ``name``
    attributes) but derives its data from a Django HttpRequest object
    rather than from a database.

    The save() and delete() methods raise NotImplementedError.
    """
    def __init__(self, request):
        host = request.get_host()
        self.domain = host
        self.name = host

    def __unicode__(self):
        return self.domain

    def save(self, force_insert=False, force_update=False):
        raise NotImplementedError('RequestSite cannot be saved.')

    def delete(self):
        raise NotImplementedError('RequestSite cannot be deleted.')
def get_current_site(request):
    """
    Checks if contrib.sites is installed and returns either the current
    ``Site`` object or a ``RequestSite`` object based on the request.
    """
    # Fall back to a request-derived stand-in when the app isn't installed.
    if not Site._meta.installed:
        return RequestSite(request)
    return Site.objects.get_current()
| bsd-3-clause | 00419150de256afd476779458285a10e | 29.178947 | 203 | 0.62016 | 4.298351 | false | false | false | false |
django-nonrel/django-nonrel | django/contrib/sitemaps/views.py | 232 | 2084 | from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.utils.encoding import smart_str
from django.core.paginator import EmptyPage, PageNotAnInteger
def index(request, sitemaps, template_name='sitemap_index.xml'):
    """Render an XML sitemap index: one URL per section page."""
    current_site = get_current_site(request)
    if request.is_secure():
        protocol = 'https'
    else:
        protocol = 'http'

    sites = []
    for section, site in sitemaps.items():
        site.request = request
        # Sections may be given as classes (instantiate) or instances.
        if callable(site):
            num_pages = site().paginator.num_pages
        else:
            num_pages = site.paginator.num_pages
        sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap', kwargs={'section': section})
        base = '%s://%s%s' % (protocol, current_site.domain, sitemap_url)
        sites.append(base)
        # Pages beyond the first get an explicit ?p= query parameter.
        # (range(2, 2) is empty, so single-page sections add nothing.)
        for page in range(2, num_pages + 1):
            sites.append('%s?p=%s' % (base, page))

    xml = loader.render_to_string(template_name, {'sitemaps': sites})
    return HttpResponse(xml, mimetype='application/xml')
def sitemap(request, sitemaps, section=None, template_name='sitemap.xml'):
    """Render the XML sitemap for one named section, or for all sections."""
    if section is not None:
        if section not in sitemaps:
            raise Http404("No sitemap available for section: %r" % section)
        maps = [sitemaps[section]]
    else:
        maps = sitemaps.values()

    urls = []
    page = request.GET.get("p", 1)
    current_site = get_current_site(request)
    for site in maps:
        try:
            # Sections may be classes (instantiate first) or instances.
            if callable(site):
                site = site()
            urls.extend(site.get_urls(page=page, site=current_site))
        except EmptyPage:
            raise Http404("Page %s empty" % page)
        except PageNotAnInteger:
            raise Http404("No page '%s'" % page)
    xml = smart_str(loader.render_to_string(template_name, {'urlset': urls}))
    return HttpResponse(xml, mimetype='application/xml')
| bsd-3-clause | 63206ef53591de72647b4f170d95b86c | 43.340426 | 112 | 0.644434 | 3.873606 | false | false | false | false |
django-nonrel/django-nonrel | django/http/__init__.py | 1 | 27956 | import datetime
import os
import re
import time
from pprint import pformat
from urllib import urlencode, quote
from urlparse import urljoin, urlparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
import Cookie
# httponly support exists in Python 2.6's Cookie library,
# but not in Python 2.4 or 2.5.
_morsel_supports_httponly = Cookie.Morsel._reserved.has_key('httponly')
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = Cookie.SimpleCookie()
_tc.load('f:oo')
_cookie_allows_colon_in_names = 'Set-Cookie: f:oo=' in _tc.output()
# If the stdlib Cookie module passes all three capability probes above,
# use it directly; otherwise patch the missing behavior in subclasses.
if _morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names:
    SimpleCookie = Cookie.SimpleCookie
else:
    if not _morsel_supports_httponly:
        class Morsel(Cookie.Morsel):
            # Backport of httponly support for Python 2.4/2.5 Morsels.
            def __setitem__(self, K, V):
                K = K.lower()
                if K == "httponly":
                    if V:
                        # The superclass rejects httponly as a key,
                        # so we jump to the grandparent.
                        super(Cookie.Morsel, self).__setitem__(K, V)
                else:
                    super(Morsel, self).__setitem__(K, V)

            def OutputString(self, attrs=None):
                # Append the attribute the base class doesn't know about.
                output = super(Morsel, self).OutputString(attrs)
                if "httponly" in self:
                    output += "; httponly"
                return output

    class SimpleCookie(Cookie.SimpleCookie):
        # Each fix below is only compiled in when the corresponding
        # capability probe failed for this Python version.
        if not _morsel_supports_httponly:
            def __set(self, key, real_value, coded_value):
                # Same as the base implementation, but stores our Morsel
                # subclass so httponly round-trips.
                M = self.get(key, Morsel())
                M.set(key, real_value, coded_value)
                dict.__setitem__(self, key, M)

            def __setitem__(self, key, value):
                rval, cval = self.value_encode(value)
                self.__set(key, rval, cval)

        if not _cookie_encodes_correctly:
            def value_encode(self, val):
                # Some browsers do not support quoted-string from RFC 2109,
                # including some versions of Safari and Internet Explorer.
                # These browsers split on ';', and some versions of Safari
                # are known to split on ', '. Therefore, we encode ';' and ','

                # SimpleCookie already does the hard work of encoding and decoding.
                # It uses octal sequences like '\\012' for newline etc.
                # and non-ASCII chars. We just make use of this mechanism, to
                # avoid introducing two encoding schemes which would be confusing
                # and especially awkward for javascript.

                # NB, contrary to Python docs, value_encode returns a tuple containing
                # (real val, encoded_val)
                val, encoded = super(SimpleCookie, self).value_encode(val)

                encoded = encoded.replace(";", "\\073").replace(",","\\054")
                # If encoded now contains any quoted chars, we need double quotes
                # around the whole string.
                if "\\" in encoded and not encoded.startswith('"'):
                    encoded = '"' + encoded + '"'

                return val, encoded

        if not _cookie_allows_colon_in_names:
            def load(self, rawdata, ignore_parse_errors=False):
                # Optionally tolerate malformed cookie names: swap in a
                # lenient setter for the duration of the parse, then drop
                # any entries that failed.
                if ignore_parse_errors:
                    self.bad_cookies = set()
                    self._BaseCookie__set = self._loose_set
                super(SimpleCookie, self).load(rawdata)
                if ignore_parse_errors:
                    self._BaseCookie__set = self._strict_set
                    for key in self.bad_cookies:
                        del self[key]

            # Keep a reference to the stock (strict) setter.
            _strict_set = Cookie.BaseCookie._BaseCookie__set

            def _loose_set(self, key, real_value, coded_value):
                # Record offending keys instead of raising; a placeholder
                # Morsel is stored so the key can be deleted afterwards.
                try:
                    self._strict_set(key, real_value, coded_value)
                except Cookie.CookieError:
                    self.bad_cookies.add(key)
                    dict.__setitem__(self, key, Cookie.Morsel())
class CompatCookie(SimpleCookie):
    """Deprecated alias of SimpleCookie; warns on instantiation."""
    def __init__(self, *args, **kwargs):
        super(CompatCookie, self).__init__(*args, **kwargs)
        import warnings
        warnings.warn("CompatCookie is deprecated, use django.http.SimpleCookie instead.",
                      PendingDeprecationWarning)
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
from django.http.multipartparser import MultiPartParser
from django.conf import settings
from django.core.files import uploadhandler
from utils import *
# Characters with reserved meaning in URIs (RFC 3986).
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"

# Matches absolute http/https URLs (used by build_absolute_uri).
absolute_http_url_re = re.compile(r"^https?://", re.I)
# Validates Host header values: hostname or bracketed IPv6, optional port.
# NOTE(review): presumably consumed by validate_host in the utils module
# (imported via `from utils import *`) -- confirm there.
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class Http404(Exception):
    """Exception used to signal an HTTP 404 (page not found) response."""
    pass
class HttpRequest(object):
    """A basic HTTP request."""

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []

    def __init__(self):
        # Subclasses are expected to replace these with real parsed data
        # and to set self._stream / self._read_started for the file-like
        # interface below.
        self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
        self.path = ''
        self.path_info = ''
        self.method = None

    def __repr__(self):
        return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
            (pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
            pformat(self.META))

    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
            'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            # Only append the port when it isn't the scheme's default.
            if server_port != (self.is_secure() and '443' or '80'):
                host = '%s:%s' % (host, server_port)

        # Under DEBUG any host is accepted; otherwise the host must match
        # settings.ALLOWED_HOSTS (checked by validate_host from utils).
        if settings.DEBUG:
            allowed_hosts = ['*']
        else:
            allowed_hosts = settings.ALLOWED_HOSTS
        if validate_host(host, allowed_hosts):
            return host
        else:
            raise SuspiciousOperation(
                "Invalid HTTP_HOST header (you may need to set ALLOWED_HOSTS): %s" % host)

    def get_full_path(self):
        """Return the path plus the (IRI-encoded) query string, if any."""
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')

    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no location is specified, the absolute URI is built on
        ``request.get_full_path()``.
        """
        if not location:
            location = self.get_full_path()
        # Relative locations are joined against this request's scheme/host/path.
        if not absolute_http_url_re.match(location):
            current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
                                         self.get_host(), self.path)
            location = urljoin(current_uri, location)
        return iri_to_uri(location)

    def is_secure(self):
        # Base implementation consults the process environment; subclasses
        # presumably override this with server-specific detection.
        return os.environ.get("HTTPS") == "on"

    def is_ajax(self):
        # Relies on the X-Requested-With header set by common JS libraries.
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'

    def _set_encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post

    def _get_encoding(self):
        return self._encoding

    encoding = property(_get_encoding, _set_encoding)

    def _initialize_handlers(self):
        # Build the default upload handler chain from settings.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]

    def _set_upload_handlers(self, upload_handlers):
        # _files existing means the upload was already parsed; changing
        # handlers now could not take effect, so refuse.
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers

    def _get_upload_handlers(self):
        if not self._upload_handlers:
            # If thre are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers

    upload_handlers = property(_get_upload_handlers, _set_upload_handlers)

    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        # Freeze the handler list: mutating it mid-parse would be an error.
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning = "You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()

    def _get_raw_post_data(self):
        # Lazily read and memoize the request body; afterwards self._stream
        # is rewound onto the buffered copy so it can be re-read.
        if not hasattr(self, '_raw_post_data'):
            if self._read_started:
                raise Exception("You cannot access raw_post_data after reading from request's data stream")
            try:
                content_length = int(self.META.get('CONTENT_LENGTH', 0))
            except (ValueError, TypeError):
                # If CONTENT_LENGTH was empty string or not an integer, don't
                # error out. We've also seen None passed in here (against all
                # specs, but see ticket #8259), so we handle TypeError as well.
                content_length = 0
            if content_length:
                self._raw_post_data = self.read(content_length)
            else:
                self._raw_post_data = self.read()
            self._stream = StringIO(self._raw_post_data)
        return self._raw_post_data
    raw_post_data = property(_get_raw_post_data)

    def _mark_post_parse_error(self):
        # Record a parse failure with empty-but-valid POST/FILES so later
        # accesses don't retry the broken parse.
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True

    def _load_post_and_files(self):
        # Populates self._post and self._files
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_raw_post_data'):
            # The stream was consumed without buffering; parsing is no
            # longer possible.
            self._mark_post_parse_error()
            return

        if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
            if hasattr(self, '_raw_post_data'):
                # Use already read data
                data = StringIO(self._raw_post_data)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except:
                # An error occured while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occured. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        else:
            self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict()

    ## File-like and iterator interface.
    ##
    ## Expects self._stream to be set to an appropriate source of bytes by
    ## a corresponding request subclass (WSGIRequest or ModPythonRequest).
    ## Also when request data has already been read by request.POST or
    ## request.raw_post_data, self._stream points to a StringIO instance
    ## containing that data.

    def read(self, *args, **kwargs):
        self._read_started = True
        return self._stream.read(*args, **kwargs)

    def readline(self, *args, **kwargs):
        self._read_started = True
        return self._stream.readline(*args, **kwargs)

    def xreadlines(self):
        # Generator yielding one line at a time from the stream.
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines

    def readlines(self):
        return list(iter(self))
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict that takes a query string when initialized.
This is immutable unless you create a copy of it.
Values retrieved from this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
# These are both reset in __init__, but is specified here at the class
# level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string, mutable=False, encoding=None):
MultiValueDict.__init__(self)
if not encoding:
# *Important*: do not import settings any earlier because of note
# in core.handlers.modpython.
from django.conf import settings
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
self.appendlist(force_unicode(key, encoding, errors='replace'),
force_unicode(value, encoding, errors='replace'))
self._mutable = mutable
def _get_encoding(self):
if self._encoding is None:
# *Important*: do not import settings at the module level because
# of the note in core.handlers.modpython.
from django.conf import settings
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
def _set_encoding(self, value):
self._encoding = value
encoding = property(_get_encoding, _set_encoding)
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.__setitem__(self, key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in dict.items(self):
dict.__setitem__(result, key, value)
return result
def __deepcopy__(self, memo):
import django.utils.copycompat as copy
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
MultiValueDict.setlist(self, key, list_)
def setlistdefault(self, key, default_list=()):
self._assert_mutable()
if key not in self:
self.setlist(key, default_list)
return MultiValueDict.getlist(self, key)
def appendlist(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.appendlist(self, key, value)
def update(self, other_dict):
self._assert_mutable()
f = lambda s: str_to_unicode(s, self.encoding)
if hasattr(other_dict, 'lists'):
for key, valuelist in other_dict.lists():
for value in valuelist:
MultiValueDict.update(self, {f(key): f(value)})
else:
d = dict([(f(k), f(v)) for k, v in other_dict.items()])
MultiValueDict.update(self, d)
def pop(self, key, *args):
self._assert_mutable()
return MultiValueDict.pop(self, key, *args)
def popitem(self):
self._assert_mutable()
return MultiValueDict.popitem(self)
def clear(self):
self._assert_mutable()
MultiValueDict.clear(self)
def setdefault(self, key, default=None):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
default = str_to_unicode(default, self.encoding)
return MultiValueDict.setdefault(self, key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict('', mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
encode = lambda k, v: urlencode({k: v})
for k, list_ in self.lists():
k = smart_str(k, self.encoding)
output.extend([encode(k, smart_str(v, self.encoding))
for v in list_])
return '&'.join(output)
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, Cookie.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie, ignore_parse_errors=True)
except Cookie.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
class BadHeaderError(ValueError):
pass
class HttpResponse(object):
"""A basic HTTP response, with content and dictionary-accessed headers."""
status_code = 200
def __init__(self, content='', mimetype=None, status=None,
content_type=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
if mimetype:
content_type = mimetype # For backwards compatibility
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
if not isinstance(content, basestring) and hasattr(content, '__iter__'):
self._container = content
self._is_string = False
else:
self._container = [content]
self._is_string = True
self.cookies = SimpleCookie()
if status:
self.status_code = status
self['Content-Type'] = content_type
def __str__(self):
"""Full HTTP message, including headers."""
return '\n'.join(['%s: %s' % (key, value)
for key, value in self._headers.values()]) \
+ '\n\n' + self.content
def _convert_to_ascii(self, *values):
"""Converts all values to ascii strings."""
for value in values:
if isinstance(value, unicode):
try:
value = value.encode('us-ascii')
except UnicodeError, e:
e.reason += ', HTTP response headers must be in US-ASCII format'
raise
else:
value = str(value)
if '\n' in value or '\r' in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
yield value
def __setitem__(self, header, value):
header, value = self._convert_to_ascii(header, value)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return self._headers.has_key(header.lower())
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be a string in the correct format or a
``datetime.datetime`` object in UTC. If ``expires`` is a datetime
object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
def _get_content(self):
if self.has_header('Content-Encoding'):
return ''.join(self._container)
return smart_str(''.join(self._container), self._charset)
def _set_content(self, value):
self._container = [value]
self._is_string = True
content = property(_get_content, _set_content)
def __iter__(self):
self._iterator = iter(self._container)
return self
def next(self):
chunk = self._iterator.next()
if isinstance(chunk, unicode):
chunk = chunk.encode(self._charset)
return str(chunk)
def close(self):
if hasattr(self._container, 'close'):
self._container.close()
# The remaining methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
def write(self, content):
if not self._is_string:
raise Exception("This %s instance is not writable" % self.__class__)
self._container.append(content)
def flush(self):
pass
def tell(self):
if not self._is_string:
raise Exception("This %s instance cannot tell its position" % self.__class__)
return sum([len(chunk) for chunk in self._container])
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ['http', 'https', 'ftp']
def __init__(self, redirect_to):
super(HttpResponseRedirectBase, self).__init__()
parsed = urlparse(redirect_to)
if parsed[0] and parsed[0] not in self.allowed_schemes:
raise SuspiciousOperation("Unsafe redirect to URL with scheme '%s'" % parsed[0])
self['Location'] = iri_to_uri(redirect_to)
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods):
super(HttpResponseNotAllowed, self).__init__()
self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
return request.get_host()
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, str):
return unicode(s, encoding, 'replace')
else:
return s
def validate_host(host, allowed_hosts):
"""
Validate the given host header value for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Return ``True`` for a valid host, ``False`` otherwise.
"""
# All validation is case-insensitive
host = host.lower()
# Basic sanity check
if not host_validation_re.match(host):
return False
# Validate only the domain part.
if host[-1] == ']':
# It's an IPv6 address without a port.
domain = host
else:
domain = host.rsplit(':', 1)[0]
for pattern in allowed_hosts:
pattern = pattern.lower()
match = (
pattern == '*' or
pattern.startswith('.') and (
domain.endswith(pattern) or domain == pattern[1:]
) or
pattern == domain
)
if match:
return True
return False
| bsd-3-clause | fe6f525fc68a6374f73ac6a736567903 | 36.274667 | 134 | 0.587352 | 4.237042 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/admin_filterspecs/tests.py | 1 | 13279 | from django.contrib.auth.admin import UserAdmin
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.utils.encoding import force_unicode
from models import Book, BoolTest, Employee, Department
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class FilterSpecsTests(TestCase):
def setUp(self):
# Users
self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
self.bob = User.objects.create_user('bob', 'bob@example.com')
lisa = User.objects.create_user('lisa', 'lisa@example.com')
#Books
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob)
gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002)
gipsy_book.contributors = [self.bob, lisa]
gipsy_book.save()
# BoolTests
self.trueTest = BoolTest.objects.create(completed=True)
self.falseTest = BoolTest.objects.create(completed=False)
self.request_factory = RequestFactory()
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_editable, modeladmin)
def test_AllValuesFilterSpec(self):
modeladmin = BookAdmin(Book, admin.site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure changelist.get_query_set() does not raise IncorrectLookupParameters
queryset = changelist.get_query_set()
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title()), u'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title()), u'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_RelatedFilterSpec_ForeignKey(self):
modeladmin = BookAdmin(Book, admin.site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = ChangeList(request, Book, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_editable, modeladmin)
# Make sure changelist.get_query_set() does not raise IncorrectLookupParameters
queryset = changelist.get_query_set()
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title()), u'author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title()), u'author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_RelatedFilterSpec_ManyToMany(self):
modeladmin = BookAdmin(Book, admin.site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure changelist.get_query_set() does not raise IncorrectLookupParameters
queryset = changelist.get_query_set()
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_unicode(filterspec.title()), u'user')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_unicode(filterspec.title()), u'user')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_RelatedFilterSpec_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, admin.site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure changelist.get_query_set() does not raise IncorrectLookupParameters
queryset = changelist.get_query_set()
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title()), u'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title()), u'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure changelist.get_query_set() does not raise IncorrectLookupParameters
queryset = changelist.get_query_set()
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title()), u'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_unicode(filterspec.title()), u'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_BooleanFilterSpec(self):
modeladmin = BoolTestAdmin(BoolTest, admin.site)
request = self.request_factory.get('/')
changelist = ChangeList(request, BoolTest, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_editable, modeladmin)
# Make sure changelist.get_query_set() does not raise IncorrectLookupParameters
queryset = changelist.get_query_set()
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title()), u'completed')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], False)
self.assertEqual(choices[-1]['query_string'], '?completed__exact=0')
request = self.request_factory.get('/', {'completed__exact': 1})
changelist = self.get_changelist(request, BoolTest, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_unicode(filterspec.title()), u'completed')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?completed__exact=1')
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, admin.site)
dev = Department.objects.create(code='DEV', description='Development')
design = Department.objects.create(code='DSN', description='Design')
john = Employee.objects.create(name='John Blue', department=dev)
jack = Employee.objects.create(name='Jack Red', department=design)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set()
self.assertEqual(list(queryset), [jack, john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title()), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], u'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set()
self.assertEqual(list(queryset), [john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_unicode(filterspec.title()), u'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], u'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], u'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], u'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(admin.ModelAdmin):
list_filter = ('year', 'author', 'contributors')
order_by = '-id'
class BoolTestAdmin(admin.ModelAdmin):
list_filter = ('completed',)
class EmployeeAdmin(admin.ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department'] | bsd-3-clause | cbcd6d0b957f3309729b462ffbb70570 | 47.290909 | 107 | 0.669779 | 4.023939 | false | true | false | false |
django-nonrel/django-nonrel | django/core/management/commands/flush.py | 249 | 3437 | from optparse import make_option
from django.conf import settings
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to flush. '
'Defaults to the "default" database.'),
)
help = "Executes ``sqlflush`` on the current database."
def handle_noargs(self, **options):
db = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[db]
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError:
pass
sql_list = sql_flush(self.style, connection, only_django=True)
if interactive:
confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed(using=db)
raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
transaction.commit_unless_managed(using=db)
# Emit the post sync signal. This allows individual
# applications to respond as if the database had been
# sync'd from scratch.
all_models = []
for app in models.get_apps():
all_models.extend([
m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)
])
emit_post_sync_signal(set(all_models), verbosity, interactive, db)
# Reinstall the initial_data fixture.
kwargs = options.copy()
kwargs['database'] = db
call_command('loaddata', 'initial_data', **kwargs)
else:
print "Flush cancelled."
| bsd-3-clause | f7fcc193eadf829ee0fcc5af5ed8402d | 40.914634 | 103 | 0.627 | 4.32327 | false | false | false | false |
django-nonrel/django-nonrel | django/conf/locale/sv/formats.py | 232 | 1365 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y', # '10/25/06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| bsd-3-clause | 39bdeb3c6931c11a6f615f4ae8e5f818 | 33.125 | 77 | 0.516484 | 2.55618 | false | false | true | false |
django-nonrel/django-nonrel | django/forms/fields.py | 5 | 37739 | """
Field classes.
"""
import datetime
import os
import re
import time
import urlparse
import warnings
from decimal import Decimal, DecimalException
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.core import validators
import django.utils.copycompat as copy
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from django.utils.functional import lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from widgets import (TextInput, PasswordInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION)
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField', 'TimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
'TypedChoiceField', 'TypedMultipleChoiceField'
)
def en_format(name):
"""
Helper function to stay backward compatible.
"""
from django.conf.locale.en import formats
warnings.warn(
"`django.forms.fields.DEFAULT_%s` is deprecated; use `django.utils.formats.get_format('%s')` instead." % (name, name),
DeprecationWarning
)
return getattr(formats, name)
class Field(object):
    """
    Base class for all form fields.

    Subclasses customize cleaning by overriding to_python() (type coercion),
    validate() (intrinsic checks such as "required") and widget_attrs()
    (extra HTML attributes); clean() ties these together with
    run_validators().
    """
    widget = TextInput # Default widget to use when rendering this type of Field.
    hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
    default_validators = [] # Default set of validators
    default_error_messages = {
        'required': _(u'This field is required.'),
        'invalid': _(u'Enter a valid value.'),
    }
    # Tracks each time a Field instance is created. Used to retain order.
    creation_counter = 0
    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text=None, error_messages=None, show_hidden_initial=False,
                 validators=[], localize=False):
        # required -- Boolean that specifies whether the field is required.
        #             True by default.
        # widget -- A Widget class, or instance of a Widget class, that should
        #           be used for this Field when displaying it. Each Field has a
        #           default Widget that it'll use if you don't specify this. In
        #           most cases, the default widget is TextInput.
        # label -- A verbose name for this field, for use in displaying this
        #          field in a form. By default, Django will use a "pretty"
        #          version of the form field name, if the Field is part of a
        #          Form.
        # initial -- A value to use in this Field's initial display. This value
        #            is *not* used as a fallback if data isn't given.
        # help_text -- An optional string to use as "help text" for this Field.
        # error_messages -- An optional dictionary to override the default
        #                   messages that the field will raise.
        # show_hidden_initial -- Boolean that specifies if it is needed to render a
        #                        hidden widget with initial value after widget.
        # validators -- List of addtional validators to use
        # localize -- Boolean that specifies if the field should be localized.
        # NOTE: the mutable default for `validators` is safe here only because
        # it is read (concatenated below), never mutated.
        if label is not None:
            label = smart_unicode(label)
        self.required, self.label, self.initial = required, label, initial
        self.show_hidden_initial = show_hidden_initial
        if help_text is None:
            self.help_text = u''
        else:
            self.help_text = smart_unicode(help_text)
        # `widget` may be a class or an instance; normalize to an instance.
        widget = widget or self.widget
        if isinstance(widget, type):
            widget = widget()
        # Trigger the localization machinery if needed.
        self.localize = localize
        if self.localize:
            widget.is_localized = True
        # Let the widget know whether it should display as required.
        widget.is_required = self.required
        # Hook into self.widget_attrs() for any Field-specific HTML attributes.
        extra_attrs = self.widget_attrs(widget)
        if extra_attrs:
            widget.attrs.update(extra_attrs)
        self.widget = widget
        # Increase the creation counter, and save our local copy.
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1
        # Merge default_error_messages across the MRO, base classes first, so
        # subclass messages -- and finally per-instance messages -- win.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
        self.validators = self.default_validators + validators
    def prepare_value(self, value):
        # Hook: convert a Python value back into something the widget can
        # display. Identity by default.
        return value
    def to_python(self, value):
        # Hook: coerce raw widget data to the right Python type. Identity
        # by default; subclasses override.
        return value
    def validate(self, value):
        # Intrinsic validation -- at this level only the "required" check.
        if value in validators.EMPTY_VALUES and self.required:
            raise ValidationError(self.error_messages['required'])
    def run_validators(self, value):
        # Run every validator, collecting *all* messages so the user sees the
        # full list of problems rather than only the first.
        if value in validators.EMPTY_VALUES:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except ValidationError, e:
                if hasattr(e, 'code') and e.code in self.error_messages:
                    # Prefer this field's customized message for the code.
                    message = self.error_messages[e.code]
                    if e.params:
                        message = message % e.params
                    errors.append(message)
                else:
                    errors.extend(e.messages)
        if errors:
            raise ValidationError(errors)
    def clean(self, value):
        """
        Validates the given value and returns its "cleaned" value as an
        appropriate Python object.
        Raises ValidationError for any errors.
        """
        value = self.to_python(value)
        self.validate(value)
        self.run_validators(value)
        return value
    def bound_data(self, data, initial):
        """
        Return the value that should be shown for this field on render of a
        bound form, given the submitted POST data for the field and the initial
        data, if any.
        For most fields, this will simply be data; FileFields need to handle it
        a bit differently.
        """
        return data
    def widget_attrs(self, widget):
        """
        Given a Widget instance (*not* a Widget class), returns a dictionary of
        any HTML attributes that should be added to the Widget, based on this
        Field.
        """
        return {}
    def __deepcopy__(self, memo):
        # Shallow-copy the field but deep-copy its widget, so per-form widget
        # attribute changes don't leak between form instances.
        result = copy.copy(self)
        memo[id(self)] = result
        result.widget = copy.deepcopy(self.widget, memo)
        return result
class CharField(Field):
    """
    A text field. Optional min_length/max_length bounds are enforced by
    appending the corresponding validators.
    """
    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        self.max_length, self.min_length = max_length, min_length
        super(CharField, self).__init__(*args, **kwargs)
        if min_length is not None:
            self.validators.append(validators.MinLengthValidator(min_length))
        if max_length is not None:
            self.validators.append(validators.MaxLengthValidator(max_length))
    def to_python(self, value):
        "Returns a Unicode object."
        if value in validators.EMPTY_VALUES:
            return u''
        return smart_unicode(value)
    def widget_attrs(self, widget):
        # Bug fix: previously this implicitly returned None when no attribute
        # applied; the base-class contract (Field.widget_attrs) is to return
        # a dict. Callers only truth-test the result, so {} is compatible.
        attrs = {}
        if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
            # The HTML attribute is maxlength, not max_length.
            attrs['maxlength'] = str(self.max_length)
        return attrs
class IntegerField(Field):
    """Form field that cleans its input to a Python int (or None if empty)."""
    default_error_messages = {
        'invalid': _(u'Enter a whole number.'),
        'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
        'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
    }
    def __init__(self, max_value=None, min_value=None, *args, **kwargs):
        self.max_value = max_value
        self.min_value = min_value
        super(IntegerField, self).__init__(*args, **kwargs)
        if max_value is not None:
            self.validators.append(validators.MaxValueValidator(max_value))
        if min_value is not None:
            self.validators.append(validators.MinValueValidator(min_value))
    def to_python(self, value):
        """
        Validates that int() can be called on the input. Returns the result
        of int(). Returns None for empty values.
        """
        value = super(IntegerField, self).to_python(value)
        if value in validators.EMPTY_VALUES:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        # Round-trip through str() so non-integral input such as 1.5 raises
        # ValueError instead of being truncated by int().
        try:
            return int(str(value))
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'])
class FloatField(IntegerField):
    """IntegerField variant that cleans to a Python float instead of int."""
    default_error_messages = {
        'invalid': _(u'Enter a number.'),
    }
    def to_python(self, value):
        """
        Validates that float() can be called on the input. Returns the result
        of float(). Returns None for empty values.
        """
        # NOTE: super(IntegerField, self) is deliberate -- it skips
        # IntegerField.to_python()'s int() coercion and calls
        # Field.to_python() directly, so the value stays a float.
        value = super(IntegerField, self).to_python(value)
        if value in validators.EMPTY_VALUES:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            value = float(value)
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'])
        return value
class DecimalField(Field):
    """
    Cleans input to a decimal.Decimal, optionally bounding the total number
    of digits (max_digits) and digits after the point (decimal_places).
    """
    default_error_messages = {
        'invalid': _(u'Enter a number.'),
        'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
        'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
        'max_digits': _('Ensure that there are no more than %s digits in total.'),
        'max_decimal_places': _('Ensure that there are no more than %s decimal places.'),
        'max_whole_digits': _('Ensure that there are no more than %s digits before the decimal point.')
    }
    def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
        self.max_value, self.min_value = max_value, min_value
        self.max_digits, self.decimal_places = max_digits, decimal_places
        # Direct base-class call (rather than super()) -- preserved as-is.
        Field.__init__(self, *args, **kwargs)
        if max_value is not None:
            self.validators.append(validators.MaxValueValidator(max_value))
        if min_value is not None:
            self.validators.append(validators.MinValueValidator(min_value))
    def to_python(self, value):
        """
        Validates that the input is a decimal number. Returns a Decimal
        instance. Returns None for empty values. Ensures that there are no more
        than max_digits in the number, and no more than decimal_places digits
        after the decimal point.
        """
        if value in validators.EMPTY_VALUES:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        value = smart_str(value).strip()
        try:
            value = Decimal(value)
        except DecimalException:
            raise ValidationError(self.error_messages['invalid'])
        return value
    def validate(self, value):
        super(DecimalField, self).validate(value)
        if value in validators.EMPTY_VALUES:
            return
        # Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
        # since it is never equal to itself. However, NaN is the only value that
        # isn't equal to itself, so we can use this to identify NaN
        if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
            raise ValidationError(self.error_messages['invalid'])
        sign, digittuple, exponent = value.as_tuple()
        # NOTE(review): abs() treats a *positive* exponent (e.g.
        # Decimal('1E+2')) as if it had decimal places -- looks suspect;
        # confirm whether max(0, -exponent) was intended.
        decimals = abs(exponent)
        # digittuple doesn't include any leading zeros.
        digits = len(digittuple)
        if decimals > digits:
            # We have leading zeros up to or past the decimal point. Count
            # everything past the decimal point as a digit. We do not count
            # 0 before the decimal point as a digit since that would mean
            # we would not allow max_digits = decimal_places.
            digits = decimals
        whole_digits = digits - decimals
        if self.max_digits is not None and digits > self.max_digits:
            raise ValidationError(self.error_messages['max_digits'] % self.max_digits)
        if self.decimal_places is not None and decimals > self.decimal_places:
            raise ValidationError(self.error_messages['max_decimal_places'] % self.decimal_places)
        if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
            raise ValidationError(self.error_messages['max_whole_digits'] % (self.max_digits - self.decimal_places))
        # Unlike other validate() implementations, this one returns the value.
        return value
class DateField(Field):
    """Cleans input to a datetime.date, parsing strings via input formats."""
    widget = DateInput
    default_error_messages = {
        'invalid': _(u'Enter a valid date.'),
    }
    def __init__(self, input_formats=None, *args, **kwargs):
        super(DateField, self).__init__(*args, **kwargs)
        # None means: fall back to the localized DATE_INPUT_FORMATS.
        self.input_formats = input_formats
    def to_python(self, value):
        """
        Validates that the input can be converted to a date. Returns a Python
        datetime.date object.
        """
        if value in validators.EMPTY_VALUES:
            return None
        # datetime is a date subclass, so check it first and drop the time.
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        candidate_formats = self.input_formats or formats.get_format('DATE_INPUT_FORMATS')
        for fmt in candidate_formats:
            try:
                parsed = time.strptime(value, fmt)
            except ValueError:
                continue
            # struct_time fields 0..2 are year, month, day.
            return datetime.date(*parsed[:3])
        raise ValidationError(self.error_messages['invalid'])
class TimeField(Field):
    """Cleans input to a datetime.time, parsing strings via input formats."""
    widget = TimeInput
    default_error_messages = {
        'invalid': _(u'Enter a valid time.')
    }
    def __init__(self, input_formats=None, *args, **kwargs):
        super(TimeField, self).__init__(*args, **kwargs)
        # None means: fall back to the localized TIME_INPUT_FORMATS.
        self.input_formats = input_formats
    def to_python(self, value):
        """
        Validates that the input can be converted to a time. Returns a Python
        datetime.time object.
        """
        if value in validators.EMPTY_VALUES:
            return None
        if isinstance(value, datetime.time):
            return value
        candidate_formats = self.input_formats or formats.get_format('TIME_INPUT_FORMATS')
        for fmt in candidate_formats:
            try:
                parsed = time.strptime(value, fmt)
            except ValueError:
                continue
            # struct_time fields 3..5 are hour, minute, second.
            return datetime.time(*parsed[3:6])
        raise ValidationError(self.error_messages['invalid'])
class DateTimeField(Field):
    """Cleans input to a datetime.datetime, parsing strings via input formats."""
    widget = DateTimeInput
    default_error_messages = {
        'invalid': _(u'Enter a valid date/time.'),
    }
    def __init__(self, input_formats=None, *args, **kwargs):
        super(DateTimeField, self).__init__(*args, **kwargs)
        # None means: fall back to the localized DATETIME_INPUT_FORMATS.
        self.input_formats = input_formats
    def to_python(self, value):
        """
        Validates that the input can be converted to a datetime. Returns a
        Python datetime.datetime object.
        """
        if value in validators.EMPTY_VALUES:
            return None
        if isinstance(value, datetime.datetime):
            return value
        # A plain date is promoted to a datetime at midnight.
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day)
        if isinstance(value, list):
            # Input comes from a SplitDateTimeWidget, for example. So, it's two
            # components: date and time.
            if len(value) != 2:
                raise ValidationError(self.error_messages['invalid'])
            if value[0] in validators.EMPTY_VALUES and value[1] in validators.EMPTY_VALUES:
                return None
            # Join the two components so the normal string parsing applies.
            value = '%s %s' % tuple(value)
        for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
            try:
                return datetime.datetime(*time.strptime(value, format)[:6])
            except ValueError:
                continue
        raise ValidationError(self.error_messages['invalid'])
class RegexField(CharField):
    """CharField whose value must match a given regular expression."""
    def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
        """
        regex can be either a string or a compiled regular expression object.
        error_message is an optional error message to use, if
        'Enter a valid value' is too generic for you.
        """
        # error_message is just kept for backwards compatibility:
        if error_message:
            error_messages = kwargs.get('error_messages') or {}
            error_messages['invalid'] = error_message
            kwargs['error_messages'] = error_messages
        super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
        # Normalize to a compiled pattern; keep it both on the instance and
        # inside a RegexValidator, which does the actual checking.
        if isinstance(regex, basestring):
            regex = re.compile(regex)
        self.regex = regex
        self.validators.append(validators.RegexValidator(regex=regex))
class EmailField(CharField):
    """CharField that validates its value as an e-mail address."""
    default_error_messages = {
        'invalid': _(u'Enter a valid e-mail address.'),
    }
    default_validators = [validators.validate_email]
    def clean(self, value):
        # Strip surrounding whitespace before the normal cleaning pipeline.
        stripped = self.to_python(value).strip()
        return super(EmailField, self).clean(stripped)
class FileField(Field):
    """
    Field for uploaded files. The cleaned value is the UploadedFile object
    itself (or False to signal "clear", or the initial value if no new file
    was submitted).
    """
    widget = ClearableFileInput
    default_error_messages = {
        'invalid': _(u"No file was submitted. Check the encoding type on the form."),
        'missing': _(u"No file was submitted."),
        'empty': _(u"The submitted file is empty."),
        'max_length': _(u'Ensure this filename has at most %(max)d characters (it has %(length)d).'),
        'contradiction': _(u'Please either submit a file or check the clear checkbox, not both.')
    }
    def __init__(self, *args, **kwargs):
        # max_length bounds the *filename* length, not the file size.
        self.max_length = kwargs.pop('max_length', None)
        super(FileField, self).__init__(*args, **kwargs)
    def to_python(self, data):
        if data in validators.EMPTY_VALUES:
            return None
        # UploadedFile objects should have name and size attributes.
        try:
            file_name = data.name
            file_size = data.size
        except AttributeError:
            raise ValidationError(self.error_messages['invalid'])
        if self.max_length is not None and len(file_name) > self.max_length:
            error_values = {'max': self.max_length, 'length': len(file_name)}
            raise ValidationError(self.error_messages['max_length'] % error_values)
        if not file_name:
            raise ValidationError(self.error_messages['invalid'])
        if not file_size:
            raise ValidationError(self.error_messages['empty'])
        return data
    def clean(self, data, initial=None):
        # If the widget got contradictory inputs, we raise a validation error
        if data is FILE_INPUT_CONTRADICTION:
            raise ValidationError(self.error_messages['contradiction'])
        # False means the field value should be cleared; further validation is
        # not needed.
        if data is False:
            if not self.required:
                return False
            # If the field is required, clearing is not possible (the widget
            # shouldn't return False data in that case anyway). False is not
            # in validators.EMPTY_VALUES; if a False value makes it this far
            # it should be validated from here on out as None (so it will be
            # caught by the required check).
            data = None
        # No new file submitted: keep whatever was stored before.
        if not data and initial:
            return initial
        return super(FileField, self).clean(data)
    def bound_data(self, data, initial):
        # Re-display the initial value both when nothing was submitted and
        # when the widget reported contradictory input.
        if data in (None, FILE_INPUT_CONTRADICTION):
            return initial
        return data
class ImageField(FileField):
    """FileField that additionally verifies the upload is a readable image."""
    default_error_messages = {
        'invalid_image': _(u"Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
    }
    def to_python(self, data):
        """
        Checks that the file-upload field data contains a valid image (GIF, JPG,
        PNG, possibly others -- whatever the Python Imaging Library supports).
        """
        f = super(ImageField, self).to_python(data)
        if f is None:
            return None
        # Try to import PIL in either of the two ways it can end up installed.
        try:
            from PIL import Image
        except ImportError:
            import Image
        # We need to get a file object for PIL. We might have a path or we might
        # have to read the data into memory.
        # (NOTE: the local name `file` shadows the builtin; kept as-is.)
        if hasattr(data, 'temporary_file_path'):
            file = data.temporary_file_path()
        else:
            if hasattr(data, 'read'):
                file = StringIO(data.read())
            else:
                file = StringIO(data['content'])
        try:
            # load() could spot a truncated JPEG, but it loads the entire
            # image in memory, which is a DoS vector. See #3848 and #18520.
            # verify() must be called immediately after the constructor.
            Image.open(file).verify()
        except ImportError:
            # Under PyPy, it is possible to import PIL. However, the underlying
            # _imaging C module isn't available, so an ImportError will be
            # raised. Catch and re-raise.
            raise
        except Exception: # Python Imaging Library doesn't recognize it as an image
            raise ValidationError(self.error_messages['invalid_image'])
        # verify() may have consumed the stream; rewind for later readers.
        if hasattr(f, 'seek') and callable(f.seek):
            f.seek(0)
        return f
class URLField(CharField):
    """CharField validated as a URL; optionally normalizes scheme/path."""
    default_error_messages = {
        'invalid': _(u'Enter a valid URL.'),
        'invalid_link': _(u'This URL appears to be a broken link.'),
    }
    def __init__(self, max_length=None, min_length=None, verify_exists=False,
            validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
        # NOTE(review): verify_exists=True makes validation perform a network
        # request to the supplied URL -- confirm this is acceptable for your
        # deployment before enabling it.
        super(URLField, self).__init__(max_length, min_length, *args,
                **kwargs)
        self.validators.append(validators.URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
    def to_python(self, value):
        # Normalize the URL: supply a default scheme, promote a scheme-less
        # "domain/path" form, and ensure a path component exists.
        if value:
            url_fields = list(urlparse.urlsplit(value))
            if not url_fields[0]:
                # If no URL scheme given, assume http://
                url_fields[0] = 'http'
            if not url_fields[1]:
                # Assume that if no domain is provided, that the path segment
                # contains the domain.
                url_fields[1] = url_fields[2]
                url_fields[2] = ''
                # Rebuild the url_fields list, since the domain segment may now
                # contain the path too.
                value = urlparse.urlunsplit(url_fields)
                url_fields = list(urlparse.urlsplit(value))
            if not url_fields[2]:
                # the path portion may need to be added before query params
                url_fields[2] = '/'
            value = urlparse.urlunsplit(url_fields)
        return super(URLField, self).to_python(value)
class BooleanField(Field):
    """Checkbox field; cleans to True/False and requires True when required."""
    widget = CheckboxInput
    def to_python(self, value):
        """Returns a Python boolean object."""
        # Explicitly check for the string 'False', which is what a hidden field
        # will submit for False. Also check for '0', since this is what
        # RadioSelect will provide. Because bool("True") == bool('1') == True,
        # we don't need to handle that explicitly.
        result = False if value in ('False', '0') else bool(value)
        result = super(BooleanField, self).to_python(result)
        # A required checkbox must actually be checked.
        if self.required and not result:
            raise ValidationError(self.error_messages['required'])
        return result
class NullBooleanField(BooleanField):
    """
    A field whose valid values are None, True and False. Invalid values are
    cleaned to None.
    """
    widget = NullBooleanSelect
    def to_python(self, value):
        """
        Explicitly checks for the string 'True' and 'False', which is what a
        hidden field will submit for True and False, and for '1' and '0', which
        is what a RadioField will submit. Unlike the Booleanfield we need to
        explicitly check for True, because we are not using the bool() function
        """
        if value in (True, 'True', '1'):
            return True
        if value in (False, 'False', '0'):
            return False
        return None
    def validate(self, value):
        # Everything (including None) is acceptable, so never raise.
        pass
class ChoiceField(Field):
    """Field whose cleaned value must be one of a fixed set of choices."""
    widget = Select
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
    }
    def __init__(self, choices=(), required=True, widget=None, label=None,
                 initial=None, help_text=None, *args, **kwargs):
        super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
                                        initial=initial, help_text=help_text, *args, **kwargs)
        self.choices = choices
    def _get_choices(self):
        return self._choices
    def _set_choices(self, value):
        # Setting choices also sets the choices on the widget.
        # choices can be any iterable, but we call list() on it because
        # it will be consumed more than once.
        self._choices = self.widget.choices = list(value)
    choices = property(_get_choices, _set_choices)
    def to_python(self, value):
        "Returns a Unicode object."
        if value in validators.EMPTY_VALUES:
            return u''
        return smart_unicode(value)
    def validate(self, value):
        """
        Validates that the input is in self.choices.
        """
        super(ChoiceField, self).validate(value)
        if value and not self.valid_value(value):
            raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
    def valid_value(self, value):
        "Check to see if the provided value is a valid choice"
        # Comparison is done against the unicode form of each choice key.
        for k, v in self.choices:
            if isinstance(v, (list, tuple)):
                # This is an optgroup, so look inside the group for options
                for k2, v2 in v:
                    if value == smart_unicode(k2):
                        return True
            else:
                if value == smart_unicode(k):
                    return True
        return False
class TypedChoiceField(ChoiceField):
    """
    ChoiceField that coerces the selected value with a user-supplied callable
    (e.g. int), returning empty_value for empty input.
    """
    def __init__(self, *args, **kwargs):
        # coerce: callable applied to the validated choice; identity default.
        self.coerce = kwargs.pop('coerce', lambda val: val)
        # empty_value: what clean() returns for empty input; '' by default.
        self.empty_value = kwargs.pop('empty_value', '')
        super(TypedChoiceField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        """
        Validates that the value is in self.choices and can be coerced to the
        right type.
        """
        value = super(TypedChoiceField, self).to_python(value)
        # Choice membership is checked here (before coercion), which is why
        # validate() below is a no-op.
        super(TypedChoiceField, self).validate(value)
        if value == self.empty_value or value in validators.EMPTY_VALUES:
            return self.empty_value
        try:
            value = self.coerce(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
        return value
    def validate(self, value):
        # Validation already happened inside to_python().
        pass
class MultipleChoiceField(ChoiceField):
    """ChoiceField variant whose cleaned value is a list of selected choices."""
    hidden_widget = MultipleHiddenInput
    widget = SelectMultiple
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
        'invalid_list': _(u'Enter a list of values.'),
    }
    def to_python(self, value):
        # Normalize to a list of unicode strings; reject non-sequence input.
        if not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['invalid_list'])
        return [smart_unicode(item) for item in value]
    def validate(self, value):
        """
        Validates that the input is a list or tuple.
        """
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        # Validate that each value in the value list is in self.choices.
        for item in value:
            if not self.valid_value(item):
                raise ValidationError(self.error_messages['invalid_choice'] % {'value': item})
class TypedMultipleChoiceField(MultipleChoiceField):
    """
    MultipleChoiceField that coerces each selected value with a user-supplied
    callable, returning empty_value for empty input.
    """
    def __init__(self, *args, **kwargs):
        # coerce: callable applied to each validated choice; identity default.
        self.coerce = kwargs.pop('coerce', lambda val: val)
        # empty_value: what clean() returns for empty input; [] by default.
        self.empty_value = kwargs.pop('empty_value', [])
        super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        """
        Validates that the values are in self.choices and can be coerced to the
        right type.
        """
        value = super(TypedMultipleChoiceField, self).to_python(value)
        # Choice membership is checked here (before coercion), which is why
        # validate() below is a no-op.
        super(TypedMultipleChoiceField, self).validate(value)
        if value == self.empty_value or value in validators.EMPTY_VALUES:
            return self.empty_value
        new_value = []
        for choice in value:
            try:
                new_value.append(self.coerce(choice))
            except (ValueError, TypeError, ValidationError):
                raise ValidationError(self.error_messages['invalid_choice'] % {'value': choice})
        return new_value
    def validate(self, value):
        # Validation already happened inside to_python().
        pass
class ComboField(Field):
    """
    A Field whose clean() method calls multiple Field clean() methods.
    """
    def __init__(self, fields=(), *args, **kwargs):
        super(ComboField, self).__init__(*args, **kwargs)
        # Set 'required' to False on the individual fields, because the
        # required validation will be handled by ComboField, not by those
        # individual fields.
        for subfield in fields:
            subfield.required = False
        self.fields = fields
    def clean(self, value):
        """
        Validates the given value against all of self.fields, which is a
        list of Field instances.
        """
        super(ComboField, self).clean(value)
        # Each subfield cleans the output of the previous one, in order.
        for subfield in self.fields:
            value = subfield.clean(value)
        return value
class MultiValueField(Field):
    """
    A Field that aggregates the logic of multiple Fields.
    Its clean() method takes a "decompressed" list of values, which are then
    cleaned into a single value according to self.fields. Each value in
    this list is cleaned by the corresponding field -- the first value is
    cleaned by the first field, the second value is cleaned by the second
    field, etc. Once all fields are cleaned, the list of clean values is
    "compressed" into a single value.
    Subclasses should not have to implement clean(). Instead, they must
    implement compress(), which takes a list of valid values and returns a
    "compressed" version of those values -- a single value.
    You'll probably want to use this with MultiWidget.
    """
    default_error_messages = {
        'invalid': _(u'Enter a list of values.'),
    }
    def __init__(self, fields=(), *args, **kwargs):
        super(MultiValueField, self).__init__(*args, **kwargs)
        # Set 'required' to False on the individual fields, because the
        # required validation will be handled by MultiValueField, not by those
        # individual fields.
        for f in fields:
            f.required = False
        self.fields = fields
    def validate(self, value):
        # Per-subfield validation happens in clean(); the compressed value is
        # validated there via self.validate(out).
        pass
    def clean(self, value):
        """
        Validates every value in the given list. A value is validated against
        the corresponding Field in self.fields.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), clean() would call
        DateField.clean(value[0]) and TimeField.clean(value[1]).
        """
        clean_data = []
        errors = ErrorList()
        if not value or isinstance(value, (list, tuple)):
            # Entirely-empty input: required -> error, else compress nothing.
            if not value or not [v for v in value if v not in validators.EMPTY_VALUES]:
                if self.required:
                    raise ValidationError(self.error_messages['required'])
                else:
                    return self.compress([])
        else:
            raise ValidationError(self.error_messages['invalid'])
        for i, field in enumerate(self.fields):
            try:
                field_value = value[i]
            except IndexError:
                # Shorter input than fields: treat the missing slot as empty.
                field_value = None
            if self.required and field_value in validators.EMPTY_VALUES:
                raise ValidationError(self.error_messages['required'])
            try:
                clean_data.append(field.clean(field_value))
            except ValidationError, e:
                # Collect all validation errors in a single list, which we'll
                # raise at the end of clean(), rather than raising a single
                # exception for the first error we encounter.
                errors.extend(e.messages)
        if errors:
            raise ValidationError(errors)
        out = self.compress(clean_data)
        self.validate(out)
        return out
    def compress(self, data_list):
        """
        Returns a single value for the given list of values. The values can be
        assumed to be valid.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), this might return a datetime
        object created by combining the date and time in data_list.
        """
        raise NotImplementedError('Subclasses must implement this method.')
class FilePathField(ChoiceField):
    """
    ChoiceField whose choices are files found under `path`, optionally
    filtered by the `match` regex and/or gathered recursively.
    """
    def __init__(self, path, match=None, recursive=False, required=True,
                 widget=None, label=None, initial=None, help_text=None,
                 *args, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        super(FilePathField, self).__init__(choices=(), required=required,
            widget=widget, label=label, initial=initial, help_text=help_text,
            *args, **kwargs)
        # Optional fields get an explicit "no selection" choice.
        if self.required:
            self.choices = []
        else:
            self.choices = [("", "---------")]
        if self.match is not None:
            self.match_re = re.compile(self.match)
        if recursive:
            # Walk the whole tree; display paths relative to `path`.
            for root, dirs, files in sorted(os.walk(self.path)):
                for f in files:
                    if self.match is None or self.match_re.search(f):
                        f = os.path.join(root, f)
                        self.choices.append((f, f.replace(path, "", 1)))
        else:
            try:
                for f in sorted(os.listdir(self.path)):
                    full_file = os.path.join(self.path, f)
                    if os.path.isfile(full_file) and (self.match is None or self.match_re.search(f)):
                        self.choices.append((full_file, f))
            except OSError:
                # Unreadable/missing directory simply yields no extra choices.
                pass
        self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
    """
    MultiValueField combining a DateField and a TimeField; compresses the
    pair into a single datetime.datetime.
    """
    widget = SplitDateTimeWidget
    hidden_widget = SplitHiddenDateTimeWidget
    default_error_messages = {
        'invalid_date': _(u'Enter a valid date.'),
        'invalid_time': _(u'Enter a valid time.'),
    }
    def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
        # Route our own (possibly overridden) messages to the subfields so
        # they raise 'invalid_date'/'invalid_time' rather than the generic
        # 'invalid'.
        errors = self.default_error_messages.copy()
        if 'error_messages' in kwargs:
            errors.update(kwargs['error_messages'])
        localize = kwargs.get('localize', False)
        fields = (
            DateField(input_formats=input_date_formats,
                      error_messages={'invalid': errors['invalid_date']},
                      localize=localize),
            TimeField(input_formats=input_time_formats,
                      error_messages={'invalid': errors['invalid_time']},
                      localize=localize),
        )
        super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
    def compress(self, data_list):
        if data_list:
            # Raise a validation error if time or date is empty
            # (possible if SplitDateTimeField has required=False).
            if data_list[0] in validators.EMPTY_VALUES:
                raise ValidationError(self.error_messages['invalid_date'])
            if data_list[1] in validators.EMPTY_VALUES:
                raise ValidationError(self.error_messages['invalid_time'])
            return datetime.datetime.combine(*data_list)
        return None
class IPAddressField(CharField):
    """CharField that accepts only a valid IPv4 address."""
    default_validators = [validators.validate_ipv4_address]
    default_error_messages = {
        'invalid': _(u'Enter a valid IPv4 address.'),
    }
class SlugField(CharField):
    """CharField restricted to letters, numbers, underscores and hyphens."""
    default_validators = [validators.validate_slug]
    default_error_messages = {
        'invalid': _(u"Enter a valid 'slug' consisting of letters, numbers,"
            u" underscores or hyphens."),
    }
"""
Tests for stuff in django.utils.datastructures.
"""
import pickle
import unittest
from django.utils.copycompat import copy
from django.utils.datastructures import *
class DatastructuresTestCase(unittest.TestCase):
    """Base TestCase adding an assertion for exception messages."""
    def assertRaisesErrorWithMessage(self, error, message, callable,
        *args, **kwargs):
        # First assert the exception type, then re-invoke to capture the
        # instance and compare its string form with the expected message.
        self.assertRaises(error, callable, *args, **kwargs)
        try:
            callable(*args, **kwargs)
        except error, e:
            self.assertEqual(message, str(e))
class SortedDictTests(DatastructuresTestCase):
    """Unit tests for django.utils.datastructures.SortedDict ordering rules."""
    def setUp(self):
        # d1: insertion order 7, 1, 9.
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'
        # d2: insertion order 1, 9, 0, 7.
        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'
    def test_basic_methods(self):
        self.assertEqual(self.d1.keys(), [7, 1, 9])
        self.assertEqual(self.d1.values(), ['seven', 'one', 'nine'])
        self.assertEqual(self.d1.items(), [(7, 'seven'), (1, 'one'), (9, 'nine')])
    def test_overwrite_ordering(self):
        """ Overwriting an item keeps it's place. """
        self.d1[1] = 'ONE'
        self.assertEqual(self.d1.values(), ['seven', 'ONE', 'nine'])
    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(self.d1.keys(), [7, 1, 9, 0])
    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(self.d2.keys(), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
    def test_change_keys(self):
        """
        Changing the keys won't do anything, it's only a copy of the
        keys dict.
        """
        k = self.d2.keys()
        k.remove(9)
        self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first one.
        A real dict will actually take the second value so we will too, but
        we'll keep the ordering from the first key found.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)
        self.assertEqual(d.keys(), [2, 1])
        real_dict = dict(tuples)
        self.assertEqual(sorted(real_dict.values()), ['one', 'second-two'])
        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(d.values(), ['second-two', 'one'])
    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        # copy() must preserve key order.
        self.assertEqual(self.d1.keys(), self.d1.copy().keys())
    def test_append(self):
        # repr() must reflect insertion order as well.
        self.d1[13] = 'thirteen'
        self.assertEqual(
            repr(self.d1),
            "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
        )
    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)
    def test_dict_equality(self):
        # SortedDict compares equal to a plain dict with the same items.
        d = SortedDict((i, i) for i in xrange(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})
    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")
    def test_pickle(self):
        # Round-trip through pickle protocol 2 must preserve the contents.
        self.assertEqual(
            pickle.loads(pickle.dumps(self.d1, 2)),
            {7: 'seven', 1: 'one', 9: 'nine'}
        )
    def test_clear(self):
        # clear() must also reset the internal keyOrder list.
        self.d1.clear()
        self.assertEqual(self.d1, {})
        self.assertEqual(self.d1.keyOrder, [])
class MergeDictTests(DatastructuresTestCase):
    """Unit tests for django.utils.datastructures.MergeDict lookup rules."""
    def test_simple_mergedict(self):
        d1 = {'chris':'cool', 'camri':'cute', 'cotton':'adorable',
              'tulip':'snuggable', 'twoofme':'firstone'}
        d2 = {'chris2':'cool2', 'camri2':'cute2', 'cotton2':'adorable2',
              'tulip2':'snuggable2'}
        d3 = {'chris3':'cool3', 'camri3':'cute3', 'cotton3':'adorable3',
              'tulip3':'snuggable3'}
        d4 = {'twoofme': 'secondone'}
        md = MergeDict(d1, d2, d3)
        self.assertEqual(md['chris'], 'cool')
        self.assertEqual(md['camri'], 'cute')
        # Earlier dicts win for duplicate keys.
        self.assertEqual(md['twoofme'], 'firstone')
        md2 = md.copy()
        self.assertEqual(md2['chris'], 'cool')
    def test_mergedict_merges_multivaluedict(self):
        """ MergeDict can merge MultiValueDicts """
        multi1 = MultiValueDict({'key1': ['value1'],
                                 'key2': ['value2', 'value3']})
        multi2 = MultiValueDict({'key2': ['value4'],
                                 'key4': ['value5', 'value6']})
        mm = MergeDict(multi1, multi2)
        # Although 'key2' appears in both dictionaries,
        # only the first value is used.
        self.assertEqual(mm.getlist('key2'), ['value2', 'value3'])
        self.assertEqual(mm.getlist('key4'), ['value5', 'value6'])
        self.assertEqual(mm.getlist('undefined'), [])
        self.assertEqual(sorted(mm.keys()), ['key1', 'key2', 'key4'])
        self.assertEqual(len(mm.values()), 3)
        self.assertTrue('value1' in mm.values())
        # items() follows MultiValueDict semantics: the *last* value per key.
        self.assertEqual(sorted(mm.items(), key=lambda k: k[0]),
                         [('key1', 'value1'), ('key2', 'value3'),
                          ('key4', 'value6')])
        self.assertEqual([(k,mm.getlist(k)) for k in sorted(mm)],
                         [('key1', ['value1']),
                          ('key2', ['value2', 'value3']),
                          ('key4', ['value5', 'value6'])])
class MultiValueDictTests(DatastructuresTestCase):
    """Tests for MultiValueDict, which maps each key to a list of values."""
    def test_multivaluedict(self):
        d = MultiValueDict({'name': ['Adrian', 'Simon'],
                            'position': ['Developer']})
        # Plain indexing and get() return the *last* value for a key.
        self.assertEqual(d['name'], 'Simon')
        self.assertEqual(d.get('name'), 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
        self.assertEqual(list(d.iteritems()),
                         [('position', 'Developer'), ('name', 'Simon')])
        self.assertEqual(list(d.iterlists()),
                         [('position', ['Developer']),
                          ('name', ['Adrian', 'Simon'])])
        # MultiValueDictKeyError: "Key 'lastname' not found in
        # <MultiValueDict: {'position': ['Developer'],
        # 'name': ['Adrian', 'Simon']}>"
        self.assertRaisesErrorWithMessage(MultiValueDictKeyError,
            '"Key \'lastname\' not found in <MultiValueDict: {\'position\':'\
            ' [\'Developer\'], \'name\': [\'Adrian\', \'Simon\']}>"',
            d.__getitem__, 'lastname')
        self.assertEqual(d.get('lastname'), None)
        self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
        self.assertEqual(d.getlist('lastname'), [])
        d.setlist('lastname', ['Holovaty', 'Willison'])
        self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
        # values()/itervalues() yield the last value of every key's list.
        self.assertEqual(d.values(), ['Developer', 'Simon', 'Willison'])
        self.assertEqual(list(d.itervalues()),
                         ['Developer', 'Simon', 'Willison'])
    def test_appendlist(self):
        d = MultiValueDict()
        d.appendlist('name', 'Adrian')
        d.appendlist('name', 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
    def test_copy(self):
        # Both copy.copy() and the instance copy() method are shallow in the
        # same way: the per-key lists are duplicated, but the objects *inside*
        # those lists are shared (see the Penguin assertions below).
        for copy_func in [copy, lambda d: d.copy()]:
            d1 = MultiValueDict({
                "developers": ["Carl", "Fred"]
            })
            self.assertEqual(d1["developers"], "Fred")
            d2 = copy_func(d1)
            d2.update({"developers": "Groucho"})
            self.assertEqual(d2["developers"], "Groucho")
            self.assertEqual(d1["developers"], "Fred")
            d1 = MultiValueDict({
                "key": [[]]
            })
            self.assertEqual(d1["key"], [])
            d2 = copy_func(d1)
            d2["key"].append("Penguin")
            self.assertEqual(d1["key"], ["Penguin"])
            self.assertEqual(d2["key"], ["Penguin"])
class DotExpandedDictTests(DatastructuresTestCase):
    """Tests for DotExpandedDict's dotted-key expansion."""

    def test_dotexpandeddict(self):
        """Dotted keys are expanded into nested dictionaries."""
        expanded = DotExpandedDict({'person.1.firstname': ['Simon'],
                                    'person.1.lastname': ['Willison'],
                                    'person.2.firstname': ['Adrian'],
                                    'person.2.lastname': ['Holovaty']})
        # The assertions are independent, so the order doesn't matter.
        self.assertEqual(expanded['person']['2']['firstname'], ['Adrian'])
        self.assertEqual(expanded['person']['1']['lastname'], ['Willison'])
        self.assertEqual(expanded['person']['2']['lastname'], ['Holovaty'])
class ImmutableListTests(DatastructuresTestCase):
    """Tests for ImmutableList's mutation errors and warning messages."""

    def test_sort(self):
        frozen = ImmutableList(range(10))
        # AttributeError: ImmutableList object is immutable.
        self.assertRaisesErrorWithMessage(AttributeError,
            'ImmutableList object is immutable.', frozen.sort)
        self.assertEqual(repr(frozen), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')

    def test_custom_warning(self):
        frozen = ImmutableList(range(10), warning="Object is immutable!")
        self.assertEqual(frozen[1], 1)
        # AttributeError: Object is immutable!
        self.assertRaisesErrorWithMessage(AttributeError,
            'Object is immutable!', frozen.__setitem__, 1, 'test')
class DictWrapperTests(DatastructuresTestCase):
    """Tests for DictWrapper's prefix-triggered value transformation."""

    def test_dictwrapper(self):
        def modify(value):
            return "*%s" % value
        wrapped = DictWrapper({'a': 'a'}, modify, 'xx_')
        self.assertEqual("Normal: %(a)s. Modified: %(xx_a)s" % wrapped,
                         'Normal: a. Modified: *a')
| bsd-3-clause | 26571b59fd2e1a01f47b6f15d3eeb2a7 | 34.081272 | 82 | 0.539988 | 3.597101 | false | true | false | false |
django-nonrel/django-nonrel | django/utils/timesince.py | 319 | 2698 | import datetime
import time
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ungettext, ugettext
def timesince(d, now=None):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # (seconds-per-unit, translated unit name) pairs, largest unit first.
    # A month is approximated as 30 days and a year as 365 days.
    chunks = (
      (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
      (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
      (60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
      (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
      (60 * 60, lambda n: ungettext('hour', 'hours', n)),
      (60, lambda n: ungettext('minute', 'minutes', n))
    )
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)
    if not now:
        # Default "now" matches d's awareness: aware if d is aware, else naive.
        if d.tzinfo:
            now = datetime.datetime.now(LocalTimezone(d))
        else:
            now = datetime.datetime.now()
    # ignore microsecond part of 'd' since we removed it from 'now'
    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return u'0 ' + ugettext('minutes')
    # Find the largest unit with a non-zero count; 'count' and 'name' keep
    # the values from the iteration that hit the break.
    for i, (seconds, name) in enumerate(chunks):
        count = since // seconds
        if count != 0:
            break
    s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
    if i + 1 < len(chunks):
        # Now get the second item
        seconds2, name2 = chunks[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
    return s
def timeuntil(d, now=None):
    """
    Mirror image of timesince: format the amount of time remaining until
    ``d``, measured from ``now`` (defaulting to the current time).
    """
    if not now:
        # Match d's timezone-awareness; datetime.now(None) is plain now().
        tz = LocalTimezone(d) if getattr(d, 'tzinfo', None) else None
        now = datetime.datetime.now(tz)
    return timesince(now, d)
| bsd-3-clause | ee8dc73bdb1f7920b3ff4b7f0260ed91 | 38.101449 | 94 | 0.604151 | 3.631225 | false | false | false | false |
django-nonrel/django-nonrel | django/utils/datastructures.py | 4 | 15570 | from types import GeneratorType
from django.utils.copycompat import copy, deepcopy
class MergeDict(object):
    """
    A simple class for creating new "virtual" dictionaries that actually look
    up values in more than one dictionary, passed in the constructor.

    If a key appears in more than one of the given dictionaries, only the
    first occurrence will be used.
    """
    def __init__(self, *dicts):
        self.dicts = dicts
    def __getitem__(self, key):
        # Try each dictionary in order; the first one holding the key wins.
        for dict_ in self.dicts:
            try:
                return dict_[key]
            except KeyError:
                pass
        raise KeyError
    def __copy__(self):
        # Shallow copy: the copy shares the same underlying dictionaries.
        return self.__class__(*self.dicts)
    def get(self, key, default=None):
        """Return self[key], or ``default`` when no dict contains the key."""
        try:
            return self[key]
        except KeyError:
            return default
    def getlist(self, key):
        # Delegates to the first dict that contains the key; that dict must
        # itself provide a getlist() method (e.g. a MultiValueDict).
        for dict_ in self.dicts:
            if key in dict_.keys():
                return dict_.getlist(key)
        return []
    def iteritems(self):
        # Yield (key, value) pairs, skipping keys already produced by an
        # earlier dict so that the first occurrence wins.
        seen = set()
        for dict_ in self.dicts:
            for item in dict_.iteritems():
                k, v = item
                if k in seen:
                    continue
                seen.add(k)
                yield item
    def iterkeys(self):
        for k, v in self.iteritems():
            yield k
    def itervalues(self):
        for k, v in self.iteritems():
            yield v
    def items(self):
        return list(self.iteritems())
    def keys(self):
        return list(self.iterkeys())
    def values(self):
        return list(self.itervalues())
    def has_key(self, key):
        for dict_ in self.dicts:
            if key in dict_:
                return True
        return False
    # Membership tests and iteration reuse the methods above.
    __contains__ = has_key
    __iter__ = iterkeys
    def copy(self):
        """Returns a copy of this object."""
        return self.__copy__()
    def __str__(self):
        '''
        Returns something like

            "{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"

        instead of the generic "<object meta-data>" inherited from object.
        '''
        return str(dict(self.items()))
    def __repr__(self):
        '''
        Returns something like

            MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})

        instead of generic "<object meta-data>" inherited from object.
        '''
        dictreprs = ', '.join(repr(d) for d in self.dicts)
        return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.
    """
    def __new__(cls, *args, **kwargs):
        # keyOrder is created in __new__ rather than __init__ so that it
        # exists even when instances are built without __init__ being run
        # (e.g. during unpickling or copying).
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance
    def __init__(self, data=None):
        if data is None:
            data = {}
        elif isinstance(data, GeneratorType):
            # Unfortunately we need to be able to read a generator twice. Once
            # to get the data into self with our super().__init__ call and a
            # second time to setup keyOrder correctly
            data = list(data)
        super(SortedDict, self).__init__(data)
        if isinstance(data, dict):
            # Take whatever iteration order the source dict provides.
            self.keyOrder = data.keys()
        else:
            self.keyOrder = []
            seen = set()
            for key, value in data:
                if key not in seen:
                    # Only the first occurrence of a key determines its
                    # position; later duplicates overwrite the value only.
                    self.keyOrder.append(key)
                    seen.add(key)
    def __deepcopy__(self, memo):
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.iteritems()])
    def __setitem__(self, key, value):
        if key not in self:
            self.keyOrder.append(key)
        super(SortedDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)
    def __iter__(self):
        return iter(self.keyOrder)
    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result
    def popitem(self):
        result = super(SortedDict, self).popitem()
        # Keep keyOrder in sync with whichever key dict.popitem() chose.
        self.keyOrder.remove(result[0])
        return result
    def items(self):
        return zip(self.keyOrder, self.values())
    def iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]
    def keys(self):
        # Return a copy so callers can't corrupt the internal ordering.
        return self.keyOrder[:]
    def iterkeys(self):
        return iter(self.keyOrder)
    def values(self):
        return map(self.__getitem__, self.keyOrder)
    def itervalues(self):
        for key in self.keyOrder:
            yield self[key]
    def update(self, dict_):
        # Goes through __setitem__, so new keys are appended to keyOrder.
        for k, v in dict_.iteritems():
            self[k] = v
    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)
    def value_for_index(self, index):
        """Returns the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]
    def insert(self, index, key, value):
        """Inserts the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                # Removing the old position shifted the target index left.
                index -= 1
        self.keyOrder.insert(index, key)
        super(SortedDict, self).__setitem__(key, value)
    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj
    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []
class MultiValueDictKeyError(KeyError):
    """Raised by MultiValueDict when a requested key is missing."""
class MultiValueDict(dict):
    """
    A subclass of dictionary customized to handle multiple values for the
    same key.

    >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    >>> d['name']
    'Simon'
    >>> d.getlist('name')
    ['Adrian', 'Simon']
    >>> d.get('lastname', 'nonexistent')
    'nonexistent'
    >>> d.setlist('lastname', ['Holovaty', 'Willison'])

    This class exists to solve the irritating problem raised by cgi.parse_qs,
    which returns a list for every key, even though most Web forms submit
    single name-value pairs.
    """
    def __init__(self, key_to_list_mapping=()):
        super(MultiValueDict, self).__init__(key_to_list_mapping)
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__,
                             super(MultiValueDict, self).__repr__())
    def __getitem__(self, key):
        """
        Returns the last data value for this key, or [] if it's an empty list;
        raises KeyError if not found.
        """
        try:
            list_ = super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
        try:
            return list_[-1]
        except IndexError:
            return []
    def __setitem__(self, key, value):
        # A plain assignment replaces the whole list with a one-element list.
        super(MultiValueDict, self).__setitem__(key, [value])
    def __copy__(self):
        # Duplicate each value list so the copy's lists are independent of
        # the original's (the list *contents* are still shared).
        return self.__class__([
            (k, v[:])
            for k, v in self.lists()
        ])
    def __deepcopy__(self, memo=None):
        # NOTE(review): imported locally rather than at module level --
        # presumably to avoid an import cycle; confirm before moving it.
        import django.utils.copycompat as copy
        if memo is None:
            memo = {}
        result = self.__class__()
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo),
                             copy.deepcopy(value, memo))
        return result
    def __getstate__(self):
        # Pickle the full value lists under '_data', since ordinary dict
        # pickling would go through __getitem__ and lose all but the last.
        obj_dict = self.__dict__.copy()
        obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
        return obj_dict
    def __setstate__(self, obj_dict):
        data = obj_dict.pop('_data', {})
        for k, v in data.items():
            self.setlist(k, v)
        self.__dict__.update(obj_dict)
    def get(self, key, default=None):
        """
        Returns the last data value for the passed key. If key doesn't exist
        or value is an empty list, then default is returned.
        """
        try:
            val = self[key]
        except KeyError:
            return default
        if val == []:
            return default
        return val
    def getlist(self, key):
        """
        Returns the list of values for the passed key. If key doesn't exist,
        then an empty list is returned.
        """
        try:
            return super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            return []
    def setlist(self, key, list_):
        super(MultiValueDict, self).__setitem__(key, list_)
    def setdefault(self, key, default=None):
        if key not in self:
            self[key] = default
            return default
        return self[key]
    def setlistdefault(self, key, default_list=None):
        if key not in self:
            if default_list is None:
                default_list = []
            self.setlist(key, default_list)
            return default_list
        return self.getlist(key)
    def appendlist(self, key, value):
        """Appends an item to the internal list associated with key."""
        self.setlistdefault(key).append(value)
    def items(self):
        """
        Returns a list of (key, value) pairs, where value is the last item in
        the list associated with the key.
        """
        return [(key, self[key]) for key in self.keys()]
    def iteritems(self):
        """
        Yields (key, value) pairs, where value is the last item in the list
        associated with the key.
        """
        for key in self.keys():
            yield (key, self[key])
    def lists(self):
        """Returns a list of (key, list) pairs."""
        return super(MultiValueDict, self).items()
    def iterlists(self):
        """Yields (key, list) pairs."""
        return super(MultiValueDict, self).iteritems()
    def values(self):
        """Returns a list of the last value on every key list."""
        return [self[key] for key in self.keys()]
    def itervalues(self):
        """Yield the last value on every key list."""
        for key in self.iterkeys():
            yield self[key]
    def copy(self):
        """Returns a shallow copy of this object."""
        return copy(self)
    def update(self, *args, **kwargs):
        """
        update() extends rather than replaces existing key lists.
        Also accepts keyword args.
        """
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got %d" % len(args))
        if args:
            other_dict = args[0]
            if isinstance(other_dict, MultiValueDict):
                for key, value_list in other_dict.lists():
                    self.setlistdefault(key).extend(value_list)
            else:
                try:
                    for key, value in other_dict.items():
                        self.setlistdefault(key).append(value)
                except TypeError:
                    raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
        for key, value in kwargs.iteritems():
            self.setlistdefault(key).append(value)
class DotExpandedDict(dict):
    """
    A dict constructor that expands dotted keys into nested dictionaries.
    It's confusing, but this example should make sense.

    >>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
            'person.1.lastname': ['Willison'], \
            'person.2.firstname': ['Adrian'], \
            'person.2.lastname': ['Holovaty']})
    >>> d
    {'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
    >>> d['person']
    {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
    >>> d['person']['1']
    {'lastname': ['Willison'], 'firstname': ['Simon']}

    # Gotcha: Results are unpredictable if the dots are "uneven":
    >>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
    {'c': 1}
    """
    def __init__(self, key_to_list_mapping):
        for dotted_key, value in key_to_list_mapping.items():
            node = self
            parts = dotted_key.split('.')
            # Walk/create intermediate dicts for every part but the last.
            for part in parts[:-1]:
                node = node.setdefault(part, {})
            # Assign the value at the innermost level reached.
            try:
                node[parts[-1]] = value
            except TypeError:  # Special-case if node isn't a dict.
                node = {parts[-1]: value}
class ImmutableList(tuple):
    """
    A tuple-like object that raises useful errors when it is asked to mutate.

    The error message (or a ready-made exception instance) can be supplied
    via the ``warning`` keyword argument.

    Example::

        >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
        >>> a[3] = '4'
        Traceback (most recent call last):
            ...
        AttributeError: You cannot mutate this.
    """
    def __new__(cls, *args, **kwargs):
        # 'warning' is consumed here so it isn't forwarded to tuple.__new__().
        if 'warning' in kwargs:
            warning = kwargs['warning']
            del kwargs['warning']
        else:
            warning = 'ImmutableList object is immutable.'
        self = tuple.__new__(cls, *args, **kwargs)
        self.warning = warning
        return self
    def complain(self, *args, **kwargs):
        """
        Stand-in for every mutating method: raises self.warning if it is an
        Exception instance, otherwise an AttributeError with that message.
        """
        # Fixed a typo: the varargs parameter was spelled "*wargs". Purely
        # internal; the arguments are discarded either way.
        if isinstance(self.warning, Exception):
            raise self.warning
        else:
            raise AttributeError(self.warning)
    # All list mutation functions complain.
    __delitem__ = complain
    __delslice__ = complain
    __iadd__ = complain
    __imul__ = complain
    __setitem__ = complain
    __setslice__ = complain
    append = complain
    extend = complain
    insert = complain
    pop = complain
    remove = complain
    sort = complain
    reverse = complain
class DictWrapper(dict):
    """
    A dict whose lookups for keys beginning with a given prefix are routed
    through a transformation function; the prefix itself is stripped before
    the underlying lookup happens.

    Used by the SQL construction code to ensure that values are correctly
    quoted before being used.
    """
    def __init__(self, data, func, prefix):
        super(DictWrapper, self).__init__(data)
        self.func = func
        self.prefix = prefix

    def __getitem__(self, key):
        """
        Strip the prefix (when present) before the real lookup. Prefixed
        keys have their values passed through self.func; unprefixed keys
        are returned untouched.
        """
        prefixed = key.startswith(self.prefix)
        if prefixed:
            key = key[len(self.prefix):]
        value = super(DictWrapper, self).__getitem__(key)
        if prefixed:
            return self.func(value)
        return value
| bsd-3-clause | 395442c361a330e4acc96547cc51ff6c | 30.202405 | 131 | 0.547206 | 4.243663 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/null_fk_ordering/tests.py | 92 | 2012 | from django.test import TestCase
from regressiontests.null_fk_ordering.models import *
class NullFkOrderingTests(TestCase):
    def test_ordering_across_null_fk(self):
        """
        Regression test for #7512

        ordering across nullable Foreign Keys shouldn't exclude results
        """
        author_1 = Author.objects.create(name='Tom Jones')
        author_2 = Author.objects.create(name='Bob Smith')
        # article_1 has no author, so its FK value is NULL.
        article_1 = Article.objects.create(title='No author on this article')
        article_2 = Article.objects.create(author=author_1, title='This article written by Tom Jones')
        article_3 = Article.objects.create(author=author_2, title='This article written by Bob Smith')
        # We can't compare results directly (since different databases sort NULLs to
        # different ends of the ordering), but we can check that all results are
        # returned.
        self.assertTrue(len(list(Article.objects.all())) == 3)
        # Same check over a deeper chain of relations
        # (Comment -> Post -> Forum -> SystemInfo), again with some comments
        # left without a post so the join is NULL partway down.
        s = SystemInfo.objects.create(system_name='System Info')
        f = Forum.objects.create(system_info=s, forum_name='First forum')
        p = Post.objects.create(forum=f, title='First Post')
        c1 = Comment.objects.create(post=p, comment_text='My first comment')
        c2 = Comment.objects.create(comment_text='My second comment')
        s2 = SystemInfo.objects.create(system_name='More System Info')
        f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
        p2 = Post.objects.create(forum=f2, title='Second Post')
        c3 = Comment.objects.create(comment_text='Another first comment')
        c4 = Comment.objects.create(post=p2, comment_text='Another second comment')
        # We have to test this carefully. Some databases sort NULL values before
        # everything else, some sort them afterwards. So we extract the ordered list
        # and check the length. Before the fix, this list was too short (some values
        # were omitted).
        self.assertTrue(len(list(Comment.objects.all())) == 4)
| bsd-3-clause | abe2ee712ef02f3b31e16bc27fdd87ed | 50.589744 | 102 | 0.678926 | 3.96063 | false | true | false | false |
django-nonrel/django-nonrel | django/shortcuts/__init__.py | 254 | 4642 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
    """
    Render a template via django.template.loader.render_to_string() and wrap
    the result in an HttpResponse.

    ``mimetype`` is extracted and handed to the HttpResponse; every other
    argument is forwarded to render_to_string().
    """
    mimetype = kwargs.pop('mimetype', None)
    content = loader.render_to_string(*args, **kwargs)
    return HttpResponse(content, mimetype=mimetype)
def render(request, *args, **kwargs):
    """
    Render a template to an HttpResponse, using a RequestContext by default.

    ``content_type`` and ``status`` go to the HttpResponse; an explicit
    ``context_instance`` (which must already carry its current_app) or a
    ``current_app`` may also be given. Everything else is forwarded to
    django.template.loader.render_to_string().
    """
    response_kwargs = {
        'content_type': kwargs.pop('content_type', None),
        'status': kwargs.pop('status', None),
    }
    # An explicitly-passed context_instance wins, even if it is None; only
    # build a RequestContext when the caller supplied none at all.
    if 'context_instance' in kwargs:
        context_instance = kwargs.pop('context_instance')
        if kwargs.get('current_app', None):
            raise ValueError('If you provide a context_instance you must '
                             'set its current_app before calling render()')
    else:
        context_instance = RequestContext(
            request, current_app=kwargs.pop('current_app', None))
    kwargs['context_instance'] = context_instance
    return HttpResponse(loader.render_to_string(*args, **kwargs),
                        **response_kwargs)
def redirect(to, *args, **kwargs):
    """
    Returns an HttpResponseRedirect to the appropriate URL for the arguments
    passed.

    The arguments could be:

        * A model: the model's `get_absolute_url()` function will be called.

        * A view name, possibly with arguments: `urlresolvers.reverse()` will
          be used to reverse-resolve the name.

        * A URL, which will be used as-is for the redirect location.

    By default issues a temporary redirect; pass permanent=True to issue a
    permanent redirect.
    """
    if kwargs.pop('permanent', False):
        redirect_class = HttpResponsePermanentRedirect
    else:
        redirect_class = HttpResponseRedirect
    # If it's a model, use get_absolute_url()
    if hasattr(to, 'get_absolute_url'):
        return redirect_class(to.get_absolute_url())
    # Next try a reverse URL resolution.
    try:
        return redirect_class(urlresolvers.reverse(to, args=args, kwargs=kwargs))
    except urlresolvers.NoReverseMatch:
        # If this is a callable, re-raise.
        if callable(to):
            raise
        # If this doesn't "feel" like a URL, re-raise.
        if '/' not in to and '.' not in to:
            raise
    # Finally, fall back and assume it's a URL
    return redirect_class(to)
def _get_queryset(klass):
    """
    Normalize ``klass`` -- a Model, Manager, or QuerySet -- into a QuerySet.

    Shared helper that keeps get_object_or_404 and get_list_or_404 DRY.
    """
    if isinstance(klass, QuerySet):
        return klass
    if isinstance(klass, Manager):
        return klass.all()
    # Otherwise assume a Model class and use its default manager.
    return klass._default_manager.all()
def get_object_or_404(klass, *args, **kwargs):
    """
    Call get() on the queryset derived from ``klass`` and return the result,
    raising Http404 when no matching object exists.

    ``klass`` may be a Model, a Manager, or a QuerySet; every other argument
    is passed straight through to get(). As with get(), a
    MultipleObjectsReturned error propagates when several rows match.
    """
    qs = _get_queryset(klass)
    try:
        return qs.get(*args, **kwargs)
    except qs.model.DoesNotExist:
        raise Http404('No %s matches the given query.' % qs.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
    """
    Call filter() on the queryset derived from ``klass`` and return the
    matching objects as a list, raising Http404 when the list is empty.

    ``klass`` may be a Model, a Manager, or a QuerySet; every other argument
    is passed straight through to filter().
    """
    qs = _get_queryset(klass)
    matches = list(qs.filter(*args, **kwargs))
    if not matches:
        raise Http404('No %s matches the given query.' % qs.model._meta.object_name)
    return matches
| bsd-3-clause | 9ade70de8891035ac652d8da5f8b4580 | 34.707692 | 90 | 0.67277 | 4.227687 | false | false | false | false |
django-nonrel/django-nonrel | django/core/files/storage.py | 158 | 9878 | import os
import errno
import urlparse
import itertools
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.core.files import locks, File
from django.core.files.move import file_move_safe
from django.utils.encoding import force_unicode, filepath_to_uri
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.utils.text import get_valid_filename
from django.utils._os import safe_join
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
    """
    A base storage class, providing some default behaviors that all other
    storage systems can inherit or override, as necessary.
    """
    # The following methods represent a public interface to private methods.
    # These shouldn't be overridden by subclasses unless absolutely necessary.
    def open(self, name, mode='rb', mixin=None):
        """
        Retrieves the specified file from storage, using the optional mixin
        class to customize what features are available on the File returned.
        """
        file = self._open(name, mode)
        if mixin:
            # Add the mixin as a parent class of the File returned from storage.
            file.__class__ = type(mixin.__name__, (mixin, file.__class__), {})
        return file
    def save(self, name, content):
        """
        Saves new content to the file specified by name. The content should be a
        proper File object, ready to be read from the beginning.
        """
        # Get the proper name for the file, as it will actually be saved.
        if name is None:
            # Fall back to the File object's own name.
            name = content.name
        name = self.get_available_name(name)
        name = self._save(name, content)
        # Store filenames with forward slashes, even on Windows
        return force_unicode(name.replace('\\', '/'))
    # These methods are part of the public API, with default implementations.
    def get_valid_name(self, name):
        """
        Returns a filename, based on the provided filename, that's suitable for
        use in the target storage system.
        """
        return get_valid_filename(name)
    def get_available_name(self, name):
        """
        Returns a filename that's free on the target storage system, and
        available for new content to be written to.
        """
        dir_name, file_name = os.path.split(name)
        file_root, file_ext = os.path.splitext(file_name)
        # If the filename already exists, add an underscore and a number (before
        # the file extension, if one exists) to the filename until the generated
        # filename doesn't exist.
        # NOTE: count.next() is the Python 2 iterator protocol.
        count = itertools.count(1)
        while self.exists(name):
            # file_ext includes the dot.
            name = os.path.join(dir_name, "%s_%s%s" % (file_root, count.next(), file_ext))
        return name
    def path(self, name):
        """
        Returns a local filesystem path where the file can be retrieved using
        Python's built-in open() function. Storage systems that can't be
        accessed using open() should *not* implement this method.
        """
        raise NotImplementedError("This backend doesn't support absolute paths.")
    # The following methods form the public API for storage systems, but with
    # no default implementations. Subclasses must implement *all* of these.
    def delete(self, name):
        """
        Deletes the specified file from the storage system.
        """
        raise NotImplementedError()
    def exists(self, name):
        """
        Returns True if a file referenced by the given name already exists in the
        storage system, or False if the name is available for a new file.
        """
        raise NotImplementedError()
    def listdir(self, path):
        """
        Lists the contents of the specified path, returning a 2-tuple of lists;
        the first item being directories, the second item being files.
        """
        raise NotImplementedError()
    def size(self, name):
        """
        Returns the total size, in bytes, of the file specified by name.
        """
        raise NotImplementedError()
    def url(self, name):
        """
        Returns an absolute URL where the file's contents can be accessed
        directly by a Web browser.
        """
        raise NotImplementedError()
    def accessed_time(self, name):
        """
        Returns the last accessed time (as datetime object) of the file
        specified by name.
        """
        raise NotImplementedError()
    def created_time(self, name):
        """
        Returns the creation time (as datetime object) of the file
        specified by name.
        """
        raise NotImplementedError()
    def modified_time(self, name):
        """
        Returns the last modified time (as datetime object) of the file
        specified by name.
        """
        raise NotImplementedError()
class FileSystemStorage(Storage):
    """
    Standard filesystem storage backend: files live under ``location``
    (default settings.MEDIA_ROOT) and are served under ``base_url``
    (default settings.MEDIA_URL).
    """
    def __init__(self, location=None, base_url=None):
        if location is None:
            location = settings.MEDIA_ROOT
        if base_url is None:
            base_url = settings.MEDIA_URL
        self.location = os.path.abspath(location)
        self.base_url = base_url
    def _open(self, name, mode='rb'):
        return File(open(self.path(name), mode))
    def _save(self, name, content):
        # NOTE: this method uses the Python 2 "except OSError, e:" syntax.
        full_path = self.path(name)
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        elif not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)
        # There's a potential race condition between get_available_name and
        # saving the file; it's possible that two threads might return the
        # same name, at which point all sorts of fun happens. So we need to
        # try to create the file, but if it already exists we have to go back
        # to get_available_name() and try again.
        while True:
            try:
                # This file has a file path that we can move.
                if hasattr(content, 'temporary_file_path'):
                    file_move_safe(content.temporary_file_path(), full_path)
                    content.close()
                # This is a normal uploadedfile that we can stream.
                else:
                    # This fun binary flag incantation makes os.open throw an
                    # OSError if the file already exists before we open it.
                    fd = os.open(full_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0))
                    try:
                        locks.lock(fd, locks.LOCK_EX)
                        for chunk in content.chunks():
                            os.write(fd, chunk)
                    finally:
                        locks.unlock(fd)
                        os.close(fd)
            except OSError, e:
                if e.errno == errno.EEXIST:
                    # Ooops, the file exists. We need a new file name.
                    name = self.get_available_name(name)
                    full_path = self.path(name)
                else:
                    raise
            else:
                # OK, the file save worked. Break out of the loop.
                break
        if settings.FILE_UPLOAD_PERMISSIONS is not None:
            os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
        return name
    def delete(self, name):
        name = self.path(name)
        # If the file exists, delete it from the filesystem.
        if os.path.exists(name):
            os.remove(name)
    def exists(self, name):
        return os.path.exists(self.path(name))
    def listdir(self, path):
        # Split entries into (directories, files), per the Storage contract.
        path = self.path(path)
        directories, files = [], []
        for entry in os.listdir(path):
            if os.path.isdir(os.path.join(path, entry)):
                directories.append(entry)
            else:
                files.append(entry)
        return directories, files
    def path(self, name):
        # safe_join rejects names that would escape self.location (e.g. via
        # '..'), raising ValueError which is surfaced as SuspiciousOperation.
        try:
            path = safe_join(self.location, name)
        except ValueError:
            raise SuspiciousOperation("Attempted access to '%s' denied." % name)
        return os.path.normpath(path)
    def size(self, name):
        return os.path.getsize(self.path(name))
    def url(self, name):
        if self.base_url is None:
            raise ValueError("This file is not accessible via a URL.")
        return urlparse.urljoin(self.base_url, filepath_to_uri(name))
    def accessed_time(self, name):
        return datetime.fromtimestamp(os.path.getatime(self.path(name)))
    def created_time(self, name):
        return datetime.fromtimestamp(os.path.getctime(self.path(name)))
    def modified_time(self, name):
        return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
    """
    Import and return the storage class named by the dotted ``import_path``
    (e.g. ``'django.core.files.storage.FileSystemStorage'``).

    Falls back to ``settings.DEFAULT_FILE_STORAGE`` when no path is given.
    Raises ImproperlyConfigured if the path has no dot, the module cannot
    be imported, or the module lacks the named class.
    """
    if import_path is None:
        import_path = settings.DEFAULT_FILE_STORAGE
    try:
        # A path with no dot cannot name both a module and a class;
        # the two-way unpack raises ValueError in that case.
        module, classname = import_path.rsplit('.', 1)
    except ValueError:
        raise ImproperlyConfigured("%s isn't a storage module." % import_path)
    try:
        mod = import_module(module)
    except ImportError as e:
        raise ImproperlyConfigured('Error importing storage module %s: "%s"' % (module, e))
    try:
        return getattr(mod, classname)
    except AttributeError:
        raise ImproperlyConfigured('Storage module "%s" does not define a "%s" class.' % (module, classname))
class DefaultStorage(LazyObject):
    """
    Lazy wrapper around the configured default storage backend.

    The backend class is resolved and instantiated only on first
    attribute access, so importing this module has no side effects.
    """
    def _setup(self):
        # Resolve the class named by settings.DEFAULT_FILE_STORAGE
        # and instantiate it with no arguments.
        self._wrapped = get_storage_class()()

# Shared module-level singleton used as the default file storage.
default_storage = DefaultStorage()
| bsd-3-clause | 8bba6e99c51917efae11f48c4840bcd4 | 34.92 | 110 | 0.608828 | 4.475759 | false | false | false | false |
"""
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    """
    Set up a GEOS binary predicate: two geometry pointers (plus any
    extra argument types) in, a checked boolean out. Returns ``func``.
    """
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    func.restype = c_char
    func.errcheck = check_predicate
    return func
def unary_predicate(func):
    """
    Set up a GEOS unary predicate: one geometry pointer in, a checked
    boolean out. Returns ``func``.
    """
    func.argtypes = [GEOM_PTR]
    func.restype = c_char
    func.errcheck = check_predicate
    return func
## Unary Predicates ##
# Each takes a single geometry and answers a yes/no question about it.
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))
## Binary Predicates ##
# Each relates two geometries; equalsexact and relatepattern take an
# extra argument (tolerance, DE-9IM pattern string respectively).
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| bsd-3-clause | b567108986cfb1c778c5d6ce8b50ccd1 | 39.386364 | 78 | 0.760833 | 3.725367 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.