commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
334a55bf7fd31ed5937744a437e42eee597bd7e1 | Bump version 0.17.0rc14 --> 0.17.0rc15 | lbryio/lbry,lbryio/lbry,lbryio/lbry | lbrynet/__init__.py | lbrynet/__init__.py | import logging
__version__ = "0.17.0rc15"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
| import logging
__version__ = "0.17.0rc14"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
| mit | Python |
f37ca31fd2a0c14b01d4329cbcaa4dae954e3965 | Simplify test case | mwilliamson/funk | test/funk/test_tools.py | test/funk/test_tools.py | from nose.tools import assert_raises
from funk.tools import assert_raises_str
from funk.tools import assert_that
from funk.matchers import Matcher
def test_assert_raises_str_passes_if_test_raises_specified_exception_with_correct_message():
def func():
raise AssertionError("Oh noes!")
assert_raises_str(AssertionError, "Oh noes!", func)
def test_assert_raises_str_passed_if_test_raises_subtype_of_specified_exception_with_correct_message():
def func():
raise AssertionError("Oh noes!")
assert_raises_str(BaseException, "Oh noes!", func)
def test_assert_raises_str_fails_if_wrong_exception_raised():
def func():
raise TypeError("Oh noes!")
assert_raises(TypeError, lambda: assert_raises_str(KeyError, "Oh noes!", func))
def test_assert_raises_str_fails_if_no_exception_raised():
assert_raises(AssertionError, lambda: assert_raises_str(TypeError, "Oh noes!", lambda: None))
def test_assert_raises_str_fails_if_messages_do_not_match():
def func():
raise TypeError("Oh dear.")
assert_raises(AssertionError, lambda: assert_raises_str(TypeError, "Oh noes!", func))
def test_assert_that_passes_if_matcher_returns_true():
class TrueMatcher(Matcher):
def matches(self, value, failure_out):
return True
assert_that("Anything", TrueMatcher())
def test_assert_that_raises_assertion_error_if_matcher_returns_false():
class FalseMatcher(Matcher):
def matches(self, value, failure_out):
return False
assert_raises(AssertionError, lambda: assert_that("Anything", FalseMatcher()))
def test_assert_that_raises_assertion_error_describing_expected_and_actual_results():
class HasZeroLength(Matcher):
def matches(self, value, failure_out):
if len(value):
failure_out.append("got <value of length %s>" % len(value))
return False
return True
def __str__(self):
return "<value of length zero>"
assert_that([], HasZeroLength())
assert_raises_str(AssertionError,
"Expected: <value of length zero>\nbut: got <value of length 8>",
lambda: assert_that("Anything", HasZeroLength()))
| from nose.tools import assert_raises
from funk.tools import assert_raises_str
from funk.tools import assert_that
from funk.matchers import Matcher
def test_assert_raises_str_passes_if_test_raises_specified_exception_with_correct_message():
def func():
raise AssertionError("Oh noes!")
assert_raises_str(AssertionError, "Oh noes!", func)
def test_assert_raises_str_passed_if_test_raises_subtype_of_specified_exception_with_correct_message():
def func():
raise AssertionError("Oh noes!")
assert_raises_str(BaseException, "Oh noes!", func)
def test_assert_raises_str_fails_if_wrong_exception_raised():
def func():
raise TypeError("Oh noes!")
assert_raises(TypeError, lambda: assert_raises_str(KeyError, "Oh noes!", func))
def test_assert_raises_str_fails_if_no_exception_raised():
assert_raises(AssertionError, lambda: assert_raises_str(TypeError, "Oh noes!", lambda: None))
def test_assert_raises_str_fails_if_messages_do_not_match():
def func():
raise TypeError("Oh dear.")
assert_raises(AssertionError, lambda: assert_raises_str(TypeError, "Oh noes!", func))
def test_assert_that_passes_if_matcher_returns_true():
class TrueMatcher(Matcher):
def matches(self, value, failure_out):
return True
assert_that("Anything", TrueMatcher())
def test_assert_that_raises_assertion_error_if_matcher_returns_false():
class FalseMatcher(Matcher):
def matches(self, value, failure_out):
return False
assert_raises(AssertionError, lambda: assert_that("Anything", FalseMatcher()))
def test_assert_that_raises_assertion_error_describing_expected_and_actual_results():
class HasZeroLength(Matcher):
def matches(self, value, failure_out):
passed = len(value) == 0
if not passed:
failure_out.append("got <value of length %s>" % len(value))
return passed
def __str__(self):
return "<value of length zero>"
assert_that([], HasZeroLength())
assert_raises_str(AssertionError,
"Expected: <value of length zero>\nbut: got <value of length 8>",
lambda: assert_that("Anything", HasZeroLength()))
| bsd-2-clause | Python |
986398ee03d77b81129cfcdef475a53680812207 | Add boolean operators and boolean expr testing | admk/soap | soap/expr/common.py | soap/expr/common.py | """
.. module:: soap.expr.common
:synopsis: Common definitions for expressions.
"""
ADD_OP = '+'
SUBTRACT_OP = '-'
UNARY_SUBTRACT_OP = '-'
MULTIPLY_OP = '*'
DIVIDE_OP = '/'
EQUAL_OP = '=='
GREATER_OP = '>'
LESS_OP = '<'
UNARY_NEGATION_OP = '!'
AND_OP = '&'
OR_OP = '|'
BARRIER_OP = '#'
OPERATORS = [ADD_OP, MULTIPLY_OP]
BOOLEAN_OPERATORS = [
EQUAL_OP, GREATER_OP, LESS_OP, UNARY_NEGATION_OP, AND_OP, OR_OP
]
ARITHMETIC_OPERATORS = [
ADD_OP, SUBTRACT_OP, UNARY_SUBTRACT_OP, MULTIPLY_OP, DIVIDE_OP
]
UNARY_OPERATORS = [UNARY_SUBTRACT_OP, UNARY_NEGATION_OP]
ASSOCIATIVITY_OPERATORS = [ADD_OP, MULTIPLY_OP]
COMMUTATIVITY_OPERATORS = ASSOCIATIVITY_OPERATORS
COMMUTATIVE_DISTRIBUTIVITY_OPERATOR_PAIRS = [(MULTIPLY_OP, ADD_OP)]
# left-distributive: a * (b + c) == a * b + a * c
LEFT_DISTRIBUTIVITY_OPERATOR_PAIRS = \
COMMUTATIVE_DISTRIBUTIVITY_OPERATOR_PAIRS
# Note that division '/' is only right-distributive over +
RIGHT_DISTRIBUTIVITY_OPERATOR_PAIRS = \
COMMUTATIVE_DISTRIBUTIVITY_OPERATOR_PAIRS
LEFT_DISTRIBUTIVITY_OPERATORS, LEFT_DISTRIBUTION_OVER_OPERATORS = \
list(zip(*LEFT_DISTRIBUTIVITY_OPERATOR_PAIRS))
RIGHT_DISTRIBUTIVITY_OPERATORS, RIGHT_DISTRIBUTION_OVER_OPERATORS = \
list(zip(*RIGHT_DISTRIBUTIVITY_OPERATOR_PAIRS))
def is_expr(e):
return is_arith_expr(e) or is_bool_expr(e)
def is_arith_expr(e):
"""Check if `e` is an expression."""
if is_bool_expr(e):
return False
from soap.expr.arith import Expr
return isinstance(e, Expr)
def is_bool_expr(e):
"""Check if `e` is a boolean expression."""
from soap.expr.bool import BoolExpr
return isinstance(e, BoolExpr)
def concat_multi_expr(*expr_args):
"""Concatenates multiple expressions into a single expression by using the
barrier operator.
"""
from soap.expr.arith import Expr
me = None
for e in expr_args:
e = Expr(e)
me = me | e if me else e
return me
def split_multi_expr(e):
"""Splits the single expression into multiple expressions."""
if e.op != BARRIER_OP:
return [e]
return split_multi_expr(e.a1) + split_multi_expr(e.a2)
| """
.. module:: soap.expr.common
:synopsis: Common definitions for expressions.
"""
ADD_OP = '+'
SUBTRACT_OP = '-'
UNARY_SUBTRACT_OP = '-'
MULTIPLY_OP = '*'
DIVIDE_OP = '/'
BARRIER_OP = '|'
OPERATORS = [ADD_OP, MULTIPLY_OP]
UNARY_OPERATORS = [UNARY_SUBTRACT_OP]
ASSOCIATIVITY_OPERATORS = [ADD_OP, MULTIPLY_OP]
COMMUTATIVITY_OPERATORS = ASSOCIATIVITY_OPERATORS
COMMUTATIVE_DISTRIBUTIVITY_OPERATOR_PAIRS = [(MULTIPLY_OP, ADD_OP)]
# left-distributive: a * (b + c) == a * b + a * c
LEFT_DISTRIBUTIVITY_OPERATOR_PAIRS = \
COMMUTATIVE_DISTRIBUTIVITY_OPERATOR_PAIRS
# Note that division '/' is only right-distributive over +
RIGHT_DISTRIBUTIVITY_OPERATOR_PAIRS = \
COMMUTATIVE_DISTRIBUTIVITY_OPERATOR_PAIRS
LEFT_DISTRIBUTIVITY_OPERATORS, LEFT_DISTRIBUTION_OVER_OPERATORS = \
list(zip(*LEFT_DISTRIBUTIVITY_OPERATOR_PAIRS))
RIGHT_DISTRIBUTIVITY_OPERATORS, RIGHT_DISTRIBUTION_OVER_OPERATORS = \
list(zip(*RIGHT_DISTRIBUTIVITY_OPERATOR_PAIRS))
def is_expr(e):
"""Check if `e` is an expression."""
from soap.expr.arith import Expr
return isinstance(e, Expr)
def concat_multi_expr(*expr_args):
"""Concatenates multiple expressions into a single expression by using the
barrier operator `|`.
"""
from soap.expr.arith import Expr
me = None
for e in expr_args:
e = Expr(e)
me = me | e if me else e
return me
def split_multi_expr(e):
"""Splits the single expression into multiple expressions."""
if e.op != BARRIER_OP:
return [e]
return split_multi_expr(e.a1) + split_multi_expr(e.a2)
| mit | Python |
d03b385b5d23c321ee1d4bd2020be1452e8c1cab | Remove Python 2.4 support monkey patch and bump rev | reddec/pika,skftn/pika,Tarsbot/pika,shinji-s/pika,jstnlef/pika,zixiliuyue/pika,fkarb/pika-python3,renshawbay/pika-python3,vrtsystems/pika,Zephor5/pika,pika/pika,vitaly-krugl/pika,knowsis/pika,hugoxia/pika,benjamin9999/pika | pika/__init__.py | pika/__init__.py | # ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
__version__ = '0.9.13p2'
from pika.connection import ConnectionParameters
from pika.connection import URLParameters
from pika.credentials import PlainCredentials
from pika.spec import BasicProperties
from pika.adapters.base_connection import BaseConnection
from pika.adapters.asyncore_connection import AsyncoreConnection
from pika.adapters.blocking_connection import BlockingConnection
from pika.adapters.select_connection import SelectConnection
| # ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
__version__ = '0.9.13p1'
from pika.connection import ConnectionParameters
from pika.connection import URLParameters
from pika.credentials import PlainCredentials
from pika.spec import BasicProperties
from pika.adapters.base_connection import BaseConnection
from pika.adapters.asyncore_connection import AsyncoreConnection
from pika.adapters.blocking_connection import BlockingConnection
from pika.adapters.select_connection import SelectConnection
# Python 2.4 support: add struct.unpack_from if it's missing.
try:
import struct
getattr(struct, "unpack_from")
except AttributeError:
def _unpack_from(fmt, buf, offset=0):
slice = buffer(buf, offset, struct.calcsize(fmt))
return struct.unpack(fmt, slice)
struct.unpack_from = _unpack_from
| bsd-3-clause | Python |
b2f0670e1859368f0efb7b3cc485a41b1e9cb14a | Update tarot.py | rmmh/skybot,parkrrr/skybot,jmgao/skybot | plugins/tarot.py | plugins/tarot.py | """
🔮 Spooky fortunes and assistance for witches
"""
from util import hook, http
@hook.command
def tarot(inp):
".tarot <cardname> -- finds a card by name"
try:
card = http.get_json(
"https://tarot-api.com/find/" + inp
)
except http.HTTPError:
return "the spirits are displeased."
return card["name"] + ": " + ", ".join(card["keywords"])
@hook.command
def fortune():
".fortune -- returns one random card and it's fortune"
try:
cards = http.get_json("https://tarot-api.com/draw/1")
except http.HTTPError:
return "the spirits are displeased."
card = cards[0]
return card["name"] + ": " + ", ".join(card["keywords"])
| """
🔮 Spooky fortunes and assistance for witches
"""
from util import hook, http
@hook.command
def tarot(inp):
".tarot <cardname> -- finds a card by name"
try:
card = http.get_json(
"https://tarot-api.com/find/{search}".format(
search=inp
)
)
except http.HTTPError:
return "the spirits are displeased."
return card["name"] + ": " + ", ".join(card["keywords"])
def fortune():
".fortune -- returns one random card and it's fortune"
try:
cards = http.get_json("https://tarot-api.com/draw/1")
except http.HTTPError:
return "the spirits are displeased."
card = cards[0]
return card["name"] + ": " + ", ".join(card["keywords"]) | unlicense | Python |
ff4239ecd4c46a59c819a15c11d87f5deca71ad6 | use python_2_unicode_compatible for model str | appsembler/django-souvenirs | souvenirs/models.py | souvenirs/models.py | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Souvenir(models.Model):
"""
One instance of seeing an active user
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
when = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
ordering = ['-when']
def __str__(self):
return 'user={} when={}'.format(self.user_id, self.when)
| from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.db import models
from django.utils import timezone
class Souvenir(models.Model):
"""
One instance of seeing an active user
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
when = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
ordering = ['-when']
def __repr__(self):
return '<{} user={} when={!r}>'.format(
self.__class__.__name__, self.user.username, self.when)
| mit | Python |
70fd93901be2a7d6662525fcd5b3914c2ffbc7d4 | add open_file into tests run through old runner | ocefpaf/OWSLib,jachym/OWSLib,menegon/OWSLib,mbertrand/OWSLib,bird-house/OWSLib,daf/OWSLib,datagovuk/OWSLib,datagovuk/OWSLib,Jenselme/OWSLib,jaygoldfinch/OWSLib,kalxas/OWSLib,kwilcox/OWSLib,JuergenWeichand/OWSLib,geographika/OWSLib,tomkralidis/OWSLib,geopython/OWSLib,robmcmullen/OWSLib,daf/OWSLib,gfusca/OWSLib,dblodgett-usgs/OWSLib,datagovuk/OWSLib,b-cube/OWSLib,QuLogic/OWSLib,daf/OWSLib,jaygoldfinch/OWSLib,KeyproOy/OWSLib | tests/runalldoctests.py | tests/runalldoctests.py | import doctest
import getopt
import glob
import sys
try:
import pkg_resources
pkg_resources.require('OWSLib')
except (ImportError, pkg_resources.DistributionNotFound):
pass
def open_file(filename, mode='r'):
"""Helper function to open files from within the tests package."""
import os
return open(os.path.join(os.path.dirname(__file__), filename), mode)
EXTRA_GLOBALS = {'open_file': open_file}
def run(pattern):
if pattern is None:
testfiles = glob.glob('*.txt')
else:
testfiles = glob.glob(pattern)
for file in testfiles:
doctest.testfile(file, extraglobs=EXTRA_GLOBALS)
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "t:v")
except getopt.GetoptError:
print "Usage: python runalldoctests.py [-t GLOB_PATTERN]"
sys.exit(2)
pattern = None
for o, a in opts:
if o == '-t':
pattern = a
run(pattern)
| import doctest
import getopt
import glob
import sys
try:
import pkg_resources
pkg_resources.require('OWSLib')
except (ImportError, pkg_resources.DistributionNotFound):
pass
def run(pattern):
if pattern is None:
testfiles = glob.glob('*.txt')
else:
testfiles = glob.glob(pattern)
for file in testfiles:
doctest.testfile(file)
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "t:v")
except getopt.GetoptError:
print "Usage: python runalldoctests.py [-t GLOB_PATTERN]"
sys.exit(2)
pattern = None
for o, a in opts:
if o == '-t':
pattern = a
run(pattern)
| bsd-3-clause | Python |
c3f7ae17289c38fcdfad45c0b71f553f8004c0fd | Update list test. | m110/grafcli,m110/grafcli | tests/test_resources.py | tests/test_resources.py | #!/usr/bin/python3
import os
import sys
import unittest
from unittest.mock import patch
LIB_PATH = os.path.dirname(os.path.realpath(__file__)) + '/../'
CONFIG_PATH = os.path.join(LIB_PATH, 'grafcli.conf.example')
sys.path.append(LIB_PATH)
from climb.config import load_config_file
load_config_file(CONFIG_PATH)
from grafcli.resources import Resources
from grafcli.exceptions import InvalidPath
from grafcli.resources.local import LocalResources
from grafcli.resources.templates import DashboardsTemplates, RowsTemplates, PanelTemplates
class ResourcesTest(unittest.TestCase):
def setUp(self):
self.remote_patcher = patch('grafcli.resources.resources.RemoteResources')
self.remote_resources = self.remote_patcher.start()
def tearDown(self):
self.remote_patcher.stop()
def test_list(self):
r = Resources()
self.assertEqual(r.list(None), ['backups', 'remote', 'templates'])
self.assertEqual(r.list('remote'), ['host.example.com'])
self.assertEqual(r.list('templates'), ('dashboards', 'rows', 'panels'))
with self.assertRaises(InvalidPath):
r.list('invalid_path')
def test_get_empty(self):
r = Resources()
with self.assertRaises(InvalidPath):
r.get(None)
def test_parse_path(self):
r = Resources()
manager, parts = r._parse_path('/backups/a/b/c')
self.assertIsInstance(manager, LocalResources)
self.assertListEqual(parts, ['a', 'b', 'c'])
manager, parts = r._parse_path('/templates/dashboards/a/b')
self.assertIsInstance(manager, DashboardsTemplates)
self.assertListEqual(parts, ['a', 'b'])
manager, parts = r._parse_path('/remote/host.example.com/a/b')
self.remote_resources.assert_called_once_with('host.example.com')
self.assertListEqual(parts, ['a', 'b'])
with self.assertRaises(InvalidPath):
r._parse_path('/invalid/path')
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/python3
import os
import sys
import unittest
from unittest.mock import patch
LIB_PATH = os.path.dirname(os.path.realpath(__file__)) + '/../'
CONFIG_PATH = os.path.join(LIB_PATH, 'grafcli.conf.example')
sys.path.append(LIB_PATH)
from climb.config import load_config_file
load_config_file(CONFIG_PATH)
from grafcli.resources import Resources
from grafcli.exceptions import InvalidPath
from grafcli.resources.local import LocalResources
from grafcli.resources.templates import DashboardsTemplates, RowsTemplates, PanelTemplates
class ResourcesTest(unittest.TestCase):
def setUp(self):
self.remote_patcher = patch('grafcli.resources.resources.RemoteResources')
self.remote_resources = self.remote_patcher.start()
def tearDown(self):
self.remote_patcher.stop()
def test_list(self):
r = Resources()
self.assertEqual(r.list(None), ['backups', 'remote', 'templates'])
self.assertEqual(r.list('remote'), ['host.example.com'])
self.assertEqual(r.list('templates'), ('dashboards', 'rows', 'panels'))
def test_get_empty(self):
r = Resources()
with self.assertRaises(InvalidPath):
r.get(None)
def test_parse_path(self):
r = Resources()
manager, parts = r._parse_path('/backups/a/b/c')
self.assertIsInstance(manager, LocalResources)
self.assertListEqual(parts, ['a', 'b', 'c'])
manager, parts = r._parse_path('/templates/dashboards/a/b')
self.assertIsInstance(manager, DashboardsTemplates)
self.assertListEqual(parts, ['a', 'b'])
manager, parts = r._parse_path('/remote/host.example.com/a/b')
self.remote_resources.assert_called_once_with('host.example.com')
self.assertListEqual(parts, ['a', 'b'])
with self.assertRaises(InvalidPath):
r._parse_path('/invalid/path')
if __name__ == "__main__":
unittest.main()
| mit | Python |
3b36ae57d7521fa6d19174d75fbea5567955f253 | Bump version | toxinu/pyhn,socketubs/pyhn | pyhn/__init__.py | pyhn/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = 'pyhn'
__version__ = '0.2.1'
__author__ = 'Geoffrey Lehée'
__license__ = 'AGPL3'
__copyright__ = 'Copyright 2013 Geoffrey Lehée'
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = 'pyhn'
__version__ = '0.2.0'
__author__ = 'Geoffrey Lehée'
__license__ = 'AGPL3'
__copyright__ = 'Copyright 2013 Geoffrey Lehée'
| mit | Python |
c22638085d2b4ad0efc6c1bf03b1732604b2b039 | Fix rank2. | drtconway/pykmer | pykmer/sparse.py | pykmer/sparse.py | import array
class sparse:
def __init__(self, B, xs):
self.B = B
self.S = B - 10
self.xs = xs
self.toc = array.array('I', [0 for i in xrange(1024+1)])
for x in xs:
v = x >> self.S
self.toc[v+1] += 1
t = 0
for i in xrange(1024+1):
t += self.toc[i]
self.toc[i] = t
def size(self):
return 1 << self.B
def count(self):
return len(self.xs)
def rank(self, x):
v = x >> self.S
l = self.toc[v]
h = self.toc[v+1] - 1
while h >= l:
m = (h + l) // 2
y = self.xs[m]
if y == x:
return m
if y < x:
l = m + 1
else:
h = m - 1
return l
def rank2(self, x0, x1):
r0 = self.rank(x0)
for r1 in xrange(r0, len(self.xs)):
if self.xs[r1] >= x1:
break
return (r0, r1)
def select(self, i):
assert 0 <= i
assert i < self.count()
return self.xs[i]
| import array
class sparse:
def __init__(self, B, xs):
self.B = B
self.S = B - 10
self.xs = xs
self.toc = array.array('I', [0 for i in xrange(1024+1)])
for x in xs:
v = x >> self.S
self.toc[v+1] += 1
t = 0
for i in xrange(1024+1):
t += self.toc[i]
self.toc[i] = t
def size(self):
return 1 << self.B
def count(self):
return len(self.xs)
def rank(self, x):
v = x >> self.S
l = self.toc[v]
h = self.toc[v+1] - 1
while h >= l:
m = (h + l) // 2
y = self.xs[m]
if y == x:
return m
if y < x:
l = m + 1
else:
h = m - 1
return l
def rank2(self, x0, x1):
r0 = self.rank(x0)
for r1 in xrange(r0, len(self.xs)):
if self.xs[r1] >= x1:
return (r0, r1)
def select(self, i):
assert 0 <= i
assert i < self.count()
return self.xs[i]
| apache-2.0 | Python |
cd6eaa0ee2f04c8d83a2c004f9f414288723bf20 | Disable useless-object-inheritance error | googleapis/google-auth-library-python-oauthlib,googleapis/google-auth-library-python-oauthlib | pylint.config.py | pylint.config.py | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is used to config gcp-devrel-py-tools run-pylint."""
import copy
library_additions = {
'MESSAGES CONTROL': {
'disable': [
'I',
'import-error',
'no-member',
'protected-access',
'redefined-variable-type',
'similarities',
'no-else-return',
'useless-object-inheritance',
],
},
}
library_replacements = {
'MASTER': {
'ignore': ['CVS', '.git', '.cache', '.tox', '.nox'],
'load-plugins': 'pylint.extensions.check_docs',
},
'REPORTS': {
'reports': 'no',
},
'BASIC': {
'method-rgx': '[a-z_][a-z0-9_]{2,40}$',
'function-rgx': '[a-z_][a-z0-9_]{2,40}$',
},
'TYPECHECK': {
'ignored-modules': ['six', 'google.protobuf'],
},
'DESIGN': {
'min-public-methods': '0',
'max-args': '10',
'max-attributes': '15',
},
}
test_additions = copy.deepcopy(library_additions)
test_additions['MESSAGES CONTROL']['disable'].extend([
'missing-docstring',
'no-self-use',
'redefined-outer-name',
'unused-argument',
'no-name-in-module',
])
test_replacements = copy.deepcopy(library_replacements)
test_replacements.setdefault('BASIC', {})
test_replacements['BASIC'].update({
'good-names': ['i', 'j', 'k', 'ex', 'Run', '_', 'fh', 'pytestmark'],
'method-rgx': '[a-z_][a-z0-9_]{2,80}$',
'function-rgx': '[a-z_][a-z0-9_]{2,80}$',
})
ignored_files = ()
| # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is used to config gcp-devrel-py-tools run-pylint."""
import copy
library_additions = {
'MESSAGES CONTROL': {
'disable': [
'I',
'import-error',
'no-member',
'protected-access',
'redefined-variable-type',
'similarities',
'no-else-return',
],
},
}
library_replacements = {
'MASTER': {
'ignore': ['CVS', '.git', '.cache', '.tox', '.nox'],
'load-plugins': 'pylint.extensions.check_docs',
},
'REPORTS': {
'reports': 'no',
},
'BASIC': {
'method-rgx': '[a-z_][a-z0-9_]{2,40}$',
'function-rgx': '[a-z_][a-z0-9_]{2,40}$',
},
'TYPECHECK': {
'ignored-modules': ['six', 'google.protobuf'],
},
'DESIGN': {
'min-public-methods': '0',
'max-args': '10',
'max-attributes': '15',
},
}
test_additions = copy.deepcopy(library_additions)
test_additions['MESSAGES CONTROL']['disable'].extend([
'missing-docstring',
'no-self-use',
'redefined-outer-name',
'unused-argument',
'no-name-in-module',
])
test_replacements = copy.deepcopy(library_replacements)
test_replacements.setdefault('BASIC', {})
test_replacements['BASIC'].update({
'good-names': ['i', 'j', 'k', 'ex', 'Run', '_', 'fh', 'pytestmark'],
'method-rgx': '[a-z_][a-z0-9_]{2,80}$',
'function-rgx': '[a-z_][a-z0-9_]{2,80}$',
})
ignored_files = ()
| apache-2.0 | Python |
336ab517038c680a19bdef697d4be77530f75432 | Update Transfer per 2020-04-30 changes | cloudtools/troposphere,cloudtools/troposphere | troposphere/transfer.py | troposphere/transfer.py | # Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 3.3.0
from . import AWSObject
from . import AWSProperty
from troposphere import Tags
VALID_HOMEDIRECTORY_TYPE = ('LOGICAL', 'PATH')
def validate_homedirectory_type(homedirectory_type):
"""Validate HomeDirectoryType for User"""
if homedirectory_type not in VALID_HOMEDIRECTORY_TYPE: # NOQA
raise ValueError("User HomeDirectoryType must be one of: %s" % # NOQA
", ".join(VALID_HOMEDIRECTORY_TYPE))
return homedirectory_type
class EndpointDetails(AWSProperty):
props = {
'AddressAllocationIds': ([basestring], False),
'SubnetIds': ([basestring], False),
'VpcEndpointId': (basestring, False),
'VpcId': (basestring, False),
}
class IdentityProviderDetails(AWSProperty):
props = {
'InvocationRole': (basestring, True),
'Url': (basestring, True),
}
class Server(AWSObject):
resource_type = "AWS::Transfer::Server"
props = {
'Certificate': (basestring, False),
'EndpointDetails': (EndpointDetails, False),
'EndpointType': (basestring, False),
'IdentityProviderDetails': (IdentityProviderDetails, False),
'IdentityProviderType': (basestring, False),
'LoggingRole': (basestring, False),
'Protocols': ([basestring], False),
'Tags': (Tags, False),
}
class HomeDirectoryMapEntry(AWSProperty):
props = {
'Entry': (basestring, True),
'Target': (basestring, True),
}
class User(AWSObject):
resource_type = "AWS::Transfer::User"
props = {
'HomeDirectory': (basestring, False),
'HomeDirectoryMappings': ([HomeDirectoryMapEntry], False),
'HomeDirectoryType': (validate_homedirectory_type, False),
'Policy': (basestring, False),
'Role': (basestring, True),
'ServerId': (basestring, True),
'SshPublicKeys': ([basestring], False),
'Tags': (Tags, False),
'UserName': (basestring, True),
}
| # Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 3.3.0
from . import AWSObject
from . import AWSProperty
from troposphere import Tags
VALID_HOMEDIRECTORY_TYPE = ('LOGICAL', 'PATH')
def validate_homedirectory_type(homedirectory_type):
"""Validate HomeDirectoryType for User"""
if homedirectory_type not in VALID_HOMEDIRECTORY_TYPE: # NOQA
raise ValueError("User HomeDirectoryType must be one of: %s" % # NOQA
", ".join(VALID_HOMEDIRECTORY_TYPE))
return homedirectory_type
class EndpointDetails(AWSProperty):
props = {
'AddressAllocationIds': ([basestring], False),
'SubnetIds': ([basestring], False),
'VpcEndpointId': (basestring, False),
'VpcId': (basestring, False),
}
class IdentityProviderDetails(AWSProperty):
props = {
'InvocationRole': (basestring, True),
'Url': (basestring, True),
}
class Server(AWSObject):
resource_type = "AWS::Transfer::Server"
props = {
'EndpointDetails': (EndpointDetails, False),
'EndpointType': (basestring, False),
'IdentityProviderDetails': (IdentityProviderDetails, False),
'IdentityProviderType': (basestring, False),
'LoggingRole': (basestring, False),
'Tags': (Tags, False),
}
class HomeDirectoryMapEntry(AWSProperty):
props = {
'Entry': (basestring, True),
'Target': (basestring, True),
}
class User(AWSObject):
resource_type = "AWS::Transfer::User"
props = {
'HomeDirectory': (basestring, False),
'HomeDirectoryMappings': ([HomeDirectoryMapEntry], False),
'HomeDirectoryType': (validate_homedirectory_type, False),
'Policy': (basestring, False),
'Role': (basestring, True),
'ServerId': (basestring, True),
'SshPublicKeys': ([basestring], False),
'Tags': (Tags, False),
'UserName': (basestring, True),
}
| bsd-2-clause | Python |
92158d172f634437a91d26c9cfe2bc4986cc85cf | exclude node | manojklm/pytest-filter | pytest_filter.py | pytest_filter.py | # -*- coding: utf-8 -*-
"""
pytest-filter
*************
"""
from __future__ import print_function
import os
import sys
import time
from path import Path
import pytest
import configparser
def pytest_addoption(parser):
parser.addini('filter_file', 'Location of filter file')
@pytest.mark.trylast
def pytest_configure(config):
if 'filter_file' in config.inicfg:
filter_path = Path(config.inicfg['filter_file'])
if not filter_path.isfile():
raise FileNotFoundError('filter_file: %s' % filter_path)
config._filter = filter_path
config.pluginmanager.register(config._filter)
def pytest_unconfigure(config):
"""un configure the mf_testlink framework plugin"""
_filter = getattr(config, '_filter', None)
if _filter:
del config._filter
config.pluginmanager.unregister(_filter)
def pytest_collection_modifyitems(session, config, items):
""" return custom item/collector for a python object in a module, or None. """
if 'filter_file' not in config.inicfg:
return
remaining = []
deselected = []
con = configparser.ConfigParser(allow_no_value=True)
con.read(config._filter)
xfail_count = 0
nodes = lambda x: [k if v is None else k+':'+v for k, v in x.items()]
for colitem in items:
exclude_test = False
if 'exclude-node' in con.sections():
if colitem.nodeid in nodes(con['exclude-node']):
exclude_test = True
if exclude_test:
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
| # -*- coding: utf-8 -*-
"""
pytest-filter
*************
"""
from __future__ import print_function
import os
import sys
import time
from path import Path
import pytest
import configparser
def pytest_addoption(parser):
parser.addini('filter_file', 'Location of filter file')
@pytest.mark.trylast
def pytest_configure(config):
if 'filter_file' in config.inicfg:
filter_path = Path(config.inicfg['filter_file'])
if not filter_path.isfile():
raise FileNotFoundError('filter_file: %s' % filter_path)
config._filter = filter_path
config.pluginmanager.register(config._filter)
def pytest_unconfigure(config):
"""un configure the mf_testlink framework plugin"""
_filter = getattr(config, '_filter', None)
if _filter:
del config._filter
config.pluginmanager.unregister(_filter)
| mit | Python |
e0836e052aad7f58d958459cfbcbff87826f0e45 | add xml_reflection to the install rule | keulYSMB/urdfdom,shadow-robot/urdfdom,robotology-dependencies/urdfdom,keulYSMB/urdfdom,keulYSMB/urdfdom,robotology-dependencies/urdfdom,robotology-dependencies/urdfdom,shadow-robot/urdfdom,robotology-dependencies/urdfdom,shadow-robot/urdfdom,keulYSMB/urdfdom,shadow-robot/urdfdom | urdf_parser_py/setup.py | urdf_parser_py/setup.py | #!/usr/bin/env python
from distutils.core import setup
d = {'author': u'Thomas Moulard <thomas.moulard@gmail.com>, David Lu <davidlu@wustl.edu>, Kelsey Hawkins <kphawkins@gmail.com>, Antonio El Khoury <aelkhour@laas.fr>, Eric Cousineau <eacousineau@gmail.com>',
'description': 'The urdf_parser_py package contains a Python implementation of the\nurdf_parser modeling various aspects of robot information, specified in the\nXml Robot Description Format (URDF).',
'license': 'BSD',
'maintainer': u'Thomas Moulard',
'maintainer_email': 'thomas.moulard@gmail.com',
'name': 'urdf_parser_py',
'package_dir': {'': 'src'},
'packages': ['urdf_parser_py', 'urdf_parser_py.xml_reflection'],
'url': 'http://ros.org/wiki/urdf_parser_py',
'version': '0.3.0'}
setup(**d)
| #!/usr/bin/env python
from distutils.core import setup

# Package metadata collected in a dict so setup() receives it via **kwargs.
d = {'author': u'Thomas Moulard <thomas.moulard@gmail.com>, David Lu <davidlu@wustl.edu>, Kelsey Hawkins <kphawkins@gmail.com>, Antonio El Khoury <aelkhour@laas.fr>, Eric Cousineau <eacousineau@gmail.com>',
     'description': 'The urdf_parser_py package contains a Python implementation of the\nurdf_parser modeling various aspects of robot information, specified in the\nXml Robot Description Format (URDF).',
     'license': 'BSD',
     'maintainer': u'Thomas Moulard',
     'maintainer_email': 'thomas.moulard@gmail.com',
     'name': 'urdf_parser_py',
     'package_dir': {'': 'src'},
     # NOTE(review): only the top-level package is listed; any subpackages
     # (e.g. xml_reflection) would need to be added here to be installed.
     'packages': ['urdf_parser_py'],
     'url': 'http://ros.org/wiki/urdf_parser_py',
     'version': '0.3.0'}
setup(**d)
| bsd-3-clause | Python |
b34991a2713ea321cbb9ab97aaa482c8002d1549 | change from string module to string methods | scipy/scipy-svn,scipy/scipy-svn,lesserwhirls/scipy-cwt,lesserwhirls/scipy-cwt,jasonmccampbell/scipy-refactor,scipy/scipy-svn,jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,jasonmccampbell/scipy-refactor,jasonmccampbell/scipy-refactor,scipy/scipy-svn,lesserwhirls/scipy-cwt | Lib/io/data_store.py | Lib/io/data_store.py | """ Load or save values to a file.
Shelves work well for storing data, but they are slow to access
repeatedly - especially for large data sets. This module allows
you to store data to a file and then load it back into the workspace.
When the data is stored, a python module is also created as the
"namespace for the data"
>>> import data_store
>>> import os
>>> a = 1
>>> data_store.save('c:/temp/junker',{'a':a})
>>> os.chdir('c:/temp')
>>> import junker
>>> junker.a
1
"""
__all__ = ['load', 'save', 'create_module', 'create_shelf']
import dumb_shelve
import os
def load(module):
    """ Load data into module from a shelf with
        the same name as the module.

    Every key stored in the shelf becomes an attribute of *module*.
    """
    dir,filename = os.path.split(module.__file__)
    filebase = filename.split('.')[0]
    fn = os.path.join(dir, filebase)
    f = dumb_shelve.open(fn, "r")
    #exec( 'import ' + module.__name__)
    for i in f.keys():
        # NOTE(review): exec-based attribute injection; setattr(module, i,
        # f[i]) would do the same without dynamic code -- confirm and simplify.
        exec( 'import ' + module.__name__+ ';' +
              module.__name__+'.'+i + '=' + 'f["' + i + '"]')
        # print i, 'loaded...'
    # print 'done'
# print 'done'
def save(file_name=None,data=None):
    """ Save the dictionary "data" into
        a module and shelf named save

    Writes *file_name*.py (a loader stub) and a shelf holding *data*.
    """
    # NOTE(review): this local import is unused here (create_shelf does the
    # shelve work); kept to preserve behavior.
    import dumb_shelve
    create_module(file_name)
    create_shelf(file_name,data)
def create_module(file_name):
    """Write a loader module ``file_name + '.py'`` unless one already exists.

    The generated stub imports data_store and re-loads the shelf contents
    into its own namespace when imported.
    """
    target = file_name + '.py'
    if os.path.exists(target):  # never clobber an existing file
        return
    module_name = os.path.split(file_name)[-1]
    contents = ('import scipy.io.data_store as data_store\n'
                'import %s\n'
                'data_store.load(%s)' % (module_name, module_name))
    with open(target, 'w') as f:
        f.write(contents)
def create_shelf(file_name,data):
    """Use this to write the data to a new file

    The shelf name is *file_name* with any extension stripped; each key of
    *data* becomes a shelf entry.
    """
    shelf_name = file_name.split('.')[0]
    f = dumb_shelve.open(shelf_name,'w')
    for i in data.keys():
        # print 'saving...',i
        f[i] = data[i]
    # print 'done'
    f.close()
| """ Load or save values to a file.
Shelves work well for storing data, but they are slow to access
repeatedly - especially for large data sets. This module allows
you to store data to a file and then load it back into the workspace.
When the data is stored, a python module is also created as the
"namespace for the data"
>>> import data_store
>>> import os
>>> a = 1
>>> data_store.save('c:/temp/junker',{'a':a})
>>> os.chdir('c:/temp')
>>> import junker
>>> junker.a
1
"""
__all__ = ['load', 'save', 'create_module', 'create_shelf']
import dumb_shelve
import string
import os
def load(module):
    """ Load data into module from a shelf with
        the same name as the module.

    Every key stored in the shelf becomes an attribute of *module*.
    """
    dir, filename = os.path.split(module.__file__)
    # str.split replaces string.split: the string-module functions are
    # deprecated and were removed in Python 3.
    filebase = filename.split('.')[0]
    fn = os.path.join(dir, filebase)
    f = dumb_shelve.open(fn, "r")
    for i in f.keys():
        # Inject each shelf entry into the module's namespace.
        exec( 'import ' + module.__name__+ ';' +
              module.__name__+'.'+i + '=' + 'f["' + i + '"]')
# print 'done'
def save(file_name=None,data=None):
    """ Save the dictionary "data" into
        a module and shelf named save

    Writes *file_name*.py (a loader stub) and a shelf holding *data*.
    """
    # NOTE(review): this local import appears unused (create_shelf does the
    # shelve work); kept to preserve behavior.
    import dumb_shelve
    create_module(file_name)
    create_shelf(file_name,data)
def create_module(file_name):
    """ Create the module file.

    Writes a loader stub *file_name*.py that re-imports the shelf data,
    unless the file already exists.
    """
    if not os.path.exists(file_name+'.py'): # don't clobber existing files
        module_name = os.path.split(file_name)[-1]
        f = open(file_name+'.py','w')
        f.write('import scipy.io.data_store as data_store\n')
        f.write('import %s\n' % module_name)
        f.write('data_store.load(%s)' % module_name)
        f.close()
def create_shelf(file_name, data):
    """Write each key/value pair of *data* into a new shelf file.

    The shelf name is *file_name* with any extension stripped.
    """
    # str.split replaces string.split: the string-module functions are
    # deprecated and were removed in Python 3.
    shelf_name = file_name.split('.')[0]
    f = dumb_shelve.open(shelf_name, 'w')
    for i in data.keys():
        f[i] = data[i]
    f.close()
| bsd-3-clause | Python |
c0f7dd3445455e9cb51b84944c54ad6f4d45a5dc | Update filtereval_caner.py | sbg/Mitty,sbg/Mitty | mitty/benchmarking/filtereval_caner.py | mitty/benchmarking/filtereval_caner.py | """Needed a way to go through the evaluation VCF from VCF benchmarking and spit out the FP and FN
calls into separate VCFs and then extract reads from those regions into a BAM.
vcffilter can mark the FP and FNs but leaves all the other records in. Would need to chain with vcftools
and at this point things are complicated enough that it's easier to use Python for this.
This tools drops two VCFs, one for FN and one for FP. Each has an associated ROI BED file.
These BED files can be used with samtools view to generate BAMs that contain reads from just these variants
"""
import time
import pysam
import logging
logger = logging.getLogger(__name__)
def extract_fp_fn(fname_in, prefix_out):
    """
    Split an evaluation VCF into false-positive and false-negative subsets.

    :param fname_in: path to the evaluation VCF/BCF; a sibling BAM with the
        same base name ('xxx.vcf' -> 'xxx.bam') is also opened.
    :param prefix_out: prefix for the output -fp/-fn VCF, BED and BAM files.
    :return: None; results are written to disk.
    """
    logger.debug('Starting filtering ...')
    t0 = time.time()
    mode = 'rb' if fname_in.endswith('bcf') else 'r'
    vcf_in = pysam.VariantFile(fname_in, mode)
    bam_in = pysam.AlignmentFile(fname_in[:-3] + 'bam', "rb")#caner
    fp_vcf_out = pysam.VariantFile(prefix_out + '-fp.vcf', mode='w', header=vcf_in.header)
    fp_bam_out = pysam.AlignmentFile(prefix_out+'-fp.bam', "wb", template=bam_in) #caner
    fp_roi_bed = open(prefix_out + '-fp-roi.bed', 'w')
    fn_vcf_out = pysam.VariantFile(prefix_out + '-fn.vcf', mode='w', header=vcf_in.header)
    fn_roi_bed = open(prefix_out + '-fn-roi.bed', 'w')
    fn_bam_out = pysam.AlignmentFile(prefix_out+'-fn.bam', "wb", template=bam_in) #caner
    n, fp_cnt, fn_cnt = -1, 0, 0
    for n, v in enumerate(vcf_in):
        s = v.samples['TRUTH']
        if s['BD'] == 'FN':
            fn_vcf_out.write(v)
            # NOTE(review): an FN record writes to the *fp* BAM here (and FP
            # to the *fn* BAM below) -- looks swapped; confirm intent.  Also,
            # AlignmentFile.write expects a single read while fetch() returns
            # an iterator, so this likely needs a per-read loop.
            fp_bam_out.write(bam_in.fetch(v.chrom, v.start, v.stop))#caner
            save_roi(fn_roi_bed, v)
            fn_cnt += 1
        s = v.samples['QUERY']
        if s['BD'] == 'FP':
            fp_vcf_out.write(v)
            fn_bam_out.write(bam_in.fetch(v.chrom, v.start, v.stop))#caner
            save_roi(fp_roi_bed, v)
            fp_cnt += 1
    bam_in.close()#caner
    fp_bam_out.close()#caner
    fn_bam_out.close()#caner
    logger.debug('Processed {} calls'.format(n + 1))
    logger.debug('Sample had {} FP, {} FN'.format(fp_cnt, fn_cnt))
    t1 = time.time()
    logger.debug('Took {} s'.format(t1 - t0))
def save_roi(fp, v):
    """Append variant *v* to the BED handle *fp* as chrom<TAB>start<TAB>stop."""
    fields = (v.chrom, v.start, v.stop)
    fp.write('\t'.join(str(field) for field in fields) + '\n')
extract_fp_fn("input.vcf","outcan")
| """Needed a way to go through the evaluation VCF from VCF benchmarking and spit out the FP and FN
calls into separate VCFs and then extract reads from those regions into a BAM.
vcffilter can mark the FP and FNs but leaves all the other records in. Would need to chain with vcftools
and at this point things are complicated enough that it's easier to use Python for this.
This tools drops two VCFs, one for FN and one for FP. Each has an associated ROI BED file.
These BED files can be used with samtools view to generate BAMs that contain reads from just these variants
"""
import time
import pysam
import logging
logger = logging.getLogger(__name__)
def extract_fp_fn(fname_in, prefix_out):
    """
    Split an evaluation VCF into false-positive and false-negative subsets.

    :param fname_in: path to the evaluation VCF/BCF; a sibling BAM with the
        same base name ('xxx.vcf' -> 'xxx.bam') is also opened.
    :param prefix_out: prefix for the output -fp/-fn VCF, BED and BAM files.
    :return: None; results are written to disk.
    """
    logger.debug('Starting filtering ...')
    t0 = time.time()
    mode = 'rb' if fname_in.endswith('bcf') else 'r'
    vcf_in = pysam.VariantFile(fname_in, mode)
    fp_vcf_out = pysam.VariantFile(prefix_out + '-fp.vcf', mode='w', header=vcf_in.header)
    fp_roi_bed = open(prefix_out + '-fp-roi.bed', 'w')
    fn_vcf_out = pysam.VariantFile(prefix_out + '-fn.vcf', mode='w', header=vcf_in.header)
    fn_roi_bed = open(prefix_out + '-fn-roi.bed', 'w')
    bam_in = pysam.AlignmentFile(fname_in[:-3] + 'bam', "rb")#caner
    fp_bam_out = pysam.AlignmentFile(prefix_out+'-fp.bam', "wb", template=bam_in) #caner
    fn_bam_out = pysam.AlignmentFile(prefix_out+'-fn.bam', "wb", template=bam_in) #caner
    n, fp_cnt, fn_cnt = -1, 0, 0
    for n, v in enumerate(vcf_in):
        s = v.samples['TRUTH']
        if s['BD'] == 'FN':
            fn_vcf_out.write(v)
            # NOTE(review): FN records write to the *fp* BAM (and FP to the
            # *fn* BAM below) -- looks swapped; also AlignmentFile.write takes
            # a single read, not the iterator returned by fetch().
            fp_bam_out.write(bam_in.fetch(v.chrom, v.start, v.stop))#caner
            save_roi(fn_roi_bed, v)
            fn_cnt += 1
        s = v.samples['QUERY']
        if s['BD'] == 'FP':
            fp_vcf_out.write(v)
            fn_bam_out.write(bam_in.fetch(v.chrom, v.start, v.stop))#caner
            save_roi(fp_roi_bed, v)
            fp_cnt += 1
    bam_in.close()#caner
    fp_bam_out.close()#caner
    fn_bam_out.close()#caner
    logger.debug('Processed {} calls'.format(n + 1))
    logger.debug('Sample had {} FP, {} FN'.format(fp_cnt, fn_cnt))
    t1 = time.time()
    logger.debug('Took {} s'.format(t1 - t0))
logger.debug('Took {} s'.format(t1 - t0))
def save_roi(fp, v):
    # Append the variant's locus as a BED line: chrom <TAB> start <TAB> stop.
    fp.write('{}\t{}\t{}\n'.format(v.chrom, v.start, v.stop))
extract_fp_fn("input.vcf","outcan")
| apache-2.0 | Python |
1382fd8afecdeca9bb1b6961a636b6502c16ad6e | Enumerate much more elegant to loop through list elements and regex each. | smehan/App-data-reqs,smehan/App-data-reqs | log-preprocessor.py | log-preprocessor.py | __author__ = 'shawnmehan'
import csv, os, re
# # first lets open the file, which is tab delimited because of , in description field and others
with open('./data/AppDataRequest2010-2015.tsv', 'rb') as csvfile:
testfile = open("./data/test.tsv", "wb") #TODO get proper line endings, not ^M
records = csv.reader(csvfile, delimiter='\t')
testwriter = csv.writer(testfile, delimiter='\t')
count = 0
for row in records:
for i, s in enumerate(row):
row[i] = re.sub(r'\n+', '', s)
testwriter.writerow(row)
count += 1
# for row in records:
# target = ','.join(row)
# print(target)
# if re.match(".*(?<!EOR)$", target):
# testwriter.writerow(row)
# elif re.match(".*EOR$", target):
# # print(row)
# # count += 1
# testwriter.writerow(row)
# else:
# testwriter.writerow(target)
print count
testfile.close() | __author__ = 'shawnmehan'
import csv, os, re
# # first lets open the file, which is tab delimited because of , in description field and others
with open('./data/AppDataRequest2010-2015.tsv', 'rb') as csvfile:
testfile = open("./data/test.tsv", "wb") #TODO get proper line endings, not ^M
records = csv.reader(csvfile, delimiter='\t')
testwriter = csv.writer(testfile, delimiter='\t')
count = 0
for row in records:
for e in row: #TODO still need to remove \n from list elements
e = re.sub(r'\n','',e)
testwriter.writerow(row)
count += 1
# for row in records:
# target = ','.join(row)
# print(target)
# if re.match(".*(?<!EOR)$", target):
# testwriter.writerow(row)
# elif re.match(".*EOR$", target):
# # print(row)
# # count += 1
# testwriter.writerow(row)
# else:
# testwriter.writerow(target)
print count
testfile.close() | apache-2.0 | Python |
af1fb4138c548f56cf27d734d106b889236c08e6 | Fix follow/unfollow signal weakref | grouan/udata,davidbgk/udata,opendatateam/udata,etalab/udata,etalab/udata,davidbgk/udata,jphnoel/udata,etalab/udata,davidbgk/udata,grouan/udata,grouan/udata,jphnoel/udata,opendatateam/udata,opendatateam/udata,jphnoel/udata | udata/core/followers/metrics.py | udata/core/followers/metrics.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from udata.core.metrics import Metric, MetricMetaClass
from udata.i18n import lazy_gettext as _
from udata.models import Follow
from .signals import on_follow, on_unfollow
__all__ = ('FollowersMetric', )
class FollowersMetricMetaclass(MetricMetaClass):
    """Metaclass that recomputes a metric whenever its model is (un)followed."""
    def __new__(cls, name, bases, attrs):
        # Ensure any child class compute itself on follow/unfollow
        new_class = super(FollowersMetricMetaclass, cls).__new__(cls, name, bases, attrs)
        if new_class.model:
            def callback(follow):
                if isinstance(follow.following, new_class.model):
                    new_class(follow.following).trigger_update()
            # weak=False keeps this local closure alive: the signal library
            # holds receivers weakly by default, so without it the callback
            # would be garbage-collected and never fire.
            on_follow.connect(callback, weak=False)
            on_unfollow.connect(callback, weak=False)
        return new_class
class FollowersMetric(Metric):
    """Metric counting how many users follow the target object."""
    name = 'followers'
    display_name = _('Followers')
    # Python 2-style metaclass declaration.
    __metaclass__ = FollowersMetricMetaclass

    def get_value(self):
        # Current number of Follow rows pointing at the target.
        return Follow.objects.followers(self.target).count()
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from udata.core.metrics import Metric, MetricMetaClass
from udata.i18n import lazy_gettext as _
from udata.models import Follow
from .signals import on_follow, on_unfollow
__all__ = ('FollowersMetric', )
class FollowersMetricMetaclass(MetricMetaClass):
    """Metaclass that recomputes a metric whenever its model is (un)followed."""
    def __new__(cls, name, bases, attrs):
        # Ensure any child class compute itself on follow/unfollow
        new_class = super(FollowersMetricMetaclass, cls).__new__(cls, name, bases, attrs)
        if new_class.model:
            def callback(follow):
                if isinstance(follow.following, new_class.model):
                    new_class(follow.following).trigger_update()
            # Connect with weak=False: blinker stores receivers as weak
            # references by default, so this local closure would otherwise be
            # garbage-collected immediately and the signal would silently
            # stop firing.
            on_follow.connect(callback, weak=False)
            on_unfollow.connect(callback, weak=False)
        return new_class
class FollowersMetric(Metric):
    """Metric counting how many users follow the target object."""
    name = 'followers'
    display_name = _('Followers')
    # Python 2-style metaclass declaration.
    __metaclass__ = FollowersMetricMetaclass

    def get_value(self):
        # Current number of Follow rows pointing at the target.
        return Follow.objects.followers(self.target).count()
| agpl-3.0 | Python |
fcbb0a4e6e1edba568783fade40061b5682affbd | Add test permissions bypass and change structure | makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin | geotrek/common/tests/__init__.py | geotrek/common/tests/__init__.py | # -*- encoding: utf-8 -*-
from django.contrib.auth.models import Permission
from django.utils import translation
from django.utils.translation import ugettext as _
# Workaround https://code.djangoproject.com/ticket/22865
from geotrek.common.models import FileType # NOQA
from mapentity.tests import MapEntityTest
from geotrek.authent.factories import StructureFactory
from geotrek.authent.tests import AuthentFixturesTest
class TranslationResetMixin(object):
    """Test mixin that deactivates the active translation before each test."""
    def setUp(self):
        translation.deactivate()
        super(TranslationResetMixin, self).setUp()
class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest):
    """Shared MapEntity test case checking structure assignment rules."""
    api_prefix = '/api/en/'

    def get_bad_data(self):
        return {'topology': 'doh!'}, _(u'Topology is not valid.')

    def test_structure_is_set(self):
        # Creating an object must record the creator's structure on it.
        if not hasattr(self.model, 'structure'):
            return
        self.login()
        response = self.client.post(self._get_add_url(), self.get_good_data())
        self.assertEqual(response.status_code, 302)
        obj = self.model.objects.last()
        self.assertEqual(obj.structure, self.user.profile.structure)

    def test_structure_is_not_changed_without_permission(self):
        # Without can_bypass_structure, posting another structure is ignored.
        if not hasattr(self.model, 'structure'):
            return
        self.login()
        structure = StructureFactory()
        self.assertNotEqual(structure, self.user.profile.structure)
        self.assertFalse(self.user.has_perm('authent.can_bypass_structure'))
        obj = self.modelfactory.create(structure=structure)
        data = self.get_good_data()
        data['structure'] = self.user.profile.structure.pk
        result = self.client.post(obj.get_update_url(), data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(self.model.objects.first().structure, structure)
        self.logout()

    def test_structure_is_changed_with_permission(self):
        # With can_bypass_structure, the posted structure is applied.
        if not hasattr(self.model, 'structure'):
            return
        self.login()
        perm = Permission.objects.get(codename='can_bypass_structure')
        self.user.user_permissions.add(perm)
        structure = StructureFactory()
        self.assertNotEqual(structure, self.user.profile.structure)
        obj = self.modelfactory.create(structure=structure)
        data = self.get_good_data()
        data['structure'] = self.user.profile.structure.pk
        result = self.client.post(obj.get_update_url(), data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(self.model.objects.first().structure, self.user.profile.structure)
        self.logout()
self.logout() | # -*- encoding: utf-8 -*-
from django.utils import translation
from django.utils.translation import ugettext as _
# Workaround https://code.djangoproject.com/ticket/22865
from geotrek.common.models import FileType # NOQA
from mapentity.tests import MapEntityTest
from geotrek.authent.factories import StructureFactory
from geotrek.authent.tests import AuthentFixturesTest
class TranslationResetMixin(object):
    """Test mixin that deactivates the active translation before each test."""
    def setUp(self):
        translation.deactivate()
        super(TranslationResetMixin, self).setUp()
class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest):
    """Shared MapEntity test case checking structure assignment rules."""
    api_prefix = '/api/en/'

    def get_bad_data(self):
        return {'topology': 'doh!'}, _(u'Topology is not valid.')

    def test_structure_is_set(self):
        # Creating an object must record the creator's structure on it.
        if not hasattr(self.model, 'structure'):
            return
        self.login()
        response = self.client.post(self._get_add_url(), self.get_good_data())
        self.assertEqual(response.status_code, 302)
        obj = self.model.objects.last()
        self.assertEqual(obj.structure, self.user.profile.structure)

    def test_structure_is_not_changed(self):
        # Posting an update must not alter the object's structure.
        if not hasattr(self.model, 'structure'):
            return
        self.login()
        structure = StructureFactory()
        self.assertNotEqual(structure, self.user.profile.structure)
        obj = self.modelfactory.create(structure=structure)
        self.client.post(obj.get_update_url(), self.get_good_data())
        # NOTE(review): asserts on the stale in-memory instance; refetching
        # from the DB would make this a stronger check.
        self.assertEqual(obj.structure, structure)
| bsd-2-clause | Python |
5fef2befafa70e4a8a393f0479cbdf8648f5db78 | Convert Breadcrumbs to Titlecase, closes #48 | kaozente/MusicMashup,kaozente/MusicMashup,kaozente/MusicMashup | MusicMashupServer.py | MusicMashupServer.py | # server shizzle
import cherrypy
import os
# eigentliche arbeit macht die artist klasse
from MusicMashupArtist import MusicMashupArtist
# template gedoens
from mako.template import Template
from mako.lookup import TemplateLookup
from titlecase import titlecase
# fuer history generation
from urllib import quote_plus
class MusicMashupServer(object):
    """CherryPy application serving the MusicMashup search and result pages."""
    def __init__(self):
        pass

    @cherrypy.expose  # mapped to a URL by cherrypy
    def index(self, query="", soloartist=0):
        # initialize mako (template engine)
        lookup = TemplateLookup(directories=['html'])
        # show search page if no query has been made
        if query == "":
            print "[~] No query given, serving search page"
            tmpl = lookup.get_template("search.htm")
            return tmpl.render()
        # query is present
        else:
            # create musicmashup object based on query:
            self.artist = MusicMashupArtist(query)
            # add new query to breadcrumbs list. create as list if not present
            if not "history" in cherrypy.session:
                cherrypy.session['history'] = []
            # new search -> new breadcrumbs
            if not query[:4] == "http":
                cherrypy.session['history'] = []
                # also, if name rather than query, convert to titlecase
                query = titlecase(query)
            # append newest query to list, template will determine if it's a URI or name
            if not (len(cherrypy.session['history']) > 0 and cherrypy.session['history'][-1] == query):
                cherrypy.session['history'].append(query)
            # make sure the list has no more than maxentries entries
            maxentries = 10
            if len(cherrypy.session['history']) > maxentries:
                # NOTE(review): '.pop' is referenced but never called, so the
                # history is never actually trimmed -- confirm whether
                # pop(0) (drop oldest) was intended.
                cherrypy.session['history'].pop
            # load mako templates
            tmpl = lookup.get_template("main.htm")
            # add whole Artist object and history array from sessions
            return tmpl.render(artist=self.artist, history=cherrypy.session['history'])
# End of class
if __name__ == '__main__':
    print ("[~] Initializing...")
    # bind to all IPv4 interfaces
    cherrypy.config.update({'server.socket_host': '0.0.0.0'})
    # Enable sessions (for breadcrumbs) and serve ./static as /static.
    conf = {
        '/': {
            'tools.sessions.on': True,
            'tools.staticdir.root': os.path.abspath(os.getcwd())
        },
        '/static': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': './static'
        }
    }
    cherrypy.quickstart(MusicMashupServer(), '/', conf)
import cherrypy
import os
# eigentliche arbeit macht die artist klasse
from MusicMashupArtist import MusicMashupArtist
# template gedoens
from mako.template import Template
from mako.lookup import TemplateLookup
# fuer history generation
from urllib import quote_plus
class MusicMashupServer(object):
    """CherryPy application serving the MusicMashup search and result pages."""
    def __init__(self):
        pass

    @cherrypy.expose  # mapped to a URL by cherrypy
    def index(self, query="", soloartist=0):
        # initialize mako (template engine)
        lookup = TemplateLookup(directories=['html'])
        # show search page if no query has been made
        if query == "":
            print "[~] No query given, serving search page"
            tmpl = lookup.get_template("search.htm")
            return tmpl.render()
        # query is present
        else:
            # create musicmashup object based on query:
            self.artist = MusicMashupArtist(query)
            # add new query to breadcrumbs list. create as list if not present
            if not "history" in cherrypy.session:
                cherrypy.session['history'] = []
            # new search -> new breadcrumbs
            if not query[:4] == "http":
                cherrypy.session['history'] = []
            # append newest query to list, template will determine if it's a URI or name
            if not (len(cherrypy.session['history']) > 0 and cherrypy.session['history'][-1] == query):
                cherrypy.session['history'].append(query)
            # make sure the list has no more than maxentries entries
            maxentries = 10
            if len(cherrypy.session['history']) > maxentries:
                # NOTE(review): '.pop' is referenced but never called, so the
                # history is never actually trimmed.
                cherrypy.session['history'].pop
            # load mako templates
            tmpl = lookup.get_template("main.htm")
            # add whole Artist object and history array from sessions
            return tmpl.render(artist=self.artist, history=cherrypy.session['history'])
# End of class
if __name__ == '__main__':
    print ("[~] Initializing...")
    # bind to all IPv4 interfaces
    cherrypy.config.update({'server.socket_host': '0.0.0.0'})
    # Enable sessions (for breadcrumbs) and serve ./static as /static.
    conf = {
        '/': {
            'tools.sessions.on': True,
            'tools.staticdir.root': os.path.abspath(os.getcwd())
        },
        '/static': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': './static'
        }
    }
cherrypy.quickstart(MusicMashupServer(), '/', conf) | mit | Python |
d6c5b9a19921ce2c1e3f2e30d6ce0468a5305e80 | Test with Flask-Testing to test POST request | terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python | myflaskapp/tests/test_flask_testing.py | myflaskapp/tests/test_flask_testing.py | from flask import Flask
from flask_testing import LiveServerTestCase
from myflaskapp.app import create_app
from myflaskapp.settings import DevConfig, ProdConfig
import requests
import datetime as dt
import pytest
from myflaskapp.user.models import Role, User
from myflaskapp.item.models import Item
from .factories import UserFactory
import requests
from bs4 import BeautifulSoup
import datetime as dt
class MyTest(LiveServerTestCase):
    """Live-server tests: boots the Flask app and exercises it over HTTP."""
    def create_app(self):
        # App factory used by LiveServerTestCase; fixed port for the server.
        app = create_app(DevConfig)
        app.config['TESTING'] = True
        app.config['LIVESERVER_PORT'] = 55531
        return app

    def test_server_is_up_and_running(self):
        #server_url = self.get_server_url()
        #print("SERVER URL:{}".format(server_url))
        response = requests.get(self.get_server_url())
        self.assertEqual(response.status_code, 200)

    def test_home_page_can_save_a_POST_request(self):
        # Scrape the CSRF token from the form, then POST a new list item.
        test_list_data = "TERRY BATES MFR"
        client = requests.session()
        url = self.get_server_url() + '/lists/'
        print(url)
        get_result = client.get(url)
        soup = BeautifulSoup(get_result.text, 'html.parser')
        # Find the input tag
        # NOTE(review): grabs the *first* <input> on the page and assumes it
        # is the CSRF field; 'input' also shadows the builtin of that name.
        input = soup.input
        csrf_token_value = input['value']
        print(input['value'])
        # Create dictionary to store POST params
        list_data = {'todo-item':test_list_data, 'todo-csrf_token':csrf_token_value}
        post_result = client.post(url, data=list_data, headers=dict(Referer=url))
        # Assert that our test data is in result
        assert test_list_data in post_result.text
        # Assert we have one item in DB
        #assert len(Item.query.all()) == 1
| from flask import Flask
from flask_testing import LiveServerTestCase
from myflaskapp.app import create_app
from myflaskapp.settings import DevConfig, ProdConfig
import requests
class MyTest(LiveServerTestCase):
    """Live-server smoke test: boots the Flask app and checks it responds."""
    def create_app(self):
        # App factory used by LiveServerTestCase; fixed port for the server.
        app = create_app(DevConfig)
        app.config['TESTING'] = True
        app.config['LIVESERVER_PORT'] = 55531
        return app

    def test_server_is_up_and_running(self):
        #server_url = self.get_server_url()
        #print("SERVER URL:{}".format(server_url))
        response = requests.get(self.get_server_url())
        self.assertEqual(response.status_code, 200)
| mit | Python |
bf2b6036a93db9314a1b0facf1865d74204a7c85 | add klarinetta/chaunter/suzu to character list | incnone/necrobot | necrobot/util/necrodancer/character.py | necrobot/util/necrodancer/character.py | from enum import Enum
class NDChar(Enum):
    """Playable Crypt of the NecroDancer characters (plus crossover guests)."""
    Cadence = 0
    Melody = 1
    Aria = 2
    Dorian = 3
    Eli = 4
    Monk = 5
    Dove = 6
    Coda = 7
    Bolt = 8
    Bard = 9
    Story = 10
    All = 11
    Nocturna = 12
    Diamond = 13
    Multichar = 14
    Tempo = 15
    Mary = 16
    Reaper = 17
    Klarinetta = 18
    Chaunter = 19
    Suzu = 20
    Coh = 101
    Link = 102
    Zelda = 103

    def __str__(self):
        return self.name

    @property
    def levels_reversed(self):
        # Aria plays the zones in reverse order.
        return self is NDChar.Aria

    @staticmethod
    def fromstr(char_name):
        """Look up a member by case-insensitive name; None if unknown."""
        wanted = char_name.capitalize()
        return next((member for member in NDChar if member.name == wanted), None)
| from enum import Enum
class NDChar(Enum):
    """Playable Crypt of the NecroDancer characters (plus crossover guests)."""
    Cadence = 0
    Melody = 1
    Aria = 2
    Dorian = 3
    Eli = 4
    Monk = 5
    Dove = 6
    Coda = 7
    Bolt = 8
    Bard = 9
    Story = 10
    All = 11
    Nocturna = 12
    Diamond = 13
    Multichar = 14
    Tempo = 15
    Mary = 16
    Reaper = 17
    Coh = 101
    Link = 102
    Zelda = 103

    def __str__(self):
        return self.name

    @property
    def levels_reversed(self):
        # Aria plays the zones in reverse order.
        return self == NDChar.Aria

    @staticmethod
    def fromstr(char_name):
        # Case-insensitive member lookup; returns None when unknown.
        for ndchar in NDChar:
            if ndchar.name == char_name.capitalize():
                return ndchar
        return None
| mit | Python |
cd931bccbcad189b01673e4aed8b5cf26d9e5324 | Update the version number | copasi/condor-copasi,Nucleoos/condor-copasi,Nucleoos/condor-copasi,copasi/condor-copasi | web_frontend/version.py | web_frontend/version.py | version = '0.3.0 beta'
| version = '0.2.1 beta'
| artistic-2.0 | Python |
3d805e8aac81df0c0c35cfc4ddc37489225b6464 | add conn close to example file | openego/dingo,openego/dingo | examples/example_single_grid_district.py | examples/example_single_grid_district.py | #!/usr/bin/env python3
"""This is a simple example file for DINGO.
__copyright__ = "Reiner Lemoine Institut, openego development group"
__license__ = "GNU GPLv3"
__author__ = "Jonathan Amme, Guido Pleßmann"
"""
# ===== IMPORTS AND CONFIGURATION =====
# import DB interface from oemof
import oemof.db as db
# import required modules of DINGO
from dingo.core import NetworkDingo
from dingo.tools import config as cfg_dingo, results
from dingo.tools.logger import setup_logger
# define logger
logger = setup_logger()
# load parameters from configs
cfg_dingo.load_config('config_db_tables.cfg')
cfg_dingo.load_config('config_calc.cfg')
cfg_dingo.load_config('config_files.cfg')
cfg_dingo.load_config('config_misc.cfg')
# ===== MAIN =====
# database connection
conn = db.connection(section='oedb')
# instantiate new dingo network object
nd = NetworkDingo(name='network')
# choose MV Grid Districts to import
mv_grid_districts = [3545]
# run DINGO on selected MV Grid District
nd.run_dingo(conn=conn,
             mv_grid_districts_no=mv_grid_districts)
# export grids to database
# Circuit breakers are closed first so the grid is exported in that state.
nd.control_circuit_breakers(mode='close')
nd.export_mv_grid(conn, mv_grid_districts)
nd.export_mv_grid_new(conn, mv_grid_districts)
conn.close() | #!/usr/bin/env python3
"""This is a simple example file for DINGO.
__copyright__ = "Reiner Lemoine Institut, openego development group"
__license__ = "GNU GPLv3"
__author__ = "Jonathan Amme, Guido Pleßmann"
"""
# ===== IMPORTS AND CONFIGURATION =====
# import DB interface from oemof
import oemof.db as db
# import required modules of DINGO
from dingo.core import NetworkDingo
from dingo.tools import config as cfg_dingo, results
from dingo.tools.logger import setup_logger
# define logger
logger = setup_logger()
# load parameters from configs
cfg_dingo.load_config('config_db_tables.cfg')
cfg_dingo.load_config('config_calc.cfg')
cfg_dingo.load_config('config_files.cfg')
cfg_dingo.load_config('config_misc.cfg')
# ===== MAIN =====
# database connection
conn = db.connection(section='oedb')
# instantiate new dingo network object
nd = NetworkDingo(name='network')
# choose MV Grid Districts to import
mv_grid_districts = [3545]
# run DINGO on selected MV Grid District
nd.run_dingo(conn=conn,
             mv_grid_districts_no=mv_grid_districts)
# export grids to database
# Circuit breakers are closed first so the grid is exported in that state.
nd.control_circuit_breakers(mode='close')
nd.export_mv_grid(conn, mv_grid_districts)
nd.export_mv_grid_new(conn, mv_grid_districts)
# Release the database connection once the export is finished (the script
# previously left it open).
conn.close()
| agpl-3.0 | Python |
da8618b931ef3a845b77610ab84144687f0d1076 | Update name of filepath | dave-lab41/pelops,Lab41/pelops,Lab41/pelops,d-grossman/pelops,dave-lab41/pelops,d-grossman/pelops | pelops/etl/extract_feats_from_chips.py | pelops/etl/extract_feats_from_chips.py | import numpy as np
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
from keras.models import Model
def load_image(img_path, resizex=224, resizey=224):
    """Load an image, resize it, and preprocess it for ResNet50 inference."""
    data = image.load_img(img_path, target_size=(resizex, resizey))
    x = image.img_to_array(data)
    # Add a leading batch dimension expected by model.predict.
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
# load the imagenet networks
def get_models():
# include_top needs to be True for this to work
base_model = ResNet50(weights='imagenet', include_top=True)
model = Model(input=base_model.input,
output=base_model.get_layer('flatten_1').output)
return (model, base_model)
# return feature vector for a given img, and model
def image_features(img, model):
features = model.predict(img)
return features
def extract_feats_from_chips(chipbase, output_fname):
    """Extract a ResNet50 feature vector for every chip in *chipbase*.

    Returns (chips, features) where features[i] is the 2048-d float16
    vector for chips[i].
    """
    # NOTE(review): output_fname is accepted but never used -- confirm
    # whether the features were meant to be saved here.
    model, base_model = get_models()
    features = np.zeros((len(chipbase), 2048), dtype=np.float16)
    chips = []
    for index, chip in enumerate(chipbase):
        chips.append(chip)
        img_path = chip.filepath
        img_data = load_image(img_path)
        features[index] = image_features(img_data, model)
    return (chips, features)
| import numpy as np
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
from keras.models import Model
def load_image(img_path, resizex=224, resizey=224):
    """Load an image, resize it, and preprocess it for ResNet50 inference."""
    data = image.load_img(img_path, target_size=(resizex, resizey))
    x = image.img_to_array(data)
    # Add a leading batch dimension expected by model.predict.
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
# load the imagenet networks
def get_models():
# include_top needs to be True for this to work
base_model = ResNet50(weights='imagenet', include_top=True)
model = Model(input=base_model.input,
output=base_model.get_layer('flatten_1').output)
return (model, base_model)
# return feature vector for a given img, and model
def image_features(img, model):
features = model.predict(img)
return features
def extract_feats_from_chips(chipbase, output_fname):
    """Extract a ResNet50 feature vector for every chip in *chipbase*.

    Returns (chips, features) where features[i] is the 2048-d float16
    vector for chips[i].
    """
    # NOTE(review): output_fname is accepted but never used -- confirm
    # whether the features were meant to be saved here.
    model, base_model = get_models()
    features = np.zeros((len(chipbase), 2048), dtype=np.float16)
    chips = []
    for index, chip in enumerate(chipbase):
        chips.append(chip)
        img_path = chipbase.get_chip_image_path(chip)
        img_data = load_image(img_path)
        features[index] = image_features(img_data, model)
    return (chips, features)
| apache-2.0 | Python |
3e4edb97ac94b75c7e3a52a6e055b76ad1fbf894 | Make template tag tests use mock model classes | wylee/django-perms,PSU-OIT-ARC/django-perms | permissions/tests/test_templatetags.py | permissions/tests/test_templatetags.py | from django.test import TestCase
from django.template import Context, Template
from permissions import PermissionsRegistry
from .base import Model, User
def can_do(user):
    """Permission predicate: True when *user* holds the 'can_do' permission."""
    granted = user.permissions
    return any(perm == 'can_do' for perm in granted)
def can_do_with_model(user, instance):
    """Permission predicate: True when *user* holds 'can_do_with_model'.

    The model *instance* is accepted for the filter signature but unused.
    """
    return any(perm == 'can_do_with_model' for perm in user.permissions)
class TestTemplateTags(TestCase):
    """Render a template using the permission filters and check the output."""
    def setUp(self):
        # Register both permissions, then build a template exercising them.
        self.registry = PermissionsRegistry()
        self.registry.register(can_do)
        self.registry.register(can_do_with_model, model=Model)
        self.template = Template(
            '{% load permissions %}'
            '{% if user|can_do %}can_do{% endif %}'
            '{% if user|can_do_with_model:instance %}can_do_with_model{% endif %}'
        )

    def test_can_do(self):
        user = User(permissions=['can_do'])
        context = Context({'user': user})
        result = self.template.render(context)
        self.assertIn('can_do', result)

    def test_cannot_do(self):
        user = User()
        context = Context({'user': user})
        result = self.template.render(context)
        self.assertNotIn('can_do', result)

    def test_can_do_with_model(self):
        user = User(permissions=['can_do_with_model'])
        context = Context({'user': user, 'instance': Model()})
        result = self.template.render(context)
        self.assertIn('can_do_with_model', result)

    def test_cannot_do_with_model(self):
        user = User()
        context = Context({'user': user, 'instance': Model()})
        result = self.template.render(context)
        self.assertNotIn('can_do_with_model', result)
| from django.test import TestCase
from django.template import Context, Template
from permissions import PermissionsRegistry
class Model:
pass
def can_do(user):
return user is not None
def can_do_with_model(user, instance):
return None not in (user, instance)
class TestTemplateTags(TestCase):
def setUp(self):
self.registry = PermissionsRegistry()
self.registry.register(can_do)
self.registry.register(can_do_with_model, model=Model)
self.template = Template(
'{% load permissions %}'
'{% if user|can_do %}can_do{% endif %}'
'{% if user|can_do_with_model:instance %}can_do_with_model{% endif %}'
)
def test_can_do(self):
context = Context({'user': object(), 'instance': None})
result = self.template.render(context)
self.assertIn('can_do', result)
def test_cannot_do(self):
context = Context({'user': None, 'instance': None})
result = self.template.render(context)
self.assertNotIn('can_do', result)
def test_can_do_with_model(self):
context = Context({'user': object(), 'instance': object()})
result = self.template.render(context)
self.assertIn('can_do_with_model', result)
def test_cannot_do_with_model(self):
context = Context({'user': None, 'instance': object()})
result = self.template.render(context)
self.assertNotIn('can_do_with_model', result)
| mit | Python |
cd34f60c6018241770b5d5dc1ae6833a28b0db36 | Make VersionUpgrade a class | onitake/Uranium,onitake/Uranium | UM/VersionUpgrade.py | UM/VersionUpgrade.py | # Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.Logger import Logger
from UM.PluginObject import PluginObject
## A type of plug-in that upgrades the configuration from an old file format to
# a newer one.
#
# Each version upgrade plug-in can convert machine instances, preferences and
# profiles from one version to one other version. Which versions that are is
# specified in the metadata of the plug-in.
class VersionUpgrade(PluginObject):
## Initialises a version upgrade plugin instance.
def __init__(self):
super().__init__(self)
## Upgrades a machine instance file from one file format to another.
#
# This parses the serialised data of a machine instance and converts it to
# a serialised form of the new file format.
#
# \param serialised A machine instance, serialised in an old file format.
# \return A machine instance, serialised in a newer file format.
def upgradeMachineInstance(self, serialised):
Logger.log("w", "This version upgrade plug-in defines no way to upgrade machine instances.") #A subclass should implement this.
## Upgrades a preferences file from one file format to another.
#
# This parses the serialised data of a preferences file and converts it to
# a serialised form of the new file format.
#
# \param serialised A preferences file, serialised in an old file format.
# \return A preferences file, serialised in a newer file format.
def upgradePreferences(self, serialised):
Logger.log("w", "This version upgrade plug-in defines no way to upgrade preferences.") #A subclass should implement this.
## Upgrades a profile from one file format to another.
#
# This parses the serialised data of a profile and converts it to a
# serialised form of the new file format.
#
# \param serialised A profile, serialised in an old file format.
# \return A profile, serialised in a newer file format.
def upgradeProfile(self, serialised):
Logger.log("w", "This version upgrade plug-in defines no way to upgrade profiles.") #A subclass should implement this. | # Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.Logger import Logger
from UM.PluginObject import PluginObject
## A type of plug-in that upgrades the configuration from an old file format to
# a newer one.
#
# Each version upgrade plug-in can convert machine instances, preferences and
# profiles from one version to one other version. Which versions that are is
# specified in the metadata of the plug-in.
def VersionUpgrade(PluginObject):
## Initialises a version upgrade plugin instance.
def __init__(self):
super().__init__(self)
## Upgrades a machine instance file from one file format to another.
#
# This parses the serialised data of a machine instance and converts it to
# a serialised form of the new file format.
#
# \param serialised A machine instance, serialised in an old file format.
# \return A machine instance, serialised in a newer file format.
def upgradeMachineInstance(self, serialised):
Logger.log("w", "This version upgrade plug-in defines no way to upgrade machine instances.") #A subclass should implement this.
## Upgrades a preferences file from one file format to another.
#
# This parses the serialised data of a preferences file and converts it to
# a serialised form of the new file format.
#
# \param serialised A preferences file, serialised in an old file format.
# \return A preferences file, serialised in a newer file format.
def upgradePreferences(self, serialised):
Logger.log("w", "This version upgrade plug-in defines no way to upgrade preferences.") #A subclass should implement this.
## Upgrades a profile from one file format to another.
#
# This parses the serialised data of a profile and converts it to a
# serialised form of the new file format.
#
# \param serialised A profile, serialised in an old file format.
# \return A profile, serialised in a newer file format.
def upgradeProfile(self, serialised):
Logger.log("w", "This version upgrade plug-in defines no way to upgrade profiles.") #A subclass should implement this. | agpl-3.0 | Python |
d4e4fbe945339f650294fa986834c023265164df | add views config to default config in proto | aacanakin/glim | glim/proto/project/app/config/default.py | glim/proto/project/app/config/default.py | facades = [
# bunch of services to be loaded up when web server starts
]
extensions = [
# bunch of extensions to be loaded up when web server starts
# 'gredis'
]
config = {
'extensions' : {
# 'gredis' : {
# 'default' : {
# 'host' : 'localhost',
# 'port' : '1234',
# 'db' : 0
# }
# }
},
# database configuration
'db' : {
'default' : {
'driver' : 'mysql',
'host' : 'localhost',
'schema' : 'test',
'user' : 'root',
'password' : '',
},
},
# app specific configurations
# reloader: detects changes in the code base and automatically restarts web server
# debugger: enable werkzeug's default debugger
'glim' : {
'reloader' : True,
'debugger' : True,
'sessions' : {
'id_header' : 'glim_session',
'path' : 'app/storage/sessions'
},
'views' : {
'path' : 'app/views'
}
}
} | facades = [
# bunch of services to be loaded up when web server starts
]
extensions = [
# bunch of extensions to be loaded up when web server starts
# 'gredis'
]
config = {
'extensions' : {
# 'gredis' : {
# 'default' : {
# 'host' : 'localhost',
# 'port' : '1234',
# 'db' : 0
# }
# }
},
# database configuration
'db' : {
'default' : {
'driver' : 'mysql',
'host' : 'localhost',
'schema' : 'test',
'user' : 'root',
'password' : '',
},
},
# app specific configurations
# reloader: detects changes in the code base and automatically restarts web server
# debugger: enable werkzeug's default debugger
'glim' : {
'reloader' : True,
'debugger' : True,
'sessions' : {
'id_header' : 'glim_session',
'path' : 'app/storage/sessions'
}
}
} | mit | Python |
94189ae76707942cb73be6517466768847e22a3c | solve string2 | haozai309/hello_python | google-python-exercises/basic/string2.py | google-python-exercises/basic/string2.py | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
if len(s) >= 3:
if s.endswith("ing"):
s += "ly"
else:
s += "ing"
return s
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
start = s.find("not")
end = s.find("bad")
if start != -1 and end > start:
s = s[:start] + "good" + s[end+3:]
return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
div_a = (len(a) + 1) / 2
div_b = (len(b) + 1) / 2
return a[:div_a] + b[:div_b] + a[div_a:] + b[div_b:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
return
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
return
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
return
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| apache-2.0 | Python |
f28c46cdab80ff72859007939874bae90b56f03c | Remove unused | svenvandescheur/svenv.nl-app,svenvandescheur/svenv.nl-app,svenvandescheur/svenv.nl-app,svenvandescheur/svenv.nl-app | svenv/blog/views.py | svenv/blog/views.py | from django.views import generic
from blog.models import Blog
class IndexView(generic.ListView):
template_name = 'blog/index.html'
context_object_name = 'blog_list'
def get_queryset(self):
return Blog.objects.order_by('date').reverse() | from django.shortcuts import render
from django.views import generic
from blog.models import Blog
class IndexView(generic.ListView):
template_name = 'blog/index.html'
context_object_name = 'blog_list'
def get_queryset(self):
return Blog.objects.order_by('date').reverse() | mit | Python |
0546126cc5fb1a138c96481061084e9e5fc86e34 | Bump version | matrix-org/synapse,illicitonion/synapse,howethomas/synapse,matrix-org/synapse,howethomas/synapse,matrix-org/synapse,iot-factory/synapse,TribeMedia/synapse,rzr/synapse,matrix-org/synapse,illicitonion/synapse,howethomas/synapse,rzr/synapse,illicitonion/synapse,howethomas/synapse,iot-factory/synapse,iot-factory/synapse,rzr/synapse,illicitonion/synapse,howethomas/synapse,TribeMedia/synapse,iot-factory/synapse,rzr/synapse,TribeMedia/synapse,rzr/synapse,iot-factory/synapse,TribeMedia/synapse,matrix-org/synapse,matrix-org/synapse,illicitonion/synapse,TribeMedia/synapse | synapse/__init__.py | synapse/__init__.py | # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.7.0c"
| # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.7.0b"
| apache-2.0 | Python |
6edec8cba26259d0e05bfceb34329e28d9960b03 | bump version | billyshambrook/taskman | taskman/__init__.py | taskman/__init__.py | __version__ = '0.0.3'
| __version__ = '0.0.2'
| bsd-2-clause | Python |
8a7e81bd4725282a907532dd7419a44a0614c08b | Use referrers not path's on the top referrers list | gnublade/django-request,kylef/django-request,gnublade/django-request,kylef/django-request,kylef/django-request,gnublade/django-request | request/views.py | request/views.py | from datetime import datetime, timedelta, date
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
from django.utils import simplejson, importlib
from request.models import Request
from request import settings
def set_count(items):
item_count = {}
for item in items:
if not item: continue
if not item_count.has_key(item): item_count[item] = 0
item_count[item] += 1
items = [(v, k) for k, v in item_count.iteritems()]
items.sort()
items.reverse()
return [(k, v) for v, k in items]
def overview(request):
days = [date.today()-timedelta(day) for day in range(30)]
browsers = set_count(Request.objects.attr_list('browser'))[:5]
return render_to_response('admin/request/overview.html', {
'title': _('Request overview'),
'lastest_requests': Request.objects.all()[:5],
'info_table': (
(_('Unique visitors'), [getattr(Request.objects.all(), x, None)().aggregate(Count('ip', distinct=True))['ip__count'] for x in 'today', 'this_week', 'this_month', 'this_year', 'all']),
(_('Unique visits'), [getattr(Request.objects.unique_visits(), x, None)().count() for x in 'today', 'this_week', 'this_month', 'this_year', 'all']),
(_('Hits'), [getattr(Request.objects.all(), x, None)().count() for x in 'today', 'this_week', 'this_month', 'this_year', 'all'])
),
'traffic_graph': simplejson.dumps([getattr(importlib.import_module(module_path[:module_path.rindex('.')]), module_path[module_path.rindex('.')+1:], None)(days) for module_path in settings.REQUEST_TRAFFIC_GRAPH_MODULES]),
'top_paths': set_count(Request.objects.filter(response__lt=400).values_list('path', flat=True))[:10],
'top_error_paths': set_count(Request.objects.filter(response__gte=400).values_list('path', flat=True))[:10],
'top_referrers': set_count(Request.objects.unique_visits().values_list('referer', flat=True))[:10],
'top_browsers': 'http://chart.apis.google.com/chart?cht=p3&chd=t:%s&chs=440x190&chl=%s' % (','.join([str(browser[1]) for browser in browsers]), '|'.join([browser[0] for browser in browsers])),
'requests_url': '/admin/request/request/',
'use_hosted_media': settings.REQUEST_USE_HOSTED_MEDIA
}, context_instance=RequestContext(request))
| from datetime import datetime, timedelta, date
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
from django.utils import simplejson, importlib
from request.models import Request
from request import settings
def set_count(items):
item_count = {}
for item in items:
if not item: continue
if not item_count.has_key(item): item_count[item] = 0
item_count[item] += 1
items = [(v, k) for k, v in item_count.iteritems()]
items.sort()
items.reverse()
return [(k, v) for v, k in items]
def overview(request):
days = [date.today()-timedelta(day) for day in range(30)]
browsers = set_count(Request.objects.attr_list('browser'))[:5]
return render_to_response('admin/request/overview.html', {
'title': _('Request overview'),
'lastest_requests': Request.objects.all()[:5],
'info_table': (
(_('Unique visitors'), [getattr(Request.objects.all(), x, None)().aggregate(Count('ip', distinct=True))['ip__count'] for x in 'today', 'this_week', 'this_month', 'this_year', 'all']),
(_('Unique visits'), [getattr(Request.objects.unique_visits(), x, None)().count() for x in 'today', 'this_week', 'this_month', 'this_year', 'all']),
(_('Hits'), [getattr(Request.objects.all(), x, None)().count() for x in 'today', 'this_week', 'this_month', 'this_year', 'all'])
),
'traffic_graph': simplejson.dumps([getattr(importlib.import_module(module_path[:module_path.rindex('.')]), module_path[module_path.rindex('.')+1:], None)(days) for module_path in settings.REQUEST_TRAFFIC_GRAPH_MODULES]),
'top_paths': set_count(Request.objects.filter(response__lt=400).values_list('path', flat=True))[:10],
'top_error_paths': set_count(Request.objects.filter(response__gte=400).values_list('path', flat=True))[:10],
'top_referrers': set_count(Request.objects.unique_visits().values_list('path', flat=True))[:10],
'top_browsers': 'http://chart.apis.google.com/chart?cht=p3&chd=t:%s&chs=440x190&chl=%s' % (','.join([str(browser[1]) for browser in browsers]), '|'.join([browser[0] for browser in browsers])),
'requests_url': '/admin/request/request/',
'use_hosted_media': settings.REQUEST_USE_HOSTED_MEDIA
}, context_instance=RequestContext(request))
| bsd-2-clause | Python |
7e25fea1c00c298355fbc58528d307816d01c261 | Update example2.py | progress/WhyABL,progress/WhyABL,progress/WhyABL | TempTables/example2.py | TempTables/example2.py | # Why ABL Example
# Authors: Bill Wood, Alan Estrada
# File Name: BasicQuery/example2.py
import MySQLdb as mdb
class TempTable:
customerNumber = 0
email = 0
with open('input.txt', 'r') as f:
lines = f.readlines()
temptable = []
for line in lines:
words = line.split()
tt = TempTable()
tt.customerNumber = int(words[0])
tt.email = int(words[1])
temptable.append(tt)
try:
db = mdb.connect('localhost', 'root', '', 'classicmodels')
cur = db.cursor()
# After being given a list of customers and the emails of their new sales reps,
# iterate through each customer and assign them the sales rep that the email
# belongs to.
#
# This is analagous to the ABL code:
# FOR EACH tt:
# FIND Customers WHERE Customers.customerNumber = tt.customerNumber.
# FIND employees WHERE employees.email = tt.email.
# Customers.salesRepEmployeeNumber = employees.employeeNumber.
# END.
for tt in temptable:
cur.execute("UPDATE customers " + \
"SET salesRepEmployeeNumber = " + \
"(SELECT employeeNumber FROM employees WHERE email = {}) ".format(tt.email) + \
"WHERE customerNumber = {}".format(tt.customerNumber))
db.commit()
print "{} rows updated".format(cur.rowcount)
except Exception as e:
db.rollback()
print e
finally:
if cur:
cur.close()
if db:
db.close()
| import MySQLdb as mdb
class TempTable:
customerNumber = 0
salesRep = 0
with open('input.txt', 'r') as f:
lines = f.readlines()
temptable = []
for line in lines:
words = line.split()
tt = TempTable()
tt.customerNumber = int(words[0])
tt.salesRep = int(words[1])
temptable.append(tt)
try:
db = mdb.connect('localhost', 'root', '', 'classicmodels')
cur = db.cursor()
for tt in temptable:
cur.execute("UPDATE customers " + \
"SET salesRepEmployeeNumber = " + \
"(SELECT employeeNumber FROM employees WHERE employeeNumber = {}) ".format(tt.salesRep) + \
"WHERE customerNumber = {}".format(tt.salesRep))
db.commit()
print "{} rows updated".format(cur.rowcount)
except Exception as e:
db.rollback()
print e
finally:
if cur:
cur.close()
if db:
db.close()
| apache-2.0 | Python |
a1ec0d3eb543ab704a4a4db237571ae849548a05 | add model: PartnerReview | unicef/un-partner-portal,unicef/un-partner-portal,unicef/un-partner-portal,unicef/un-partner-portal | backend/unpp_api/apps/partner/models.py | backend/unpp_api/apps/partner/models.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from model_utils.models import TimeStampedModel
class Partner(TimeStampedModel):
"""
"""
legal_name = models.CharField(max_length=255)
# display_type = International, national
hq = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
country = models.ForeignKey('common.Country', related_name="partners")
class Meta:
ordering = ['id']
def __str__(self):
return "Partner: {} <pk:{}>".format(self.name, self.id)
class PartnerProfile(TimeStampedModel):
"""
"""
partner = models.ForeignKey(Partner, related_name="profile")
alias_name = models.CharField(max_length=255, null=True, blank=True)
former_legal_name = models.CharField(max_length=255, null=True, blank=True)
org_head_first_name = models.CharField(max_length=255, null=True, blank=True)
org_head_last_name = models.CharField(max_length=255, null=True, blank=True)
org_head_email = models.EmailField(max_length=255, null=True, blank=True)
class Meta:
ordering = ['id']
def __str__(self):
return "PartnerProfile <pk:{}>".format(self.id)
class PartnerMember(TimeStampedModel):
"""
"""
user = models.ForeignKey('account.User', related_name="partner_members")
partner_profile = models.ForeignKey(PartnerProfile, related_name="partner_members")
title = models.CharField(max_length=255)
# role = ??? the same that we have in agency?
class Meta:
ordering = ['id']
def __str__(self):
return "PartnerMember: {} <pk:{}>".format(self.title, self.id)
class PartnerReview(TimeStampedModel):
partner = models.ForeignKey(Partner, related_name="reviews")
agency = models.ForeignKey('agency.Agency', related_name="partner_reviews")
reviewer = models.ForeignKey('account.User', related_name="partner_reviews")
# display_type = TODO: need to get !
eoi = models.ForeignKey('project.EOI', related_name="partner_reviews")
# performance_pm = Highly satisfactory, satisfactory, not satisfactory
# peformance_financial = Highly satisfactory, satisfactory, not satisfactory
# performance_com_eng = Highly satisfactory, satisfactory, not satisfactory
ethical_concerns = models.BooleanField(default=False, verbose_name='Ethical concerns?')
does_recommend = models.BooleanField(default=False, verbose_name='Does recommend?')
comment = models.TextField()
class Meta:
ordering = ['id']
def __str__(self):
return "PartnerReview <pk:{}>".format(self.id)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from model_utils.models import TimeStampedModel
class Partner(TimeStampedModel):
"""
"""
legal_name = models.CharField(max_length=255)
# display_type = International, national
hq = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
country = models.ForeignKey('common.Country', related_name="partners")
class Meta:
ordering = ['id']
def __str__(self):
return "Partner: {} <pk:{}>".format(self.name, self.id)
class PartnerProfile(TimeStampedModel):
"""
"""
partner = models.ForeignKey(Partner, related_name="profile")
alias_name = models.CharField(max_length=255, null=True, blank=True)
former_legal_name = models.CharField(max_length=255, null=True, blank=True)
org_head_first_name = models.CharField(max_length=255, null=True, blank=True)
org_head_last_name = models.CharField(max_length=255, null=True, blank=True)
org_head_email = models.EmailField(max_length=255, null=True, blank=True)
class Meta:
ordering = ['id']
def __str__(self):
return "PartnerProfile <pk:{}>".format(self.id)
class PartnerMember(TimeStampedModel):
"""
"""
user = models.ForeignKey('account.User', related_name="partner_members")
partner_profile = models.ForeignKey(PartnerProfile, related_name="partner_members")
title = models.CharField(max_length=255)
# role = ??? the same that we have in agency?
class Meta:
ordering = ['id']
def __str__(self):
return "PartnerMember: {} <pk:{}>".format(self.title, self.id)
| apache-2.0 | Python |
0299ca81aa017b97301b7050fa9acf78ef1f11cb | Use refabric's format_socket, fix socket mkdir | Sportamore/blues,5monkeys/blues,5monkeys/blues,5monkeys/blues,Sportamore/blues,Sportamore/blues | blues/application/providers/gunicorn.py | blues/application/providers/gunicorn.py | import os
from refabric.context_managers import sudo
from refabric.utils import info
from refabric.utils.socket import format_socket
from ..project import sudo_project, virtualenv_path
from ... import debian, python, virtualenv
from ...app import blueprint
from .base import ManagedProvider
class GunicornProvider(ManagedProvider):
name = 'gunicorn'
default_manager = 'supervisor'
def install(self):
with sudo_project(), virtualenv.activate(virtualenv_path()):
python.pip('install', 'gunicorn')
self.manager.install()
self.create_socket()
def create_socket(self):
socket = blueprint.get('web.socket')
if ':' in socket: # It's a tcp socket
return
# It's an unix socket
path = socket
if len(path.split('/')) < 2:
raise ValueError('socket should not be placed in /.')
info('Creating socket for gunicorn: {}', path)
with sudo():
mkdir_result = debian.mkdir(os.path.dirname(path),
owner=self.project,
group='www-data')
def get_context(self):
context = super(GunicornProvider, self).get_context()
socket_string = blueprint.get('web.socket')
if socket_string:
socket_string = format_socket(socket_string)
bp = {
'socket': socket_string,
'workers': blueprint.get('web.workers', debian.nproc() * 2),
'module': blueprint.get('web.module'),
}
context.update(bp)
return context
def configure(self):
context = self.get_context()
self.manager.configure_provider(self,
context,
program_name=self.project)
def configure_web(self):
return self.configure() | import os
from urlparse import urlparse
from blues import python, virtualenv
from blues.application.project import sudo_project, project_home, \
virtualenv_path
from refabric.context_managers import sudo
from ... import debian
from ...app import blueprint
from .base import ManagedProvider
from refabric.utils import info
class GunicornProvider(ManagedProvider):
name = 'gunicorn'
default_manager = 'supervisor'
def install(self):
with sudo_project(), virtualenv.activate(virtualenv_path()):
python.pip('install', 'gunicorn')
self.manager.install()
self.create_socket()
def create_socket(self):
socket = blueprint.get('web.socket')
if ':' in socket: # It's a tcp socket
return
# It's an unix socket
path = socket
if len(path.split('/')) < 2:
raise ValueError('socket should not be placed in /.')
info('Creating socket for gunicorn: {}', path)
with sudo():
mkdir_result = debian.mkdir(os.path.dirname(path))
# If we could not create the directory, don't chown it.
# mkdir returns 0 if successsfully created or already exists,
# and > 0 for permission denied.
if mkdir_result is not None and mkdir_result.return_code == 0:
debian.chown(os.path.dirname(path), self.project, 'www-data')
def get_context(self):
context = super(GunicornProvider, self).get_context()
socket = blueprint.get('web.socket')
host, _, port = socket.partition(':')
if not port:
socket = 'unix:{}'.format(socket)
bp = {
'socket': socket,
'workers': blueprint.get('web.workers', debian.nproc() * 2),
'module': blueprint.get('web.module'),
}
context.update(bp)
return context
def configure(self):
context = self.get_context()
self.manager.configure_provider(self,
context,
program_name=self.project)
def configure_web(self):
return self.configure() | mit | Python |
426b2cb5487e4dfcd3d92d4f520ac6db51e82dbf | Allow to render a snippet from a non-site scope as partial | homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps | byceps/blueprints/snippet/templating.py | byceps/blueprints/snippet/templating.py | """
byceps.blueprints.snippet.templating
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import sys
import traceback
from flask import abort, g, render_template, url_for
from jinja2 import TemplateNotFound
from ...services.snippet import service as snippet_service
from ...services.snippet.service import SnippetNotFound
from ...services.snippet.transfer.models import Scope
from ...util.templating import get_variable_value, load_template
def render_snippet_as_page(version):
"""Render the given version of the snippet, or an error page if
that fails.
"""
try:
context = get_snippet_context(version)
return render_template('snippet/view.html', **context)
except TemplateNotFound:
abort(404)
except Exception as e:
print('Error in snippet markup:', e, file=sys.stderr)
traceback.print_exc()
context = {
'message': str(e),
}
return render_template('snippet/error.html', **context), 500
def get_snippet_context(version):
"""Return the snippet context to insert into the outer template."""
template = _load_template_with_globals(version.body)
current_page = get_variable_value(template, 'current_page')
title = version.title
head = _render_template(version.head) if version.head else None
body = template.render()
return {
'title': title,
'current_page': current_page,
'head': head,
'body': body,
}
def render_snippet_as_partial(name, *, scope=None, ignore_if_unknown=False):
"""Render the latest version of the snippet with the given name and
return the result.
"""
if scope is None:
scope = Scope.for_site(g.site_id)
current_version = snippet_service \
.find_current_version_of_snippet_with_name(scope, name)
if current_version is None:
if ignore_if_unknown:
return ''
else:
raise SnippetNotFound(name)
return _render_template(current_version.body)
def _render_template(source):
template = _load_template_with_globals(source)
return template.render()
def _load_template_with_globals(source):
template_globals = {
'render_snippet': render_snippet_as_partial,
'url_for': url_for,
}
return load_template(source, template_globals=template_globals)
| """
byceps.blueprints.snippet.templating
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import sys
import traceback
from flask import abort, g, render_template, url_for
from jinja2 import TemplateNotFound
from ...services.snippet import service as snippet_service
from ...services.snippet.service import SnippetNotFound
from ...services.snippet.transfer.models import Scope
from ...util.templating import get_variable_value, load_template
def render_snippet_as_page(version):
"""Render the given version of the snippet, or an error page if
that fails.
"""
try:
context = get_snippet_context(version)
return render_template('snippet/view.html', **context)
except TemplateNotFound:
abort(404)
except Exception as e:
print('Error in snippet markup:', e, file=sys.stderr)
traceback.print_exc()
context = {
'message': str(e),
}
return render_template('snippet/error.html', **context), 500
def get_snippet_context(version):
"""Return the snippet context to insert into the outer template."""
template = _load_template_with_globals(version.body)
current_page = get_variable_value(template, 'current_page')
title = version.title
head = _render_template(version.head) if version.head else None
body = template.render()
return {
'title': title,
'current_page': current_page,
'head': head,
'body': body,
}
def render_snippet_as_partial(name, *, ignore_if_unknown=False):
"""Render the latest version of the snippet with the given name and
return the result.
"""
scope = Scope.for_site(g.site_id)
current_version = snippet_service \
.find_current_version_of_snippet_with_name(scope, name)
if current_version is None:
if ignore_if_unknown:
return ''
else:
raise SnippetNotFound(name)
return _render_template(current_version.body)
def _render_template(source):
template = _load_template_with_globals(source)
return template.render()
def _load_template_with_globals(source):
template_globals = {
'render_snippet': render_snippet_as_partial,
'url_for': url_for,
}
return load_template(source, template_globals=template_globals)
| bsd-3-clause | Python |
661c59cf46281540c4aac9616267a350401fae81 | Use subprocess.SubprocessError | yashtrivedi96/coala,Nosferatul/coala,ManjiriBirajdar/coala,Nosferatul/coala,netman92/coala,impmihai/coala,nemaniarjun/coala,yland/coala,Shade5/coala,NalinG/coala,NalinG/coala,tltuan/coala,NiklasMM/coala,jayvdb/coala,netman92/coala,vinc456/coala,Asalle/coala,arjunsinghy96/coala,JohnS-01/coala,RJ722/coala,coala-analyzer/coala,AbdealiJK/coala,ManjiriBirajdar/coala,Shade5/coala,djkonro/coala,Asnelchristian/coala,shreyans800755/coala,NalinG/coala,MattAllmendinger/coala,svsn2117/coala,aptrishu/coala,swatilodha/coala,tushar-rishav/coala,scottbelden/coala,nemaniarjun/coala,MattAllmendinger/coala,d6e/coala,yland/coala,sagark123/coala,NalinG/coala,meetmangukiya/coala,abhiroyg/coala,sils1297/coala,coala/coala,damngamerz/coala,rimacone/testing2,arush0311/coala,stevemontana1980/coala,Nosferatul/coala,rimacone/testing2,rresol/coala,arafsheikh/coala,aptrishu/coala,sudheesh001/coala,NiklasMM/coala,Balaji2198/coala,mr-karan/coala,svsn2117/coala,RJ722/coala,mr-karan/coala,MariosPanag/coala,sophiavanvalkenburg/coala,tushar-rishav/coala,rresol/coala,Asalle/coala,meetmangukiya/coala,damngamerz/coala,NalinG/coala,coala-analyzer/coala,djkonro/coala,stevemontana1980/coala,abhiroyg/coala,incorrectusername/coala,rimacone/testing2,swatilodha/coala,sils1297/coala,yashtrivedi96/coala,refeed/coala,arjunsinghy96/coala,karansingh1559/coala,AbdealiJK/coala,yashLadha/coala,dagdaggo/coala,svsn2117/coala,kartikeys98/coala,AdeshAtole/coala,SanketDG/coala,MariosPanag/coala,ayushin78/coala,arush0311/coala,lonewolf07/coala,aptrishu/coala,stevemontana1980/coala,SanketDG/coala,jayvdb/coala,kartikeys98/coala,swatilodha/coala,arjunsinghy96/coala,vinc456/coala,SanketDG/coala,abhiroyg/coala,saurabhiiit/coala,scottbelden/coala,netman92/coala,dagdaggo/coala,CruiseDevice/coala,sagark123/coala,sudheesh001/coala,mr-karan/coala,arush0311/coala,impmihai/coala,JohnS-01/coala,nemaniarjun/coala,tltuan/coala,ayushin78/coala,shreyans800755/coala,Jo
hnS-01/coala,scottbelden/coala,vinc456/coala,CruiseDevice/coala,Asalle/coala,MariosPanag/coala,incorrectusername/coala,sophiavanvalkenburg/coala,sils1297/coala,arafsheikh/coala,meetmangukiya/coala,arafsheikh/coala,sudheesh001/coala,karansingh1559/coala,sophiavanvalkenburg/coala,RJ722/coala,karansingh1559/coala,yland/coala,NalinG/coala,dagdaggo/coala,Asnelchristian/coala,refeed/coala,Uran198/coala,d6e/coala,jayvdb/coala,Asnelchristian/coala,d6e/coala,saurabhiiit/coala,AdeshAtole/coala,AdeshAtole/coala,MattAllmendinger/coala,ayushin78/coala,djkonro/coala,coala/coala,Uran198/coala,yashLadha/coala,Balaji2198/coala,tushar-rishav/coala,lonewolf07/coala,coala-analyzer/coala,incorrectusername/coala,sagark123/coala,shreyans800755/coala,yashLadha/coala,saurabhiiit/coala,Balaji2198/coala,refeed/coala,NalinG/coala,Uran198/coala,coala/coala,CruiseDevice/coala,yashtrivedi96/coala,AbdealiJK/coala,damngamerz/coala,ManjiriBirajdar/coala,NiklasMM/coala,rresol/coala,lonewolf07/coala,kartikeys98/coala,tltuan/coala,Shade5/coala,impmihai/coala | coalib/output/printers/EspeakPrinter.py | coalib/output/printers/EspeakPrinter.py | import subprocess
from pyprint.ClosableObject import ClosableObject
from pyprint.Printer import Printer
from coalib.misc import Constants
class EspeakPrinter(Printer, ClosableObject):
def __init__(self):
"""
Raises EnvironmentError if VoiceOutput is impossible.
"""
Printer.__init__(self)
ClosableObject.__init__(self)
# TODO retrieve language from get_locale and select appropriate voice
try:
self.espeak = subprocess.Popen(['espeak'], stdin=subprocess.PIPE)
except OSError: # pragma: no cover
print("eSpeak doesn't seem to be installed. You cannot use the "
"voice output feature without eSpeak. It can be downloaded"
" from http://espeak.sourceforge.net/ or installed via "
"your usual package repositories.")
raise EnvironmentError
except subprocess.SubprocessError: # pragma: no cover
print("Failed to execute eSpeak. An unknown error occurred.",
Constants.THIS_IS_A_BUG)
raise EnvironmentError
def _close(self):
self.espeak.stdin.close()
def _print(self, output, **kwargs):
self.espeak.stdin.write(output.encode())
self.espeak.stdin.flush()
| import subprocess
from pyprint.ClosableObject import ClosableObject
from pyprint.Printer import Printer
from coalib.misc import Constants
class EspeakPrinter(Printer, ClosableObject):
def __init__(self):
"""
Raises EnvironmentError if VoiceOutput is impossible.
"""
Printer.__init__(self)
ClosableObject.__init__(self)
# TODO retrieve language from get_locale and select appropriate voice
try:
self.espeak = subprocess.Popen(['espeak'], stdin=subprocess.PIPE)
except OSError: # pragma: no cover
print("eSpeak doesn't seem to be installed. You cannot use the "
"voice output feature without eSpeak. It can be downloaded"
" from http://espeak.sourceforge.net/ or installed via "
"your usual package repositories.")
raise EnvironmentError
except: # pragma: no cover
print("Failed to execute eSpeak. An unknown error occurred.",
Constants.THIS_IS_A_BUG)
raise EnvironmentError
def _close(self):
self.espeak.stdin.close()
def _print(self, output, **kwargs):
self.espeak.stdin.write(output.encode())
self.espeak.stdin.flush()
| agpl-3.0 | Python |
9b894c2ea76c1a73eb5b313eed6b90aa192c23a1 | add explicit force endpoint for item refresh | pannal/Subliminal.bundle,pannal/Subliminal.bundle,pannal/Subliminal.bundle | Contents/Code/interface/refresh_item.py | Contents/Code/interface/refresh_item.py | # coding=utf-8
from subzero.constants import PREFIX
from menu_helpers import debounce, set_refresh_menu_state, route
from support.items import refresh_item
from support.helpers import timestamp
@route(PREFIX + '/item/refresh/{rating_key}/force', force=True)
@route(PREFIX + '/item/refresh/{rating_key}')
@debounce
def RefreshItem(rating_key=None, came_from="/recent", item_title=None, force=False, refresh_kind=None,
previous_rating_key=None, timeout=8000, randomize=None, trigger=True):
assert rating_key
from interface.main import fatality
header = " "
if trigger:
set_refresh_menu_state(u"Triggering %sRefresh for %s" % ("Force-" if force else "", item_title))
Thread.Create(refresh_item, rating_key=rating_key, force=force, refresh_kind=refresh_kind,
parent_rating_key=previous_rating_key, timeout=int(timeout))
header = u"%s of item %s triggered" % ("Refresh" if not force else "Forced-refresh", rating_key)
return fatality(randomize=timestamp(), header=header, replace_parent=True)
| # coding=utf-8
from subzero.constants import PREFIX
from menu_helpers import debounce, set_refresh_menu_state, route
from support.items import refresh_item
from support.helpers import timestamp
@route(PREFIX + '/item/refresh/{rating_key}')
@debounce
def RefreshItem(rating_key=None, came_from="/recent", item_title=None, force=False, refresh_kind=None,
previous_rating_key=None, timeout=8000, randomize=None, trigger=True):
assert rating_key
from interface.main import fatality
header = " "
if trigger:
set_refresh_menu_state(u"Triggering %sRefresh for %s" % ("Force-" if force else "", item_title))
Thread.Create(refresh_item, rating_key=rating_key, force=force, refresh_kind=refresh_kind,
parent_rating_key=previous_rating_key, timeout=int(timeout))
header = u"%s of item %s triggered" % ("Refresh" if not force else "Forced-refresh", rating_key)
return fatality(randomize=timestamp(), header=header, replace_parent=True)
| mit | Python |
40e56acb10da06aa83a6f9f5eee7dcd314a2102c | update pacbio tests | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | test/test_pacbio.py | test/test_pacbio.py | from sequana.pacbio import BAMPacbio
from sequana import sequana_data
from easydev import TempFile
def test_pacbio():
b = BAMPacbio(sequana_data("test_pacbio_subreads.bam"))
assert len(b) == 130
b.df
assert b.nb_pass[1] == 130
with TempFile() as fh:
b.filter_length(fh.name, threshold_min=500)
print(b) # check length
b.stats
# test nb_pass from scratch
b = BAMPacbio(sequana_data("test_pacbio_subreads.bam"))
b.nb_pass
# test hist_snr from scratch
b._df = None
b.hist_snr()
# test hist_len from scratch
b._df = None
b.hist_len()
# test from scratch
b._df = None
b.hist_GC()
# test from scratch
b._df = None
b.plot_GC_read_len()
# test from scratch
b._df = None
b._nb_pass = None
b.hist_ZMW_subreads()
with TempFile() as fh:
b.to_fasta(fh.name, threads=1)
with TempFile() as fh:
b.to_fastq(fh.name, threads=1)
def test_pacbio_stride():
b = BAMPacbio(sequana_data("test_pacbio_subreads.bam"))
with TempFile() as fh:
b.stride(fh.name, stride=2)
with TempFile() as fh:
b.stride(fh.name, stride=2, random=True)
| from sequana.pacbio import BAMPacbio
from sequana import sequana_data
from easydev import TempFile
def test_pacbio():
b = BAMPacbio(sequana_data("test_pacbio_subreads.bam"))
assert len(b) == 130
b.df
assert b.nb_pass[1] == 130
with TempFile() as fh:
b.filter_length(fh.name, threshold_min=500)
print(b) # check length
b.stats
# test nb_pass from scratch
b = BAMPacbio(sequana_data("test_pacbio_subreads.bam"))
b.nb_pass
# test hist_snr from scratch
b._df = None
b.hist_snr()
# test hist_len from scratch
b._df = None
b.hist_len()
# test from scratch
b._df = None
b.hist_GC()
# test from scratch
b._df = None
b.plot_GC_read_len()
# test from scratch
b._df = None
b._nb_pass = None
b.hist_ZMW_subreads()
def test_pacbio_stride():
b = BAMPacbio(sequana_data("test_pacbio_subreads.bam"))
with TempFile() as fh:
b.stride(fh.name, stride=2)
with TempFile() as fh:
b.stride(fh.name, stride=2, random=True)
| bsd-3-clause | Python |
0842789d7d0aeff396f98e83967c9aaabf2f6a4e | Fix fixture in test_margin() | ramnes/qtile,ramnes/qtile,qtile/qtile,qtile/qtile | test/test_window.py | test/test_window.py | import pytest
from test.conftest import BareConfig
bare_config = pytest.mark.parametrize("manager", [BareConfig], indirect=True)
@bare_config
def test_margin(manager):
manager.test_window('one')
# No margin
manager.c.window.place(10, 20, 50, 60, 0, '000000')
assert manager.c.window.info()['x'] == 10
assert manager.c.window.info()['y'] == 20
assert manager.c.window.info()['width'] == 50
assert manager.c.window.info()['height'] == 60
# Margin as int
manager.c.window.place(10, 20, 50, 60, 0, '000000', margin=8)
assert manager.c.window.info()['x'] == 18
assert manager.c.window.info()['y'] == 28
assert manager.c.window.info()['width'] == 34
assert manager.c.window.info()['height'] == 44
# Margin as list
manager.c.window.place(10, 20, 50, 60, 0, '000000', margin=[2, 4, 8, 10])
assert manager.c.window.info()['x'] == 20
assert manager.c.window.info()['y'] == 22
assert manager.c.window.info()['width'] == 36
assert manager.c.window.info()['height'] == 50
| import pytest
from test.conftest import BareConfig
bare_config = pytest.mark.parametrize("qtile", [BareConfig], indirect=True)
@bare_config
def test_margin(qtile):
qtile.test_window('one')
# No margin
qtile.c.window.place(10, 20, 50, 60, 0, '000000')
assert qtile.c.window.info()['x'] == 10
assert qtile.c.window.info()['y'] == 20
assert qtile.c.window.info()['width'] == 50
assert qtile.c.window.info()['height'] == 60
# Margin as int
qtile.c.window.place(10, 20, 50, 60, 0, '000000', margin=8)
assert qtile.c.window.info()['x'] == 18
assert qtile.c.window.info()['y'] == 28
assert qtile.c.window.info()['width'] == 34
assert qtile.c.window.info()['height'] == 44
# Margin as list
qtile.c.window.place(10, 20, 50, 60, 0, '000000', margin=[2, 4, 8, 10])
assert qtile.c.window.info()['x'] == 20
assert qtile.c.window.info()['y'] == 22
assert qtile.c.window.info()['width'] == 36
assert qtile.c.window.info()['height'] == 50
| mit | Python |
cea891a2de493ce211ccf13f4bf0c487f945985d | Print out the new track id. | Rosuav/appension,MikeiLL/appension,MikeiLL/appension,MikeiLL/appension,Rosuav/appension,MikeiLL/appension,Rosuav/appension,Rosuav/appension | test_audio_files.py | test_audio_files.py | import fore.apikeys
import fore.mixer
import fore.database
import pyechonest.track
for file in fore.database.get_many_mp3(status='all'):
print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
track = track.track_from_filename('audio/'+file.filename, force_upload=True)
print(track.id)
| import fore.apikeys
import fore.mixer
import fore.database
import pyechonest.track
for file in fore.database.get_many_mp3(status='all'):
print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
track.track_from_filename('audio/'+file.filename, force_upload=True)
| artistic-2.0 | Python |
e01a4a7ca346401a66e802eb9b2d8190e65fd606 | Update test_get_image6d.py | sebi06/BioFormatsRead | test_get_image6d.py | test_get_image6d.py | # -*- coding: utf-8 -*-
"""
@author: Sebi
File: test_get_image6d.py
Date: 02.05.2017
Version. 1.7
"""
from __future__ import print_function
import numpy as np
import bftools as bf
#filename = r'testdata/Beads_63X_NA1.35_xy=0.042_z=0.1.czi'
#filename = r'testdata/T=5_Z=3_CH=2_CZT_All_CH_per_Slice.czi'
#filename = r'testdata/B4_B5_S=8_4Pos_perWell_T=2_Z=1_CH=1.czi'
filename = r'/home/sebi06/Dokumente/Image_Datasets/2x2_SNAP_CH=2_Z=5.czi'
# use for BioFormtas <= 5.1.10
#urlnamespace = 'http://www.openmicroscopy.org/Schemas/OME/2015-01'
# use for BioFormtas > 5.2.0
urlnamespace = 'http://www.openmicroscopy.org/Schemas/OME/2016-06'
# specify bioformats_package.jar to use if required
bfpackage = r'bfpackage/5.4.1/bioformats_package.jar'
bf.set_bfpath(bfpackage)
# get image meta-information
MetaInfo = bf.get_relevant_metainfo_wrapper(filename, namespace=urlnamespace, bfpath=bfpackage, showinfo=False)
try:
img6d, readstate = bf.get_image6d(filename, MetaInfo['Sizes'])
arrayshape = np.shape(img6d)
except:
arrayshape = []
print('Could not read image data into NumPy array.')
# show relevant image Meta-Information
bf.showtypicalmetadata(MetaInfo, namespace=urlnamespace, bfpath=bfpackage)
print('Array Shape : ', arrayshape)
| # -*- coding: utf-8 -*-
"""
@author: Sebi
File: test_get_image6d.py
Date: 02.05.2017
Version. 1.7
"""
from __future__ import print_function
import numpy as np
import bftools as bf
#filename = r'testdata/Beads_63X_NA1.35_xy=0.042_z=0.1.czi'
#filename = r'testdata/T=5_Z=3_CH=2_CZT_All_CH_per_Slice.czi'
#filename = r'testdata/B4_B5_S=8_4Pos_perWell_T=2_Z=1_CH=1.czi'
filename = r'/home/sebi06/Dokumente/Image_Datasets/2x2_SNAP_CH=2_Z=5.czi'
# use for BioFormtas <= 5.1.10
#urlnamespace = 'http://www.openmicroscopy.org/Schemas/OME/2015-01'
# use for BioFormtas > 5.2.0
urlnamespace = 'http://www.openmicroscopy.org/Schemas/OME/2016-06'
# specify bioformats_package.jar to use if required
bfpackage = r'bioformats_package/5.4.1/bioformats_package.jar'
bf.set_bfpath(bfpackage)
# get image meta-information
MetaInfo = bf.get_relevant_metainfo_wrapper(filename, namespace=urlnamespace, bfpath=bfpackage, showinfo=False)
try:
img6d, readstate = bf.get_image6d(filename, MetaInfo['Sizes'])
arrayshape = np.shape(img6d)
except:
arrayshape = []
print('Could not read image data into NumPy array.')
# show relevant image Meta-Information
bf.showtypicalmetadata(MetaInfo, namespace=urlnamespace, bfpath=bfpackage)
print('Array Shape : ', arrayshape)
| bsd-2-clause | Python |
47279e417720b94efa35601561822ecab44fb7b6 | Update to the unit-test for slicing dataframes and expressions | maartenbreddels/vaex,maartenbreddels/vaex,maartenbreddels/vaex,maartenbreddels/vaex,maartenbreddels/vaex,maartenbreddels/vaex | tests/slice_test.py | tests/slice_test.py | from common import *
def test_slice_expression(df):
assert df.x[:2].tolist() == df[:2].x.tolist()
assert df.x[2:6].tolist() == df[2:6].x.tolist()
assert df.x[-3:].tolist() == df[-3:].x.tolist()
assert df.x[::-3].tolist() == df[::-3].x.tolist()
def test_slice_against_numpy(df):
assert df.x[:2].tolist() == df.x.values[:2].tolist()
assert df.x[2:6].tolist() == df.x.values[2:6].tolist()
assert df.x[-3:].tolist() == df.x.values[-3:].tolist()
assert df.x[::-3].tolist() == df.x.values[::-3].tolist()
def test_slice(ds_local):
ds = ds_local
ds_sliced = ds[:]
assert ds_sliced.length_original() == ds_sliced.length_unfiltered() >= 10
assert ds_sliced.get_active_range() == (0, ds_sliced.length_original())# == (0, 10)
assert ds_sliced.x.tolist() == np.arange(10.).tolist()
# trimming with a non-zero start index
ds_sliced = ds[5:]
assert ds_sliced.length_original() == ds_sliced.length_unfiltered() == 5
assert ds_sliced.get_active_range() == (0, ds_sliced.length_original()) == (0, 5)
assert ds_sliced.x.tolist() == np.arange(5, 10.).tolist()
# slice on slice
ds_sliced = ds_sliced[1:4]
assert ds_sliced.length_original() == ds_sliced.length_unfiltered() == 3
assert ds_sliced.get_active_range() == (0, ds_sliced.length_original()) == (0, 3)
assert ds_sliced.x.tolist() == np.arange(6, 9.).tolist()
def test_head(ds_local):
ds = ds_local
df = ds.head(5)
assert len(df) == 5
def test_tail(ds_local):
ds = ds_local
df = ds.tail(5)
assert len(df) == 5
def test_head_with_selection():
df = vaex.example()
df.select(df.x > 0, name='test')
df.head()
def test_slice_beyond_end(df):
df2 = df[:100]
assert df2.x.tolist() == df.x.tolist()
assert len(df2) == len(df)
def test_slice_negative(df):
df2 = df[:-1]
assert df2.x.tolist() == df.x.values[:-1].tolist()
assert len(df2) == len(df)-1
| from common import *
def test_slice_expression(df):
assert df.x[:2].tolist() == df[:2].x.tolist()
assert df.x[2:6].tolist() == df[2:6].x.tolist()
def test_slice(ds_local):
ds = ds_local
ds_sliced = ds[:]
assert ds_sliced.length_original() == ds_sliced.length_unfiltered() >= 10
assert ds_sliced.get_active_range() == (0, ds_sliced.length_original())# == (0, 10)
assert ds_sliced.x.tolist() == np.arange(10.).tolist()
# trimming with a non-zero start index
ds_sliced = ds[5:]
assert ds_sliced.length_original() == ds_sliced.length_unfiltered() == 5
assert ds_sliced.get_active_range() == (0, ds_sliced.length_original()) == (0, 5)
assert ds_sliced.x.tolist() == np.arange(5, 10.).tolist()
# slice on slice
ds_sliced = ds_sliced[1:4]
assert ds_sliced.length_original() == ds_sliced.length_unfiltered() == 3
assert ds_sliced.get_active_range() == (0, ds_sliced.length_original()) == (0, 3)
assert ds_sliced.x.tolist() == np.arange(6, 9.).tolist()
def test_head(ds_local):
ds = ds_local
df = ds.head(5)
assert len(df) == 5
def test_tail(ds_local):
ds = ds_local
df = ds.tail(5)
assert len(df) == 5
def test_head_with_selection():
df = vaex.example()
df.select(df.x > 0, name='test')
df.head()
def test_slice_beyond_end(df):
df2 = df[:100]
assert df2.x.tolist() == df.x.tolist()
assert len(df2) == len(df)
def test_slice_negative(df):
df2 = df[:-1]
assert df2.x.tolist() == df.x.values[:-1].tolist()
assert len(df2) == len(df)-1
| mit | Python |
1a261b80dd42b04885b07c40e96dea045fe09e58 | Add tests for the b and c operators. | nitbix/PyAsync | tests/test_async.py | tests/test_async.py | import wasync
from wasync import *
import datetime as dt
import time
import sys
import unittest
sys.path.append(".")
def longfunc(x):
time.sleep(x)
return x
class TestBasicFunctionality(unittest.TestCase):
def setUp(self):
self.scheduler = wasync.go()
def testDeferredAwait(self):
f1 = deferred(lambda: longfunc(0.1))
f1.await()
self.assertEqual(f1.await(),0.1,'Deferred.await() is broken')
def testCoreAwait(self):
f1 = deferred(lambda: longfunc(0.1))
self.assertEqual(await(f1),0.1,'await() is broken')
def testConcurrency(self):
start = time.time()
f1 = deferred(lambda: longfunc(0.2))
f2 = deferred(lambda: longfunc(0.4))
f1.await()
after_f1 = time.time()
f2.await()
after_f2 = time.time()
self.assertLess(after_f1 - start, 0.3, 'concurrency is broken')
self.assertLess(after_f2 - start, 0.6, 'concurrency is broken')
f1 = deferred(lambda: longfunc(0.2))
f2 = deferred(lambda: longfunc(0.4))
self.assertEqual(await_first([f1,f2]),0.2,'await_first() is broken')
self.assertListEqual(await_all([f1,f2]),[0.2,0.4],'await_all() is broken')
def testBind(self):
f1 = determined(1)
f2 = f1.bind(lambda x: x + 1)
self.assertEqual(f2,2)
def testChain(self):
f1 = determined(1)
f2 = f1.chain(lambda x: x + 1)
self.assertEqual(f2.await(),2)
def testBindOperator(self):
f1 = determined(1)
f2 = f1 |b| (lambda x: x + 1)
self.assertEqual(f2,2)
def testChainOperator(self):
f1 = determined(1)
f2 = f1 |c| (lambda x: x + 1)
self.assertEqual(f2.await(),2)
if __name__ == '__main__':
unittest.main()
| import wasync
from wasync import *
import datetime as dt
import time
import sys
import unittest
sys.path.append(".")
def longfunc(x):
time.sleep(x)
return x
class TestBasicFunctionality(unittest.TestCase):
def setUp(self):
self.scheduler = wasync.go()
def testDeferredAwait(self):
f1 = deferred(lambda: longfunc(0.1))
f1.await()
self.assertEqual(f1.await(),0.1,'Deferred.await() is broken')
def testCoreAwait(self):
f1 = deferred(lambda: longfunc(0.1))
self.assertEqual(await(f1),0.1,'await() is broken')
def testConcurrency(self):
start = time.time()
f1 = deferred(lambda: longfunc(0.2))
f2 = deferred(lambda: longfunc(0.4))
f1.await()
after_f1 = time.time()
f2.await()
after_f2 = time.time()
self.assertLess(after_f1 - start, 0.3, 'concurrency is broken')
self.assertLess(after_f2 - start, 0.6, 'concurrency is broken')
f1 = deferred(lambda: longfunc(0.2))
f2 = deferred(lambda: longfunc(0.4))
self.assertEqual(await_first([f1,f2]),0.2,'await_first() is broken')
self.assertListEqual(await_all([f1,f2]),[0.2,0.4],'await_all() is broken')
def testBind(self):
f1 = determined(1)
f2 = f1.bind(lambda x: x + 1)
self.assertEqual(f2,2)
def testChain(self):
f1 = determined(1)
f2 = f1.chain(lambda x: x + 1)
self.assertEqual(f2.await(),2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
cad0e8b11f10a847a94e7cc767ea31d80183c574 | Fix up tests | takluyver/nbparameterise | tests/test_basic.py | tests/test_basic.py | import os.path
import unittest
import nbformat
from nbparameterise import code
samplenb = os.path.join(os.path.dirname(__file__), 'sample.ipynb')
class BasicTestCase(unittest.TestCase):
def setUp(self):
with open(samplenb) as f:
self.nb = nbformat.read(f, as_version=4)
self.params = code.extract_parameters(self.nb)
def test_extract(self):
a = self.params[0]
assert a.name == 'a'
assert a.type == str
assert a.value == "Some text"
b = self.params[1]
assert b.name == 'b'
assert b.type == int
assert b.value == 12
c = self.params[2]
assert c.name == 'c'
assert c.type == float
assert c.value == 14.0
assert c.metadata['display_name'] == 'Sea'
d = self.params[3]
assert d.name == 'd'
assert d.type == bool
assert d.value == False
def test_rebuild(self):
from_form = [
self.params[0].with_value("New text"),
self.params[1].with_value(21),
self.params[2].with_value(0.25),
self.params[3].with_value(True),
]
nb = code.replace_definitions(self.nb, from_form, execute=False)
ns = {}
exec(nb.cells[0].source, ns)
assert ns['a'] == "New text"
assert ns['b'] == 21
assert ns['c'] == 0.25
assert ns['d'] == True
| import os.path
import unittest
from IPython.nbformat import current as nbformat
from nbparameterise import code
samplenb = os.path.join(os.path.dirname(__file__), 'sample.ipynb')
class BasicTestCase(unittest.TestCase):
def setUp(self):
with open(samplenb) as f:
self.nb = nbformat.read(f, 'ipynb')
self.params = code.extract_parameters(self.nb)
def test_extract(self):
a = self.params[0]
assert a.name == 'a'
assert a.type == str
assert a.value == "Some text"
b = self.params[1]
assert b.name == 'b'
assert b.type == int
assert b.value == 12
c = self.params[2]
assert c.name == 'c'
assert c.type == float
assert c.value == 14.0
assert c.metadata['display_name'] == 'Sea'
d = self.params[3]
assert d.name == 'd'
assert d.type == bool
assert d.value == False
def test_rebuild(self):
from_form = [
self.params[0].with_value("New text"),
self.params[1].with_value(21),
self.params[2].with_value(0.25),
self.params[3].with_value(True),
]
code.replace_definitions(self.nb, from_form)
ns = {}
exec(self.nb.worksheets[0].cells[0].input, ns)
assert ns['a'] == "New text"
assert ns['b'] == 21
assert ns['c'] == 0.25
assert ns['d'] == True
| mit | Python |
550b48c0d1b464a782d875e30da140c742cd4b3e | Replace assertGreaterEqual with assertTrue(a >= b) | mozilla/spicedham,mozilla/spicedham | tests/test_bayes.py | tests/test_bayes.py | from unittest import TestCase
from itertools import repeat, imap, izip, cycle
from spicedham.bayes import Bayes
from spicedham import Spicedham
class TestBayes(TestCase):
def test_classify(self):
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
alphabet = map(chr, range(97, 123))
for letter in alphabet:
p = b.classify(letter)
self.assertEqual(p, 0.5)
def _training(self, bayes):
alphabet = map(chr, range(97, 123))
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
bayes.train(message, is_spam)
def test_train(self):
alphabet = map(chr, range(97, 123))
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
for letter in alphabet:
result = sh.backend.get_key(b.__class__.__name__, letter)
self.assertEqual(result, {'numTotal': 2, 'numSpam': 1})
self.assertTrue(result['numTotal'] >= result['numSpam'])
| from unittest import TestCase
from itertools import repeat, imap, izip, cycle
from spicedham.bayes import Bayes
from spicedham import Spicedham
class TestBayes(TestCase):
def test_classify(self):
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
alphabet = map(chr, range(97, 123))
for letter in alphabet:
p = b.classify(letter)
self.assertEqual(p, 0.5)
def _training(self, bayes):
alphabet = map(chr, range(97, 123))
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
bayes.train(message, is_spam)
def test_train(self):
alphabet = map(chr, range(97, 123))
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
for letter in alphabet:
result = sh.backend.get_key(b.__class__.__name__, letter)
self.assertEqual(result, {'numTotal': 2, 'numSpam': 1})
self.assertGreaterEqual(result['numTotal'], result['numSpam'])
| mpl-2.0 | Python |
c7002a39c232cde3088570ff82b436d4ead8130c | put coding line in test_bytes | rspeer/python-ftfy | tests/test_bytes.py | tests/test_bytes.py | # coding: utf-8
from ftfy.guess_bytes import guess_bytes
from nose.tools import eq_
TEST_ENCODINGS = [
'utf-16', 'utf-8', 'sloppy-windows-1252'
]
TEST_STRINGS = [
u'Renée\nFleming', u'Noël\nCoward', u'Señor\nCardgage',
u'€ • £ • ¥', u'¿Qué?'
]
def check_bytes_decoding(string):
for encoding in TEST_ENCODINGS:
result_str, result_encoding = guess_bytes(string.encode(encoding))
eq_(result_str, string)
eq_(result_encoding, encoding)
if '\n' in string:
old_mac_bytes = string.replace('\n', '\r').encode('macroman')
result_str, result_encoding = guess_bytes(old_mac_bytes)
eq_(result_str, string.replace('\n', '\r'))
def test_guess_bytes():
for string in TEST_STRINGS:
yield check_bytes_decoding, string
bowdlerized_null = b'null\xc0\x80separated'
result_str, result_encoding = guess_bytes(bowdlerized_null)
eq_(result_str, u'null\x00separated')
eq_(result_encoding, u'utf-8-variants')
| from ftfy.guess_bytes import guess_bytes
from nose.tools import eq_
TEST_ENCODINGS = [
'utf-16', 'utf-8', 'sloppy-windows-1252'
]
TEST_STRINGS = [
u'Renée\nFleming', u'Noël\nCoward', u'Señor\nCardgage',
u'€ • £ • ¥', u'¿Qué?'
]
def check_bytes_decoding(string):
for encoding in TEST_ENCODINGS:
result_str, result_encoding = guess_bytes(string.encode(encoding))
eq_(result_str, string)
eq_(result_encoding, encoding)
if '\n' in string:
old_mac_bytes = string.replace('\n', '\r').encode('macroman')
result_str, result_encoding = guess_bytes(old_mac_bytes)
eq_(result_str, string.replace('\n', '\r'))
def test_guess_bytes():
for string in TEST_STRINGS:
yield check_bytes_decoding, string
bowdlerized_null = b'null\xc0\x80separated'
result_str, result_encoding = guess_bytes(bowdlerized_null)
eq_(result_str, u'null\x00separated')
eq_(result_encoding, u'utf-8-variants')
| mit | Python |
8a46ef083380f481f15f46f7b53f6af95f4a9025 | Test that date attributes are parsed, not computed | guoguo12/billboard-charts,guoguo12/billboard-charts,jameswenzel/billboard-charts,jameswenzel/billboard-charts | tests/test_dates.py | tests/test_dates.py | import datetime
import billboard
from nose.tools import raises
def test_date_rounding():
"""Checks that the Billboard website is rounding dates correctly: it should
round up to the nearest date on which a chart was published.
"""
chart = billboard.ChartData('hot-100', date='1000-10-10')
assert chart.date == '1958-08-04' # The first Hot 100 chart
chart = billboard.ChartData('hot-100', date='1996-07-30')
assert chart.date == '1996-08-03'
def test_previous_next():
"""Checks that the date, previousDate, and nextDate attributes are parsed
from the HTML, not computed. Specifically, we shouldn't assume charts are
always published seven days apart, since (as this example demonstrates)
this is not true.
"""
chart = billboard.ChartData('hot-100', date='1962-01-06')
assert chart.date == '1962-01-06'
assert chart.previousDate == '1961-12-25'
chart = billboard.ChartData('hot-100', date='1961-12-25')
assert chart.date == '1961-12-25'
assert chart.nextDate == '1962-01-06'
def test_datetime_date():
"""Checks that ChartData correctly handles datetime objects as the
date parameter.
"""
chart = billboard.ChartData('hot-100', datetime.date(2016, 7, 9))
assert len(chart) == 100
assert chart.date == '2016-07-09'
@raises(ValueError)
def test_unsupported_date_format():
"""Checks that using an unsupported date format raises an exception."""
billboard.ChartData('hot-100', date='07-30-1996')
@raises(ValueError)
def test_empty_string_date():
"""Checks that passing an empty string as the date raises an exception."""
billboard.ChartData('hot-100', date='')
| import datetime
import billboard
from nose.tools import raises
def test_date_rounding():
"""Checks that the Billboard website is rounding dates correctly: it should
round up to the nearest date on which a chart was published.
"""
chart = billboard.ChartData('hot-100', date='1000-10-10')
assert chart.date == '1958-08-04' # The first Hot 100 chart
chart = billboard.ChartData('hot-100', date='1996-07-30')
assert chart.date == '1996-08-03'
def test_datetime_date():
"""Checks that ChartData correctly handles datetime objects as the
date parameter.
"""
chart = billboard.ChartData('hot-100', datetime.date(2016, 7, 9))
assert len(chart) == 100
assert chart.date == '2016-07-09'
@raises(ValueError)
def test_unsupported_date_format():
"""Checks that using an unsupported date format raises an exception."""
billboard.ChartData('hot-100', date='07-30-1996')
@raises(ValueError)
def test_empty_string_date():
"""Checks that passing an empty string as the date raises an exception."""
billboard.ChartData('hot-100', date='')
| mit | Python |
d26e1b0d82f8b00023c2ad0365b83c25e08678fa | Expand entry equality tests | MisanthropicBit/bibpy,MisanthropicBit/bibpy | tests/test_entry.py | tests/test_entry.py | # -*- coding: utf-8 -*-
"""Test the entry class."""
import bibpy
import bibpy.entry
import pytest
@pytest.fixture
def test_entry():
return bibpy.entry.Entry('article', 'key')
def test_formatting(test_entry):
assert test_entry.format() == '@article{key,\n}'
def test_properties(test_entry):
assert test_entry.entry_type == 'article'
assert test_entry.entry_key == 'key'
assert test_entry.aliases('bibtex') == []
test_entry.author = 'Author'
test_entry.title = 'Title'
assert test_entry.get('author') == 'Author'
assert test_entry.get('uobdrg', None) is None
assert set(test_entry.keys()) == set(['author', 'title'])
assert set(test_entry.values()) == set(['Author', 'Title'])
test_entry.clear()
assert not test_entry.fields
assert test_entry.author is None
assert test_entry.title is None
test_entry.journaltitle = 'Journaltitle'
assert test_entry.journaltitle == 'Journaltitle'
del test_entry['journaltitle']
assert test_entry.journaltitle is None
assert not test_entry.valid('bibtex')
assert not test_entry.valid('biblatex')
# assert not test_entry.valid('mixed')
assert test_entry.valid('relaxed')
with pytest.raises(ValueError):
test_entry.valid('seoligh')
test_entry['author'] = 'Author'
assert len(test_entry) == 1
assert repr(test_entry) == "Entry(type=article, key=key)"
assert test_entry['lhsfeslkj'] is None
for fmt in ['bibtex', 'biblatex']:
with pytest.raises(bibpy.error.RequiredFieldError):
test_entry.validate(fmt)
entry2 = bibpy.entry.Entry('manual', 'key2')
assert test_entry != entry2
# Entries are not equal to other types
assert not entry2 == "Hello"
# Entries must have the same entry type and key to be equal
entry2.author = 'Johnson'
test_entry.author = 'Johnson'
assert test_entry != entry2
entry2.entry_type = 'article'
entry2.entry_key = 'key'
entry2.author = 'Johnson'
# Entries must have the same fields
test_entry.author = 'Johnson'
test_entry.year = 2007
assert test_entry != entry2
# Entries must have the same contents in their fields
entry2.author = 'johnson'
entry2.year = 2007
assert test_entry != entry2
| # -*- coding: utf-8 -*-
"""Test the entry class."""
import bibpy
import bibpy.entry
import pytest
@pytest.fixture
def test_entry():
return bibpy.entry.Entry('article', 'key')
def test_formatting(test_entry):
assert test_entry.format() == '@article{key,\n}'
def test_properties(test_entry):
assert test_entry.entry_type == 'article'
assert test_entry.entry_key == 'key'
assert test_entry.aliases('bibtex') == []
test_entry.author = 'Author'
test_entry.title = 'Title'
assert test_entry.get('author') == 'Author'
assert test_entry.get('uobdrg', None) is None
assert set(test_entry.keys()) == set(['author', 'title'])
assert set(test_entry.values()) == set(['Author', 'Title'])
test_entry.clear()
assert not test_entry.fields
assert test_entry.author is None
assert test_entry.title is None
test_entry.journaltitle = 'Journaltitle'
assert test_entry.journaltitle == 'Journaltitle'
del test_entry['journaltitle']
assert test_entry.journaltitle is None
assert not test_entry.valid('bibtex')
assert not test_entry.valid('biblatex')
# assert not test_entry.valid('mixed')
assert test_entry.valid('relaxed')
with pytest.raises(ValueError):
test_entry.valid('seoligh')
test_entry['author'] = 'Author'
assert len(test_entry) == 1
assert repr(test_entry) == "Entry(type=article, key=key)"
assert test_entry['lhsfeslkj'] is None
for fmt in ['bibtex', 'biblatex']:
with pytest.raises(bibpy.error.RequiredFieldError):
test_entry.validate(fmt)
entry2 = bibpy.entry.Entry('manual', 'key2')
assert test_entry != entry2
entry2.author = 'Johnson'
test_entry.author = 'Johnson'
assert test_entry != entry2
| mit | Python |
2b0b574bc6503a7318c9ae24530f641b59aaa8e0 | Test separate user and password | TwoBitAlchemist/NeoAlchemy | tests/test_graph.py | tests/test_graph.py | """Graph (Connection Class) Tests"""
import pytest
from neoalchemy import Graph
def test_default_connection():
graph = Graph()
assert graph.driver.url == 'bolt://localhost'
assert graph.driver.host == 'localhost'
assert graph.driver.port is None
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'neo4j'
assert graph.driver.config['auth'].credentials == 'neo4j'
def test_http_connection():
with pytest.warns(UserWarning):
graph = Graph('http://localhost')
assert graph.driver.url == 'bolt://localhost'
assert graph.driver.host == 'localhost'
assert graph.driver.port is None
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'neo4j'
assert graph.driver.config['auth'].credentials == 'neo4j'
def test_auth_token_in_connection():
graph = Graph('bolt://user:pass@localhost')
assert graph.driver.url == 'bolt://localhost'
assert graph.driver.host == 'localhost'
assert graph.driver.port is None
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'user'
assert graph.driver.config['auth'].credentials == 'pass'
def test_full_connection_string():
graph = Graph('bolt://user:pass@localhost:7474')
assert graph.driver.url == 'bolt://localhost:7474'
assert graph.driver.host == 'localhost'
assert graph.driver.port == 7474
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'user'
assert graph.driver.config['auth'].credentials == 'pass'
def test_separate_user_pass():
graph = Graph('bolt://localhost:7474', user='user', password='pass')
assert graph.driver.url == 'bolt://localhost:7474'
assert graph.driver.host == 'localhost'
assert graph.driver.port == 7474
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'user'
assert graph.driver.config['auth'].credentials == 'pass'
| """Graph (Connection Class) Tests"""
import pytest
from neoalchemy import Graph
def test_default_connection():
graph = Graph()
assert graph.driver.url == 'bolt://localhost'
assert graph.driver.host == 'localhost'
assert graph.driver.port is None
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'neo4j'
assert graph.driver.config['auth'].credentials == 'neo4j'
def test_http_connection():
with pytest.warns(UserWarning):
graph = Graph('http://localhost')
assert graph.driver.url == 'bolt://localhost'
assert graph.driver.host == 'localhost'
assert graph.driver.port is None
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'neo4j'
assert graph.driver.config['auth'].credentials == 'neo4j'
def test_auth_token_in_connection():
graph = Graph('bolt://user:pass@localhost')
assert graph.driver.url == 'bolt://localhost'
assert graph.driver.host == 'localhost'
assert graph.driver.port is None
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'user'
assert graph.driver.config['auth'].credentials == 'pass'
def test_full_connection_string():
graph = Graph('bolt://user:pass@localhost:7474')
assert graph.driver.url == 'bolt://localhost:7474'
assert graph.driver.host == 'localhost'
assert graph.driver.port == 7474
assert graph.driver.config['auth'].scheme == 'basic'
assert graph.driver.config['auth'].principal == 'user'
assert graph.driver.config['auth'].credentials == 'pass'
| mit | Python |
c3a26ff2801e6f5233432a1869d98e5369a4db11 | add tests for truncate_table | knowledge-point/dj_anonymizer | tests/test_utils.py | tests/test_utils.py | import mock
import pytest
from django.contrib.auth.models import User
from django.db import DEFAULT_DB_ALIAS
from django.test.utils import override_settings
from dj_anonymizer.utils import import_if_exist, truncate_table
@pytest.mark.parametrize('path, expected', [
('hello', False),
('base', True),
])
def test_import_if_exist(mocker, path, expected):
with override_settings(
ANONYMIZER_MODEL_DEFINITION_DIR='example/anonymizer'
):
mocked_import = mock.MagicMock()
mocker.patch('importlib.import_module', mocked_import)
import_if_exist(path)
assert mocked_import.called is expected
@mock.patch('dj_anonymizer.utils.connections')
def test_truncate_table(mock_connections):
mock_cursor = mock_connections.\
__getitem__(DEFAULT_DB_ALIAS).\
cursor.return_value.__enter__.return_value
mock_connections.__getitem__(DEFAULT_DB_ALIAS).vendor = 'sqlite'
truncate_table(User)
mock_cursor.execute.assert_called_once_with('DELETE FROM "auth_user"')
mock_connections.__getitem__(DEFAULT_DB_ALIAS).vendor = 'dummy'
with pytest.raises(NotImplementedError):
truncate_table(User)
| import mock
import pytest
from django.test.utils import override_settings
from dj_anonymizer.utils import import_if_exist
@pytest.mark.parametrize('path, expected', [
('hello', False),
('base', True),
])
def test_import_if_exist(mocker, path, expected):
with override_settings(
ANONYMIZER_MODEL_DEFINITION_DIR='example/anonymizer'
):
mocked_import = mock.MagicMock()
mocker.patch('importlib.import_module', mocked_import)
import_if_exist(path)
assert mocked_import.called is expected
| mit | Python |
aec4647e7599979de06952739459376c87fdb7ee | switch mime-type from text/plain to kml | google-code-export/marinemap,google-code-export/marinemap,Alwnikrotikz/marinemap,Alwnikrotikz/marinemap,google-code-export/marinemap,Alwnikrotikz/marinemap,google-code-export/marinemap,Alwnikrotikz/marinemap | lingcod/studyregion/views.py | lingcod/studyregion/views.py | from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, HttpResponseServerError, HttpResponseForbidden
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from lingcod.common import mimetypes
from lingcod.common.utils import KmlWrap
from django.conf import settings
from lingcod.studyregion import models
def studyregion(request, template_name='studyregion/studyregion.html'):
"""Main application window
"""
return render_to_response(template_name, RequestContext(request,{'api_key':settings.GOOGLE_API_KEY}))
def regionKml(request):
"""Handler for AJAX regionKml request
"""
region = get_object_or_404( models.StudyRegion, pk=1 )
return HttpResponse( KmlWrap( region.kml(request.get_host()) ), content_type=mimetypes.KML)
def regionKmlChunk(request, n, s, e, w):
"""Handler for AJAX regionKml request
"""
region = get_object_or_404( models.StudyRegion, pk=1 )
return HttpResponse(
KmlWrap( '<Document>' + region.kml_chunk(float(n), float(s), float(e), float(w)) + '</Document>' ), content_type=mimetypes.KML)
def regionLookAtKml(request):
"""Handler for AJAX regionLookAtKml request
"""
region = get_object_or_404( models.StudyRegion, pk=1 )
return HttpResponse( KmlWrap( '<Document>' + region.lookAtKml() + '</Document>' ), content_type=mimetypes.KML ) | from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, HttpResponseServerError, HttpResponseForbidden
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from lingcod.common import mimetypes
from lingcod.common.utils import KmlWrap
from django.conf import settings
from lingcod.studyregion import models
def studyregion(request, template_name='studyregion/studyregion.html'):
"""Main application window
"""
return render_to_response(template_name, RequestContext(request,{'api_key':settings.GOOGLE_API_KEY}))
def regionKml(request):
"""Handler for AJAX regionKml request
"""
region = get_object_or_404( models.StudyRegion, pk=1 )
return HttpResponse( KmlWrap( region.kml(request.get_host()) ), content_type='text/plain')
def regionKmlChunk(request, n, s, e, w):
"""Handler for AJAX regionKml request
"""
region = get_object_or_404( models.StudyRegion, pk=1 )
return HttpResponse(
KmlWrap( '<Document>' + region.kml_chunk(float(n), float(s), float(e), float(w)) + '</Document>' ), content_type='text/plain')
def regionLookAtKml(request):
"""Handler for AJAX regionLookAtKml request
"""
region = get_object_or_404( models.StudyRegion, pk=1 )
return HttpResponse( KmlWrap( '<Document>' + region.lookAtKml() + '</Document>' ), content_type='text/plain' ) | bsd-3-clause | Python |
0c413737488895dcd7bcc55014e6d51fb3b29c7b | Update MWAA per 2021-06-21 changes | cloudtools/troposphere,cloudtools/troposphere | troposphere/mwaa.py | troposphere/mwaa.py | # Copyright (c) 2012-2018, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer
class UpdateError(AWSProperty):
props = {
"ErrorCode": (str, False),
"ErrorMessage": (str, False),
}
class LastUpdate(AWSProperty):
props = {
"CreatedAt": (str, False),
"Error": (UpdateError, False),
"Status": (str, False),
}
class ModuleLoggingConfiguration(AWSProperty):
props = {
"CloudWatchLogGroupArn": (str, False),
"Enabled": (boolean, False),
"LogLevel": (str, False),
}
class LoggingConfiguration(AWSProperty):
props = {
"DagProcessingLogs": (ModuleLoggingConfiguration, False),
"SchedulerLogs": (ModuleLoggingConfiguration, False),
"TaskLogs": (ModuleLoggingConfiguration, False),
"WebserverLogs": (ModuleLoggingConfiguration, False),
"WorkerLogs": (ModuleLoggingConfiguration, False),
}
class NetworkConfiguration(AWSProperty):
props = {
"SecurityGroupIds": (list, True),
"SubnetIds": (list, True),
}
class Environment(AWSObject):
resource_type = "AWS::MWAA::Environment"
props = {
"AirflowConfigurationOptions": (dict, False),
"AirflowVersion": (str, False),
"DagS3Path": (str, False),
"EnvironmentClass": (str, False),
"ExecutionRoleArn": (str, False),
"KmsKey": (str, False),
"LoggingConfiguration": (LoggingConfiguration, False),
"MaxWorkers": (integer, False),
"MinWorkers": (integer, False),
"Name": (str, True),
"NetworkConfiguration": (NetworkConfiguration, False),
"PluginsS3ObjectVersion": (str, False),
"PluginsS3Path": (str, False),
"RequirementsS3ObjectVersion": (str, False),
"RequirementsS3Path": (str, False),
"Schedulers": (integer, False),
"SourceBucketArn": (str, False),
"Tags": (Tags, False),
"WebserverAccessMode": (str, False),
"WeeklyMaintenanceWindowStart": (str, False),
}
| # Copyright (c) 2012-2018, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer
class UpdateError(AWSProperty):
props = {
"ErrorCode": (str, False),
"ErrorMessage": (str, False),
}
class LastUpdate(AWSProperty):
props = {
"CreatedAt": (str, False),
"Error": (UpdateError, False),
"Status": (str, False),
}
class ModuleLoggingConfiguration(AWSProperty):
props = {
"CloudWatchLogGroupArn": (str, False),
"Enabled": (boolean, False),
"LogLevel": (str, False),
}
class LoggingConfiguration(AWSProperty):
props = {
"DagProcessingLogs": (ModuleLoggingConfiguration, False),
"SchedulerLogs": (ModuleLoggingConfiguration, False),
"TaskLogs": (ModuleLoggingConfiguration, False),
"WebserverLogs": (ModuleLoggingConfiguration, False),
"WorkerLogs": (ModuleLoggingConfiguration, False),
}
class NetworkConfiguration(AWSProperty):
props = {
"SecurityGroupIds": (list, True),
"SubnetIds": (list, True),
}
class Environment(AWSObject):
resource_type = "AWS::MWAA::Environment"
props = {
"AirflowConfigurationOptions": (dict, False),
"AirflowVersion": (str, False),
"DagS3Path": (str, False),
"EnvironmentClass": (str, False),
"ExecutionRoleArn": (str, False),
"KmsKey": (str, False),
"LoggingConfiguration": (LoggingConfiguration, False),
"MaxWorkers": (integer, False),
"MinWorkers": (integer, False),
"Name": (str, True),
"NetworkConfiguration": (NetworkConfiguration, False),
"PluginsS3ObjectVersion": (str, False),
"PluginsS3Path": (str, False),
"RequirementsS3ObjectVersion": (str, False),
"RequirementsS3Path": (str, False),
"SourceBucketArn": (str, False),
"Tags": (Tags, False),
"WebserverAccessMode": (str, False),
"WeeklyMaintenanceWindowStart": (str, False),
}
| bsd-2-clause | Python |
ecc8c7c0bc869e9afa82a941586c840e80d91ba8 | Improve docstring | lvh/txscrypt | txscrypt/wrapper.py | txscrypt/wrapper.py | """
Wrapper around scrypt.
"""
import os
import scrypt
from twisted.cred import error
from twisted.internet import threads
NONCE_LENGTH = 64
MAX_TIME = .1
def verifyPassword(stored, provided):
"""
Verifies that the stored derived key was computed from the provided
password.
Returns a deferred that will either be fired with ``None`` or fail with
``twisted.cred.error.UnauthorizedLogin``.
"""
d = threads.deferToThread(scrypt.decrypt, stored, provided)
def _swallowResult(_result):
"""
Swallows the result (the original nonce), returns ``None``.
"""
return None
def _scryptErrback(failure):
"""
Catches the scrypt error and turns it into a Twisted Cred error.
"""
failure.trap(scrypt.error)
raise error.UnauthorizedLogin()
return d.addCallbacks(_swallowResult, _scryptErrback)
def checkPassword(stored, provided):
"""
Checks that the stored key was computed from the provided password.
Returns a deferred that will fire with ``True`` (if the password was
correct) or ``False`` otherwise.
"""
d = threads.deferToThread(scrypt.decrypt, stored, provided)
def _swallowResult(_result):
"""
Swallows the result (the original nonce), returns ``True``.
"""
return True
def _scryptErrback(failure):
"""
Catches scrypt errors and returns ``False``.
"""
failure.trap(scrypt.error)
return False
return d.addCallbacks(_swallowResult, _scryptErrback)
def computeKey(password, nonceLength=NONCE_LENGTH, maxTime=MAX_TIME):
"""
Computes a key from the password using a secure key derivation function.
"""
nonce = os.urandom(nonceLength)
return threads.deferToThread(scrypt.encrypt, nonce, password, maxTime)
| """
Wrapper around scrypt.
"""
import os
import scrypt
from twisted.cred import error
from twisted.internet import threads
NONCE_LENGTH = 64
MAX_TIME = .1
def verifyPassword(stored, provided):
"""
Verifies that the stored derived key was computed from the provided
password.
Returns a deferred that will either be fired with ``None`` or fail with
``twisted.cred.error.UnauthorizedLogin``.
"""
d = threads.deferToThread(scrypt.decrypt, stored, provided)
def _swallowResult(_result):
"""
Swallows the result (the original nonce), returns ``None``.
"""
return None
def _scryptErrback(failure):
"""
Catches the scrypt error and turns it into a Twisted Cred error.
"""
failure.trap(scrypt.error)
raise error.UnauthorizedLogin()
return d.addCallbacks(_swallowResult, _scryptErrback)
def checkPassword(stored, provided):
"""
Checks that the stored key was computed from the provided password.
"""
d = threads.deferToThread(scrypt.decrypt, stored, provided)
def _swallowResult(_result):
"""
Swallows the result (the original nonce), returns ``True``.
"""
return True
def _scryptErrback(failure):
"""
Catches scrypt errors and returns ``False``.
"""
failure.trap(scrypt.error)
return False
return d.addCallbacks(_swallowResult, _scryptErrback)
def computeKey(password, nonceLength=NONCE_LENGTH, maxTime=MAX_TIME):
"""
Computes a key from the password using a secure key derivation function.
"""
nonce = os.urandom(nonceLength)
return threads.deferToThread(scrypt.encrypt, nonce, password, maxTime)
| isc | Python |
c0933cd553043858f0a996edca2f4dc1b1da78c9 | clarify comment | CaptainDesAstres/Simple-Blender-Render-Manager | usefullFunctions.py | usefullFunctions.py | #!/usr/bin/python3.4
# -*-coding:Utf-8 -*
import time
def now(short = True):
'''return current date in short or long form (HH:MM:SS or DD.MM.AAAA-HH:MM:SS)'''
if short == True:
return time.strftime('%H:%M:%S')
else:
return time.strftime('%d.%m.%Y-%H:%M:%S')
def columnLimit(value, limit, begin = True, sep = '|'):
'''make fix sized text column'''
if type(value) is not str:
value = str(value)
if begin is True:
begin = limit# number of first caracter to display
if len(value) > limit:
return (value[0:begin-1]+'…'# first caracter\
+value[len(value)-(limit-begin):]# last caracter\
+sep) # column seperator
else:
return value + (' '*(limit-len(value))) +sep# add space to match needed size
def indexPrintList(l):
'''a function to print a list with element index'''
for i, v in enumerate(l):
print(str(i)+'- '+str(v))
class XML:
''' a class containing usefull method for XML'''
entities = {
'\'':''',
'"':'"',
'<':'<',
'>':'>'
}
def encode(txt):
'''replace XML entities by XML representation'''
txt.replace('&', '&')
for entity, code in XML.entities.items():
txt.replace(entity, code)
return txt
def decode(txt):
'''XML representation by the original character'''
for entity, code in XML.entities.items():
txt.replace(code, entity)
txt.replace('&', '&')
return txt
| #!/usr/bin/python3.4
# -*-coding:Utf-8 -*
import time
def now(short = True):
'''return short (HH:MM:SS) or long (DD.MM.AAAA-HH:MM:SS) formated current date strings'''
if short == True:
return time.strftime('%H:%M:%S')
else:
return time.strftime('%d.%m.%Y-%H:%M:%S')
def columnLimit(value, limit, begin = True, sep = '|'):
'''make fix sized text column'''
if type(value) is not str:
value = str(value)
if begin is True:
begin = limit# number of first caracter to display
if len(value) > limit:
return (value[0:begin-1]+'…'# first caracter\
+value[len(value)-(limit-begin):]# last caracter\
+sep) # column seperator
else:
return value + (' '*(limit-len(value))) +sep# add space to match needed size
def indexPrintList(l):
'''a function to print a list with element index'''
for i, v in enumerate(l):
print(str(i)+'- '+str(v))
class XML:
''' a class containing usefull method for XML'''
entities = {
'\'':''',
'"':'"',
'<':'<',
'>':'>'
}
def encode(txt):
'''replace XML entities by XML representation'''
txt.replace('&', '&')
for entity, code in XML.entities.items():
txt.replace(entity, code)
return txt
def decode(txt):
'''XML representation by the original character'''
for entity, code in XML.entities.items():
txt.replace(code, entity)
txt.replace('&', '&')
return txt
| mit | Python |
7b47a35ef77a71bc20570453d242f755e575d35a | Refactor index(). | s3rvac/git-branch-viewer,s3rvac/git-branch-viewer | viewer/web/views.py | viewer/web/views.py | """
viewer.web.views
~~~~~~~~~~~~~~~~
Views for the web.
:copyright: © 2014 by Petr Zemek <s3rvac@gmail.com> and contributors
:license: BSD, see LICENSE for more details
"""
from flask import render_template
from flask import g
from viewer import git
from . import app
@app.before_request
def before_request():
g.repo = git.Repo(app.config['GIT_REPO_PATH'])
@app.route('/')
def index():
context = {
'repo_name': g.repo.name,
'branches': g.repo.get_branches_on_remote(app.config['GIT_REMOTE'])
}
return render_template('index.html', **context)
| """
viewer.web.views
~~~~~~~~~~~~~~~~
Views for the web.
:copyright: © 2014 by Petr Zemek <s3rvac@gmail.com> and contributors
:license: BSD, see LICENSE for more details
"""
from flask import render_template
from flask import g
from viewer import git
from . import app
@app.before_request
def before_request():
g.repo = git.Repo(app.config['GIT_REPO_PATH'])
@app.route('/')
def index():
branches = g.repo.get_branches_on_remote(app.config['GIT_REMOTE'])
return render_template('index.html', repo_name=g.repo.name, branches=branches)
| bsd-3-clause | Python |
ce18939a9ee952906677b7a6be8a42e06dc21fb0 | Update Matrices | Echelon9/vulk,Echelon9/vulk,realitix/vulk,realitix/vulk | vulk/math/matrix.py | vulk/math/matrix.py | '''Matrix module
This module contains all class relative to Matrix
'''
import numpy as np
class Matrix():
'''Base class for Matrix'''
def __init__(self, values):
self._values = np.matrix(values, dtype=np.float32)
@property
def values(self):
return self._values
def set(self, values, offset=0):
self._values[offset:] = values
def mul(self, matrix):
self._values = np.matmul(self._values, matrix.values)
class Matrix3(Matrix):
'''Matrix3 class'''
dtype = ('Matrix3', np.float32, (3, 3))
def __init__(self, *args):
if not args:
super().__init__([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
elif np.shape(args) == (3, 3):
super().__init__(args)
else:
raise ValueError("Matrix3 needs 9 components")
class Matrix4(Matrix):
'''Matrix4 class'''
dtype = ('Matrix4', np.float32, (4, 4))
def __init__(self, *args):
if not args:
super().__init__([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
elif np.shape(args) == (4, 4):
super().__init__(args)
else:
raise ValueError("Matrix4 needs 16 components")
| '''Matrix module
This module contains all class relative to Matrix
'''
import numpy as np
class Matrix():
'''Base class for Matrix'''
def __init__(self, values):
self._values = np.array(values, dtype=np.float32)
@property
def values(self):
return self._values
class Matrix3(Matrix):
'''Matrix3 class'''
def __init__(self, *args):
if not args:
super().__init__([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
elif np.shape(args) == (3, 3):
super().__init__(args)
else:
raise ValueError("Matrix3 needs 9 components")
class Matrix4(Matrix):
'''Matrix4 class'''
def __init__(self, *args):
if not args:
super().__init__([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
elif np.shape(args) == (4, 4):
super().__init__(args)
else:
raise ValueError("Matrix4 needs 16 components")
| apache-2.0 | Python |
0c4d2e99d5b2ffef043ec8b35d47ee49f0d382f7 | Clean processors | tartopum/MPF,Vayel/MPF | src/processors.py | src/processors.py | import numpy as np
class FourierTransform:
def __init__(self, real=True, timestep=1):
# Define helpers
self.fft = np.fft.rfft if real else np.fft.fft
self.freq = np.fft.fftfreq
self.shift = np.fft.fftshift
self.real = real
self.timestep = timestep
def work(self, data):
# Prepare data
data = np.asarray(data)
# Run (R)FFT
spectrum = self.fft(data)
freq = self.freq(len(spectrum), d=self.timestep)
freq = self.shift(freq)
return freq, np.real(spectrum), np.imag(spectrum)
class LeastSquares:
def __init__(self):
pass
def work(self, A, B):
# B = AX
return np.linalg.lstsq(A, B)[0] # X
class MovingAverage:
def __init__(self, step):
self.step = step
def work(self, x, y):
# Not to alter args
x = x + []
y = y + []
x = x[self.step:len(x) - self.step]
average_y = []
for k in range(self.step, len(y)-self.step):
# Sum 'step' y before and after the current one,
# which is y[k]
s = sum(y[k-self.step:k+self.step+1])
average = float(s) / (2*self.step + 1)
average_y.append(average)
y = average_y
return x, y
| import numpy as np
class FourierTransform:
def __init__(self, real=True, timestep=1):
# Define helpers
self.fft = np.fft.rfft if real else np.fft.fft
self.freq = np.fft.fftfreq
self.shift = np.fft.fftshift
self.real = real
self.timestep = timestep
def process(self, data):
# Prepare data
data = np.asarray(data)
# Run (R)FFT
spectrum = self.fft(data)
freq = self.freq(len(spectrum), d=self.timestep)
freq = self.shift(freq)
return freq, np.real(spectrum), np.imag(spectrum)
class LeastSquares:
def __init__(self):
pass
def process(self, x, y):
_x = sum(x)/len(x)
_y = sum(y)/len(y)
a_num = 0
a_den = 0
for i in range(len(x)):
a_num += (x[i] - _x) * (y[i] - _y)
a_den += (x[i] - _x)**2
a = a_num/a_den
b = _y - a * _x
x = np.array(x)
return x, a*x + b
class MovingAverage:
def __init__(self, step, rep):
self.step = step
self.rep = rep
def process(self, x, y):
# Not to alter args
x = x + []
y = y + []
for i in range(self.rep):
x = x[self.step:len(x) - self.step]
average_y = []
for k in range(self.step, len(y)-self.step):
# Sum 'step' y before and after the current one,
# which is y[k]
s = sum(y[k-self.step:k+self.step+1])
average = float(s) / (2*self.step + 1)
average_y.append(average)
y = average_y
return x, y
| mit | Python |
2fdf35f8a9bf7a6249bc92236952655314a47080 | Add author and lincense variables | elbaschid/swapify | swapify/__init__.py | swapify/__init__.py | # -*- encoding: utf-8 -*-
__author__ = 'Sebastian Vetter'
__version__ = VERSION = '0.0.0'
__license__ = 'MIT'
| # -*- encoding: utf-8 -*-
__version__ = VERSION = '0.0.0' | mit | Python |
f90588ea092e57ad26535163f42647d9f532eeeb | update import statement | texastribune/armstrong.core.tt_sections,texastribune/armstrong.core.tt_sections,texastribune/armstrong.core.tt_sections | armstrong/core/tt_sections/utils.py | armstrong/core/tt_sections/utils.py | from django.conf import settings
from django.utils.module_loading import import_module
def get_module_and_model_names():
s = (getattr(settings, "ARMSTRONG_SECTION_ITEM_MODEL", False) or
"armstrong.apps.content.models.Content")
return s.rsplit(".", 1)
def get_item_model_class():
module_name, class_name = get_module_and_model_names()
module = import_module(module_name)
return getattr(module, class_name)
def filter_item_rels(rels):
model_rels = []
ItemModel = get_item_model_class()
for related in rels:
if issubclass(ItemModel, related.related_model):
model_rels.append(related)
return model_rels
def get_section_relations(Section):
"""Find every relationship between section and the item model."""
all_rels = [f for f in Section._meta.get_fields(include_hidden=True) if
f.is_relation and f.auto_created]
return filter_item_rels(all_rels)
def get_section_many_to_many_relations(Section):
m2m_rels = [f for f in Section._meta.get_fields(include_hidden=True) if
f.many_to_many and f.auto_created]
return filter_item_rels(m2m_rels)
| from django.conf import settings
from django.utils.importlib import import_module
def get_module_and_model_names():
s = (getattr(settings, "ARMSTRONG_SECTION_ITEM_MODEL", False) or
"armstrong.apps.content.models.Content")
return s.rsplit(".", 1)
def get_item_model_class():
module_name, class_name = get_module_and_model_names()
module = import_module(module_name)
return getattr(module, class_name)
def filter_item_rels(rels):
model_rels = []
ItemModel = get_item_model_class()
for related in rels:
if issubclass(ItemModel, related.related_model):
model_rels.append(related)
return model_rels
def get_section_relations(Section):
"""Find every relationship between section and the item model."""
all_rels = [f for f in Section._meta.get_fields(include_hidden=True) if
f.is_relation and f.auto_created]
return filter_item_rels(all_rels)
def get_section_many_to_many_relations(Section):
m2m_rels = [f for f in Section._meta.get_fields(include_hidden=True) if
f.many_to_many and f.auto_created]
return filter_item_rels(m2m_rels)
| apache-2.0 | Python |
bb05f21ca47f636197764fb9518282318270d663 | Add tests around hatband.autodiscover functionality | texastribune/armstrong.hatband,texastribune/armstrong.hatband,armstrong/armstrong.hatband,armstrong/armstrong.hatband,armstrong/armstrong.hatband,texastribune/armstrong.hatband | armstrong/hatband/tests/__init__.py | armstrong/hatband/tests/__init__.py | from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from contextlib import contextmanager
import fudge
from .widgets import *
from ... import hatband
def generate_random_registry():
from ._utils import random_range
return dict([("key%d" % i, i) for i in random_range()])
@contextmanager
def fake_autodiscover():
from django.contrib import admin
autodiscover = fudge.Fake().is_callable()
with fudge.patched_context(admin, "autodiscover", autodiscover):
yield
@contextmanager
def fake_django_site_registry(test):
with fake_autodiscover():
random_registry = generate_random_registry()
from django.contrib import admin
site = fudge.Fake()
site.has_attr(_registry=random_registry)
with fudge.patched_context(admin, "site", site):
test.assertEqual( len(random_registry.items()),
len(site._registry.items()), msg="sanity check")
yield random_registry
class AutodiscoverTestCase(HatbandTestCase):
def setUp(self):
from copy import copy
self.original_site = copy(hatband.site)
hatband.site._registry = {}
def tearDown(self):
hatband.site = self.original_site
@fudge.test
def test_dispatches_to_djangos_autodiscover(self):
from django.contrib import admin
autodiscover = fudge.Fake().is_callable().expects_call()
with fudge.patched_context(admin, "autodiscover", autodiscover):
hatband.autodiscover()
@fudge.test
def test_has_a_copy_of_main_django_registry(self):
random_registry = generate_random_registry()
from django.contrib import admin
site = fudge.Fake()
site.has_attr(_registry=random_registry)
with fake_autodiscover():
with fudge.patched_context(admin, "site", site):
hatband.autodiscover()
for key in random_registry.keys():
self.assertTrue(key in hatband.site._registry)
@fudge.test
def test_has_hatband_registered_plus_(self):
with fake_django_site_registry(self) as random_registry:
from .hatband_support.models import TestCategory
self.assertFalse(TestCategory in hatband.site._registry.keys(),
msg="Sanity check")
hatband.site.register(TestCategory)
self.assertTrue(TestCategory in hatband.site._registry.keys(),
msg="Sanity check")
hatband.autodiscover()
registry = hatband.site._registry.items()
self.assertTrue(TestCategory in hatband.site._registry.keys(),
msg="TestCategory should still be in the registry")
@fudge.test
def test_original_django_sites_registry_remains_untouched(self):
with fake_django_site_registry(self) as random_registry:
from .hatband_support.models import TestCategory
self.assertFalse(TestCategory in random_registry.keys())
hatband.site.register(TestCategory)
hatband.autodiscover()
self.assertFalse(TestCategory in random_registry.keys())
| from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from .widgets import *
| apache-2.0 | Python |
4d1b3b2ae58f214d8d78e8b853c7cf5570c097ff | Update to guestagent #169 | kostaslamda/win-installer,xenserver/win-installer,kostaslamda/win-installer,OwenSmith/win-installer,xenserver/win-installer,kostaslamda/win-installer,OwenSmith/win-installer,xenserver/win-installer,OwenSmith/win-installer,xenserver/win-installer,kostaslamda/win-installer,kostaslamda/win-installer,OwenSmith/win-installer,OwenSmith/win-installer,xenserver/win-installer | manifestspecific.py | manifestspecific.py | # Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
secureserver = r'\\10.80.13.10\distfiles\distfiles\WindowsBuilds'
localserver = r'\\camos.uk.xensource.com\build\windowsbuilds\WindowsBuilds'
build_tar_source_files = {
"xenguestagent" : r'xenguestagent.git\169\xenguestagent.tar',
"xenbus" : r'xenbus-patchq.git\63\xenbus.tar',
"xenvif" : r'xenvif-patchq.git\60\xenvif.tar',
"xennet" : r'xennet-patchq.git\41\xennet.tar',
"xeniface" : r'xeniface-patchq.git\26\xeniface.tar',
"xenvbd" : r'xenvbd-patchq.git\124\xenvbd.tar',
"xenvss" : r'standard-lcm\16\xenvss-7.tar',
}
all_drivers_signed = False
| # Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
secureserver = r'\\10.80.13.10\distfiles\distfiles\WindowsBuilds'
localserver = r'\\camos.uk.xensource.com\build\windowsbuilds\WindowsBuilds'
build_tar_source_files = {
"xenguestagent" : r'xenguestagent.git\168\xenguestagent.tar',
"xenbus" : r'xenbus-patchq.git\63\xenbus.tar',
"xenvif" : r'xenvif-patchq.git\60\xenvif.tar',
"xennet" : r'xennet-patchq.git\41\xennet.tar',
"xeniface" : r'xeniface-patchq.git\26\xeniface.tar',
"xenvbd" : r'xenvbd-patchq.git\124\xenvbd.tar',
"xenvss" : r'standard-lcm\16\xenvss-7.tar',
}
all_drivers_signed = False
| bsd-2-clause | Python |
40897be402bd05ed5fb53e116f03d2d954720245 | Fix plate-solving on local development mode | astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin | astrobin_apps_platesolving/utils.py | astrobin_apps_platesolving/utils.py | # Python
import urllib2
# Django
from django.conf import settings
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
def getFromStorage(image, alias):
def encoded(path):
return urllib2.quote(path.encode('utf-8'))
url = image.thumbnail(alias)
if "://" in url:
# We are getting the full path and must only encode the part after the protocol
# (we assume that the hostname is ASCII)
protocol, path = url.split("://")
url = protocol + encoded(path)
else:
url = settings.BASE_URL + encoded(url)
headers = { 'User-Agent' : 'Mozilla/5.0' }
req = urllib2.Request(url, None, headers)
img = NamedTemporaryFile(delete = True)
img.write(urllib2.urlopen(req).read())
img.flush()
img.seek(0)
return File(img)
| # Python
import urllib2
# Django
from django.conf import settings
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
def getFromStorage(image, alias):
url = image.thumbnail(alias)
if "://" in url:
url = url.split('://')[1]
else:
url = settings.BASE_URL + url
url = 'http://' + urllib2.quote(url.encode('utf-8'))
headers = { 'User-Agent' : 'Mozilla/5.0' }
req = urllib2.Request(url, None, headers)
img = NamedTemporaryFile(delete = True)
img.write(urllib2.urlopen(req).read())
img.flush()
img.seek(0)
return File(img)
| agpl-3.0 | Python |
1165cb98c364d2eca9d313b02c710bc9a26ca3f0 | Fix borg test. | Dioptas/pymatgen,migueldiascosta/pymatgen,rousseab/pymatgen,Bismarrck/pymatgen,migueldiascosta/pymatgen,sonium0/pymatgen,Bismarrck/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,yanikou19/pymatgen,Dioptas/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,rousseab/pymatgen,Bismarrck/pymatgen,rousseab/pymatgen,sonium0/pymatgen,sonium0/pymatgen,ctoher/pymatgen,yanikou19/pymatgen,ctoher/pymatgen | pymatgen/apps/borg/tests/test_queen.py | pymatgen/apps/borg/tests/test_queen.py | """
Created on Mar 18, 2012
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"
import unittest
import os
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def setUp(self):
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, 1)
def test_get_data(self):
data = self.queen.get_data()
self.assertEqual(len(data), 2)
def test_load_data(self):
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| """
Created on Mar 18, 2012
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"
import unittest
import os
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def setUp(self):
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, None)
def test_get_data(self):
data = self.queen.get_data()
self.assertEqual(len(data), 2)
def test_load_data(self):
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| mit | Python |
64888afcd8696a8159aa2eda2bb78681ac22d668 | Prepare v0.9.3 | jml/pyrsistent,jkbjh/pyrsistent,Futrell/pyrsistent,Futrell/pyrsistent,jkbjh/pyrsistent,jml/pyrsistent,jkbjh/pyrsistent,tobgu/pyrsistent,tobgu/pyrsistent,Futrell/pyrsistent,tobgu/pyrsistent,jml/pyrsistent | _pyrsistent_version.py | _pyrsistent_version.py | __version__ = '0.9.3'
| __version__ = '0.9.2'
| mit | Python |
625279d8b76e4187ee90e9c2fd4a84553d980e02 | Use 10 second buckets instead of 1 second bucket | simonw/django-redis-monitor | django_redis_monitor/redis_monitor.py | django_redis_monitor/redis_monitor.py | import datetime # we use utcnow to insulate against daylight savings errors
import redis
class RedisMonitor(object):
def __init__(self, prefix=''):
assert prefix and ' ' not in prefix, \
'prefix (e.g. "rps") is required and must not contain spaces'
self.prefix = prefix
self.r = redis.Redis()
def _hash_and_slot(self, dt = None):
dt = dt or datetime.datetime.utcnow()
hash = dt.strftime('%Y%m%d:%H') # 20100709:12 = 12th hour of that day
slot = '%02d:%d' % ( # 24:3 for seconds 30-39 in minute 24
dt.minute, dt.second / 10
)
return ('%s:%s' % (self.prefix, hash), slot)
def _calculate_start(self, hours, minutes, seconds, now = None):
now = now or datetime.datetime.utcnow()
delta = (60 * 60 * hours) + (60 * minutes) + seconds
return now - datetime.timedelta(seconds = delta)
def record_hit(self):
self.record_hits(1)
def record_hits(self, num_hits):
hash, slot = self._hash_and_slot()
self.r.hincrby(hash, slot, num_hits)
def record_hit_with_weight(self, weight):
self.record_hits_with_total_weight(1, weight)
def record_hits_with_total_weight(self, num_hits, total_weight):
hash, slot = self._hash_and_slot()
self.r.hincrby(hash, slot, num_hits)
self.r.hincrby(hash, slot + 'd', total_weight)
def get_recent_hits_per_second(self, hours = 0, minutes = 0, seconds = 0):
start = self._calculate_start(hours, minutes, seconds)
def get_recent_avgs_per_second(self, hours = 0, minutes = 0, seconds = 0):
pass
def get_recent_hits_per_minute(self, hours = 0, minutes = 0, seconds = 0):
pass
def get_recent_avgs_per_minute(self, hours = 0, minutes = 0, seconds = 0):
pass
| import datetime # we use utcnow to insulate against daylight savings errors
import redis
class RedisMonitor(object):
def __init__(self, prefix=''):
assert prefix and ' ' not in prefix, \
'prefix (e.g. "rps") is required and must not contain spaces'
self.prefix = prefix
self.r = redis.Redis()
def _hash_and_slot(self, dt = None):
dt = dt or datetime.datetime.utcnow()
hash = dt.strftime('%Y%m%d:%H') # 20100709:12 = 12th hour of that day
slot = dt.strftime('%M%S') # 2405 for 5th second of 24th minute
return ('%s:%s' % (self.prefix, hash), slot)
def _calculate_start(self, hours, minutes, seconds, now = None):
now = now or datetime.datetime.utcnow()
delta = (60 * 60 * hours) + (60 * minutes) + seconds
return now - datetime.timedelta(seconds = delta)
def record_hit(self):
self.record_hits(1)
def record_hits(self, num_hits):
hash, slot = self._hash_and_slot()
self.r.hincrby(hash, slot, num_hits)
def record_hit_with_weight(self, weight):
self.record_hits_with_total_weight(1, weight)
def record_hits_with_total_weight(self, num_hits, total_weight):
hash, slot = self._hash_and_slot()
self.r.hincrby(hash, slot, num_hits)
self.r.hincrby(hash, slot + 'd', total_weight)
def get_recent_hits_per_second(self, hours = 0, minutes = 0, seconds = 0):
start = self._calculate_start(hours, minutes, seconds)
def get_recent_avgs_per_second(self, hours = 0, minutes = 0, seconds = 0):
pass
def get_recent_hits_per_minute(self, hours = 0, minutes = 0, seconds = 0):
pass
def get_recent_avgs_per_minute(self, hours = 0, minutes = 0, seconds = 0):
pass
| bsd-2-clause | Python |
6aba9d1f6de927b2a78530ff36edf0603e4c4e43 | Fix bugs in demo_threadcrawler.py | tosh1ki/pyogi,tosh1ki/pyogi | doc/sample_code/demo_threadcrawler.py | doc/sample_code/demo_threadcrawler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
2chの「将棋の棋譜貼り専門スレッド」から棋譜を収集するデモ
'''
import re
import sys
sys.path.append('./../../')
from pyogi.kifu import Kifu
from pyogi.threadcrawler import ThreadCrawler
from pyogi.ki2converter import Ki2converter
if __name__ == '__main__':
# Crawl html that contains kifu of KI2 format
# From "将棋の棋譜貼り専門スレッド Part121 [転載禁止]©2ch.net"
url = 'http://peace.2ch.net/test/read.cgi/bgame/1428330063/'
crawler = ThreadCrawler()
crawler.get_response(url)
matched = crawler.extract_kakikomitxt()
# Extract one of kifu text
ki2 = [m for m in matched if '▲' in m and '△' in m]
ki2txt = re.sub(' (?:<br> ?)+', '\n', ki2[0])
# Convert the kifu from KI2 to CSA
ki2converter = Ki2converter()
ki2converter.from_txt(ki2txt)
csa = ki2converter.to_csa()
# Print last state
kifu = Kifu()
kifu.from_csa(csa)
kifu.print_state(mode='mpl')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
2chの「将棋の棋譜貼り専門スレッド」から棋譜を収集するデモ
'''
import sys
sys.path.append('./../../')
from pyogi.kifu import Kifu
from pyogi.threadcrawler import ThreadCrawler
from pyogi.ki2converter import Ki2converter
if __name__ == '__main__':
# Crawl html that contains kifu of KI2 format
# From "将棋の棋譜貼り専門スレッド Part121 [転載禁止]©2ch.net"
url = 'http://peace.2ch.net/test/read.cgi/bgame/1428330063/'
crawler = ThreadCrawler()
crawler.get_response(url)
matched = crawler.extract_kifutxt()
# Extract one of kifu text
ki2 = [m[10] for m in matched if '▲' in m[10] and '△' in m[10]]
ki2txt = re.sub(' (?:<br> )+', '\n', ki2[0])
# Convert the kifu from KI2 to CSA
ki2converter = Ki2converter()
ki2converter.from_txt(ki2txt)
csa = ki2converter.to_csa()
# Print last state
kifu = Kifu()
kifu.from_csa(csa)
kifu.print_state(mode='mpl')
| mit | Python |
81364d11dd935409fddfdeca865e30220960f4ba | Bump version | j4mie/django-activelink | activelink/__init__.py | activelink/__init__.py | __version__ = '0.2'
__author__ = 'Jamie Matthews (http://j4mie.org) <jamie.matthews@gmail.com>'
| __version__ = '0.1'
__author__ = 'Jamie Matthews (http://j4mie.org) <jamie.matthews@gmail.com>'
| unlicense | Python |
8a0ec58b2a01ee179fcbc4c3c722df37f42b7687 | Add more test cases | nltk/nltk,nltk/nltk,nltk/nltk | nltk/test/unit/test_aline.py | nltk/test/unit/test_aline.py | # -*- coding: utf-8 -*-
"""
Unit tests for nltk.metrics.aline
"""
from __future__ import unicode_literals
import unittest
from nltk.metrics import aline
class TestAline(unittest.TestCase):
"""
Test Aline algorithm for aligning phonetic sequences
"""
def test_aline(self):
result = aline.align('θin', 'tenwis')
expected = [[('θ', 't'), ('i', 'e'), ('n', 'n'), ('-', 'w'), ('-', 'i'), ('-', 's')]]
self.assertEqual(result, expected)
result = aline.align('jo', 'ʒə')
expected = [[('j', 'ʒ'), ('o', 'ə')]]
self.assertEqual(result, expected)
result = aline.align('pematesiweni', 'pematesewen')
expected = [[('p', 'p'), ('e', 'e'), ('m', 'm'), ('a', 'a'), ('t', 't'), ('e', 'e'),
('s', 's'), ('i', 'e'), ('w', 'w'), ('e', 'e'), ('n', 'n'), ('i', '-')]]
self.assertEqual(result, expected)
result = aline.align('tuwθ', 'dentis')
expected = [[('t', 'd'), ('u', 'e'), ('w', '-'), ('-', 'n'), ('-', 't'), ('-', 'i'), ('θ', 's')]]
self.assertEqual(result, expected)
result = aline.align('wən', 'unus')
expected = [[('ə', 'u'), ('n', 'n'), ('-', 'u'), ('-', 's')]]
self.assertEqual(result, expected)
result = aline.align('flow', 'fluere')
expected = [[('f', 'f'), ('l', 'l'), ('o', 'u'), ('w', '-'), ('-', 'e'), ('-', 'r'), ('-', 'e')]]
self.assertEqual(result, expected)
result = aline.align('wat', 'vas')
expected = [[('w', 'v'), ('a', 'a'), ('t', 's')]]
self.assertEqual(result, expected)
result = aline.align('boka', 'buʃ')
expected = [[('b', 'b'), ('o', 'u'), ('k', 'ʃ'), ('a', '-')]]
self.assertEqual(result, expected)
result = aline.align('ombre', 'om')
expected = [[('o', 'o'), ('m', 'm'), ('b', '-'), ('r', '-'), ('e', '-')]]
self.assertEqual(result, expected)
result = aline.align('feðər', 'fEdər')
expected = [[('f', 'f'), ('e', 'E'), ('ð', 'd'), ('ə', 'ə'), ('r', 'r')]]
self.assertEqual(result, expected)
| # -*- coding: utf-8 -*-
"""
Unit tests for nltk.metrics.aline
"""
from __future__ import unicode_literals
import unittest
from nltk.metrics import aline
class TestAline(unittest.TestCase):
"""
Test Aline algorithm for aligning phonetic sequences
"""
def test_aline(self):
result = aline.align('θin', 'tenwis')
expected = [[('θ', 't'), ('i', 'e'), ('n', 'n'), ('-', 'w'), ('-', 'i'), ('-', 's')]]
self.assertEqual(result, expected)
| apache-2.0 | Python |
3ef08477fa6131d67020cb000657cb9c2b78b06a | Bump graql and client-java versions | lolski/grakn,graknlabs/grakn,lolski/grakn,lolski/grakn,graknlabs/grakn,graknlabs/grakn,graknlabs/grakn,lolski/grakn | dependencies/graknlabs/dependencies.bzl | dependencies/graknlabs/dependencies.bzl | #
# GRAKN.AI - THE KNOWLEDGE GRAPH
# Copyright (C) 2018 Grakn Labs Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def graknlabs_graql():
git_repository(
name = "graknlabs_graql",
remote = "https://github.com/graknlabs/graql",
commit = "b0a7a7488a51fedf82a905febef2c9421594193e",
)
def graknlabs_client_java():
git_repository(
name = "graknlabs_client_java",
remote = "https://github.com/graknlabs/client-java",
commit = "ec2470f7f051981c0127579f4cd03e1ffd529f1b",
)
def graknlabs_build_tools():
git_repository(
name = "graknlabs_build_tools",
remote = "https://github.com/graknlabs/build-tools",
commit = "b5bc539c4ee6633188b270aeca328419a0d7e096", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_build_tools
) | #
# GRAKN.AI - THE KNOWLEDGE GRAPH
# Copyright (C) 2018 Grakn Labs Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def graknlabs_graql():
git_repository(
name = "graknlabs_graql",
remote = "https://github.com/graknlabs/graql",
commit = "2b4ea9781ba164104e4e00fb2ffe8d0928f14848",
)
def graknlabs_client_java():
git_repository(
name = "graknlabs_client_java",
remote = "https://github.com/graknlabs/client-java",
commit = "cf13125f696481549710c1dc5b146bc14374d2e1",
)
def graknlabs_build_tools():
git_repository(
name = "graknlabs_build_tools",
remote = "https://github.com/graknlabs/build-tools",
commit = "b5bc539c4ee6633188b270aeca328419a0d7e096", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_build_tools
) | agpl-3.0 | Python |
c8e2eb7e56e6ebc014983d856007cdad324e2242 | Fix Python3.x error related to unicode. | jojanper/draalcore,jojanper/draalcore,jojanper/draalcore | draalcore/auth/registration/models.py | draalcore/auth/registration/models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Registration model(s)"""
# System imports
import hashlib
import random
from django.db import models
from django.contrib.auth.models import User
try:
bool(type(unicode))
except NameError:
unicode = str
# Project imports
from draalcore.models.base_model import ModelLogger, ModelBaseManager
class UserAccountManager(ModelBaseManager):
def register_user(self, **kwargs):
new_user = User.objects.create_user(kwargs['username'], kwargs['email'], kwargs['password'])
new_user.first_name = kwargs['first_name']
new_user.last_name = kwargs['last_name']
new_user.is_active = False
new_user.save()
# user_account_profile = self.create_account_profile(new_user)
def create_account_profile(self, user):
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt + username).hexdigest()
return self.create(user=user, activation_key=activation_key)
class UserAccountProfile(ModelLogger):
"""User account management data"""
EXTERNAL_API = False
user = models.OneToOneField(User, help_text='User', related_name='account_profiles')
activation_key = models.CharField(max_length=40, help_text='Account activation key')
objects = UserAccountManager()
class Meta:
db_table = 'useraccount'
def __str__(self):
return "{}({},{},{})".format(self.__class__.__name__, self.id, self.user, self.activation_key)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Registration model(s)"""
# System imports
import hashlib
import random
from django.db import models
from django.contrib.auth.models import User
# Project imports
from draalcore.models.base_model import ModelLogger, ModelBaseManager
class UserAccountManager(ModelBaseManager):
def register_user(self, **kwargs):
new_user = User.objects.create_user(kwargs['username'], kwargs['email'], kwargs['password'])
new_user.first_name = kwargs['first_name']
new_user.last_name = kwargs['last_name']
new_user.is_active = False
new_user.save()
# user_account_profile = self.create_account_profile(new_user)
def create_account_profile(self, user):
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt + username).hexdigest()
return self.create(user=user, activation_key=activation_key)
class UserAccountProfile(ModelLogger):
"""User account management data"""
EXTERNAL_API = False
user = models.OneToOneField(User, help_text='User', related_name='account_profiles')
activation_key = models.CharField(max_length=40, help_text='Account activation key')
objects = UserAccountManager()
class Meta:
db_table = 'useraccount'
def __str__(self):
return "{}({},{},{})".format(self.__class__.__name__, self.id, self.user, self.activation_key)
| mit | Python |
ffbc1655b330df6716c737c36f7d05e2e91f133e | Revise functions/vars/comments/main() for clarification | bowen0701/algorithms_data_structures | alg_decimal_to_base.py | alg_decimal_to_base.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
DIGITS = '0123456789ABCDEF'
def decimal_to_base2(decimal):
"""Convert decimal number to binary number by iteration.
Time complexity: O(d/2).
Space complexity: O(d/2).
"""
# Push remainders into stack.
rem_stack = []
while decimal > 0:
decimal, rem = divmod(decimal, 2)
rem_stack.append(rem)
# Pop remainders and concat them into binary number string.
bin_num = ''
while rem_stack:
bin_num += str(rem_stack.pop())
return bin_num
def decimal_to_base_iter(decimal, base):
"""Convert decimal number to base 2 ~ 16 by iteration.
Time complexity: O(d/b).
Space complexity: O(d/b).
"""
# Push remainders into stack.
rem_stack = []
while decimal > 0:
decimal, rem = divmod(decimal, base)
rem_stack.append(rem)
# Pop remainders and concat them into base number string.
base_num = ''
while rem_stack:
base_num += DIGITS[rem_stack.pop()]
return base_num
def _decimal_to_base_recur_util(decimal, base, rem_stack):
#
if decimal < base:
rem_stack.append(decimal)
else:
decimal, rem = divmod(decimal, base)
rem_stack.append(rem)
_decimal_to_base_recur_util(decimal, base, rem_stack)
def decimal_to_base_recur(decimal, base):
"""Convert decimal number to base 2 ~ 16 by recursion with stack.
Time complexity: O(d/b).
Space complexity: O(d/b).
"""
# Push remainders into stack.
rem_stack = []
_decimal_to_base_recur_util(decimal, base, rem_stack)
# Pop remainders and concat them into base number string.
base_num = ''
while rem_stack:
base_num += DIGITS[rem_stack.pop()]
return base_num
def main():
import time
# Binary: (37)_10 = (100101)_2
decimal = 37
start_time = time.time()
print('By iter w/ base 2: {} -> {}'
.format(decimal, decimal_to_base2(decimal)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By iter w/ general base 2: {} -> {}'
.format(decimal, decimal_to_base_iter(decimal, 2)))
print('Time: {}'.format(time.time() - start_time))
# Hexadecimal: (1728)_10 = (6C0)_16
decimal = 1728
start_time = time.time()
print('By iter: {} -> {}'
.format(decimal, decimal_to_base_iter(decimal, 16)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By recur: {} -> {}'
.format(decimal, decimal_to_base_recur(decimal, 16)))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
digits = '0123456789ABCDEF'
def decimal_to_base2(decimal):
"""Convert decimal number to binary number."""
rem_stack = []
while decimal > 0:
decimal, rem = divmod(decimal, 2)
rem_stack.append(rem)
bin_num = ''
while rem_stack:
bin_num += str(rem_stack.pop())
return bin_num
def decimal_to_base_iter(decimal, base):
"""Convert decimal number to base 2 ~ 16.
Time complexity: O(d/b).
Space complexity: O(d/b).
"""
rem_stack = []
while decimal > 0:
decimal, rem = divmod(decimal, base)
rem_stack.append(rem)
base_num = ''
while rem_stack:
base_num += digits[rem_stack.pop()]
return base_num
def _decimal_to_base_recur(decimal, base, rem_stack):
if decimal < base:
rem_stack.append(decimal)
else:
decimal, rem = divmod(decimal, base)
rem_stack.append(rem)
_decimal_to_base_recur(decimal, base, rem_stack)
def decimal_to_base_recur(decimal, base):
"""Convert decimal number to base 2 ~ 16 by recussion with Stack.
Time complexity: O(d/b).
Space complexity: O(d/b).
"""
rem_stack = []
_decimal_to_base_recur(decimal, base, rem_stack)
base_num = ''
while rem_stack:
base_num += digits[rem_stack.pop()]
return base_num
def main():
# Binary: (37)_10 = (100101)_2
decimal = 37
print('Iter: {} -> {}'
.format(decimal, decimal_to_base2(decimal)))
print('Iter: {} -> {}'
.format(decimal, decimal_to_base_iter(decimal, 2)))
# Hexadecimal: (1728)_10 = (6C0)_16
decimal = 1728
print('Iter: {} -> {}'
.format(decimal, decimal_to_base_iter(decimal, 16)))
print('Recur: {} -> {}'
.format(decimal, decimal_to_base_recur(decimal, 16)))
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
7c43327af91dc672c02099c788221e6b3073612a | Complete traverse_dfs() | bowen0701/algorithms_data_structures | alg_knight_tour_dfs.py | alg_knight_tour_dfs.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from collections import defaultdict
from itertools import product
MOVE_OFFSETS = (
(-1, -2), (1, -2),
(-2, -1), (2, -1),
(-2, 1), (2, 1),
(-1, 2), (1, 2)
)
def _legal_moves_from(row, col, board_size):
for row_offset, col_offset in MOVE_OFFSETS:
move_row, move_col = row + row_offset, col + col_offset
if 0 <= move_row < board_size and 0 <= move_col < board_size:
yield move_row, move_col
def _add_edge(graph_dict, vertex, move_vertex):
graph_dict[vertex].add(move_vertex)
graph_dict[move_vertex].add(vertex)
def build_knight_tour_graph(board_size):
graph_dict = defaultdict(set)
for row, col in product(xrange(board_size), xrange(board_size)):
for move_row, move_col in _legal_moves_from(row, col, board_size):
_add_edge(graph_dict, (row, col), (move_row, move_col))
return graph_dict
def get_first_next_vertex(next_vertices):
for vertex in next_vertices:
if vertex:
return vertex
return None
def traverse_dfs(path_ls, current_vertex,
graph_dict, total_squares, sorted_func=None):
"""Depth First Search traverse."""
# Including the current square, if we have visited all squares,
# just return the whole path as the solution.
if len(path_ls) + 1 == total_squares:
return path_ls + [current_vertex]
legal_vertices = graph_dict[current_vertex] - set(path_ls)
if not legal_vertices:
# No legal neighbor vertices, so dead end.
return False
# Then try all valid paths.
next_vertices = sorted(legal_vertices, sorted_func)
return get_first_next_vertex(
traverse_dfs(
path_ls + [current_vertex], vertex
graph_dict, total_squares, sorted_func=sorted_func)
for vertex in next_vertices)
def knight_tour_dfs(board_size, sorted_func=None):
graph_dict = build_knight_tour_graph(board_size)
total_squares = graph_dict * graph_dict
pass
def main():
board_size = 5
graph_dict = build_knight_tour_graph(board_size)
print(graph_dict)
if __name__ == '__main__':
main()
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from collections import defaultdict
from itertools import product
MOVE_OFFSETS = (
(-1, -2), (1, -2),
(-2, -1), (2, -1),
(-2, 1), (2, 1),
(-1, 2), (1, 2)
)
def _legal_moves_from(row, col, board_size):
for row_offset, col_offset in MOVE_OFFSETS:
move_row, move_col = row + row_offset, col + col_offset
if 0 <= move_row < board_size and 0 <= move_col < board_size:
yield move_row, move_col
def _add_edge(graph_dict, vertex, move_vertex):
graph_dict[vertex].add(move_vertex)
graph_dict[move_vertex].add(vertex)
def build_knight_tour_graph(board_size):
graph_dict = defaultdict(set)
for row, col in product(xrange(board_size), xrange(board_size)):
for move_row, move_col in _legal_moves_from(row, col, board_size):
_add_edge(graph_dict, (row, col), (move_row, move_col))
return graph_dict
def _get_first_next_vertex(next_vertices):
for vertex in next_vertices:
if vertex:
return vertex
return None
def traverse_dfs(path_ls, current_vertex, graph_dict, total_squares,
sorted_func=None):
"""Depth First Search traverse."""
# Including the current square, if we have visited all squares,
# just return the whole path as the solution.
if len(path_ls) + 1 == total_squares:
return path_ls + [current_vertex]
legal_vertices = graph_dict[current_vertex] - set(path_ls)
if not legal_vertices:
# No legal neighbor vertices, so dead end.
return False
# Then try all valid paths.
next_vertices = sorted(legal_vertices, sorted_func)
pass
def knight_tour_dfs(board_size, sorted_func=None):
graph_dict = build_knight_tour_graph(board_size)
total_squares = graph_dict * graph_dict
pass
def main():
board_size = 5
graph_dict = build_knight_tour_graph(board_size)
print(graph_dict)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
f462be35ca867fd2b487ac151f1c3be26eda6cbb | FIX report verson | sysadminmatmoz/ingadhoc,ingadhoc/odoo-addons,bmya/odoo-addons,adhoc-dev/odoo-addons,ingadhoc/product,adhoc-dev/odoo-addons,sysadminmatmoz/ingadhoc,ingadhoc/odoo-addons,ingadhoc/stock,ingadhoc/sale,adhoc-dev/account-financial-tools,dvitme/odoo-addons,bmya/odoo-addons,ingadhoc/sale,ingadhoc/account-financial-tools,ClearCorp/account-financial-tools,ingadhoc/account-payment,adhoc-dev/account-financial-tools,ClearCorp/account-financial-tools,ingadhoc/odoo-addons,bmya/odoo-addons,ingadhoc/product,ingadhoc/sale,sysadminmatmoz/ingadhoc,ingadhoc/sale,dvitme/odoo-addons,ingadhoc/account-analytic,ingadhoc/account-invoicing,dvitme/odoo-addons,ingadhoc/partner,adhoc-dev/odoo-addons | report_extended_voucher/__openerp__.py | report_extended_voucher/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Report Configurator - Account Voucher',
'version': '8.0.2.2.0',
'category': 'Reporting Subsystem',
'sequence': 14,
'summary': '',
'description': """
Report Configurator - Account Voucher
=====================================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'report_extended',
'account_voucher',
],
'data': [
'views/report_view.xml',
'views/account_voucher_view.xml',
'views/account_action_data.xml',
'voucher_email_data.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Report Configurator - Account Voucher',
'version': '8.0.1.2.0',
'category': 'Reporting Subsystem',
'sequence': 14,
'summary': '',
'description': """
Report Configurator - Account Voucher
=====================================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'report_extended',
'account_voucher',
],
'data': [
'views/report_view.xml',
'views/account_voucher_view.xml',
'views/account_action_data.xml',
'voucher_email_data.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
4789e8e8024fd25e1d0191ee8de91ef8a97c9943 | Fix case study redirect | uktrade/navigator,uktrade/navigator,uktrade/navigator,uktrade/navigator | app/casestudy/views.py | app/casestudy/views.py | from django.views.generic import TemplateView
from django.shortcuts import redirect
from thumber.decorators import thumber_feedback
from casestudy.casestudies import CASE_STUDIES
@thumber_feedback
class CaseStudyView(TemplateView):
"""
The simple view for a case story page
"""
comment_placeholder = "We are sorry to hear that. Would you tell us why?"
submit_wording = "Send feedback"
template_name = 'case_study.html'
def dispatch(self, *args, **kwargs):
try:
self.story = CASE_STUDIES[self.kwargs['slug']]
return super().dispatch(*args, **kwargs)
except:
return redirect('/')
def get_other_stories(self):
other_stories = []
for key, value in CASE_STUDIES.items():
if key != self.kwargs['slug']:
other_stories.append(value)
return other_stories
def get_context_data(self, *args, **kwargs):
return super().get_context_data(
story=self.story,
other_stories=self.get_other_stories(),
*args, **kwargs
)
| from django.views.generic import TemplateView
from django.shortcuts import redirect
from thumber.decorators import thumber_feedback
from casestudy.casestudies import CASE_STUDIES
@thumber_feedback
class CaseStudyView(TemplateView):
"""
The simple view for a case story page
"""
comment_placeholder = "We are sorry to hear that. Would you tell us why?"
submit_wording = "Send feedback"
template_name = 'case_study.html'
def dispatch(self, *args, **kwargs):
try:
self.story = CASE_STUDIES.get(self.kwargs['slug'])
return super().dispatch(*args, **kwargs)
except:
return redirect('/')
def get_other_stories(self):
other_stories = []
for key, value in CASE_STUDIES.items():
if key != self.kwargs['slug']:
other_stories.append(value)
return other_stories
def get_context_data(self, *args, **kwargs):
return super().get_context_data(
story=self.story,
other_stories=self.get_other_stories(),
*args, **kwargs
)
| mit | Python |
70d9f68f78d8b4ba8e08b445a1b508806c0463f7 | Fix home url | Ircam-Web/mezzanine-organization,Ircam-Web/mezzanine-organization | organization/pages/models.py | organization/pages/models.py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse, reverse_lazy
from mezzanine.core.models import Displayable, Slugged, Orderable
from organization.core.models import *
class CustomPage(Page, SubTitled, RichText):
class Meta:
verbose_name = 'custom page'
class PageBlock(Block):
page = models.ForeignKey(Page, verbose_name=_('page'), related_name='blocks', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _("block")
verbose_name_plural = _("blocks")
verbose_name = 'page block'
class PageImage(Image):
page = models.ForeignKey(Page, verbose_name=_('page'), related_name='images', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _("image")
verbose_name_plural = _("images")
order_with_respect_to = "page"
class DynamicContentHomeSlider(DynamicContent, Orderable):
home = models.ForeignKey("home", verbose_name=_('home'), blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = 'Dynamic Content Home Slider'
class DynamicContentHomeBody(DynamicContent, Orderable):
home = models.ForeignKey("home", verbose_name=_('home'), blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = 'Dynamic Content Home Body'
class Home(Displayable):
class Meta:
verbose_name = _('home')
verbose_name_plural = _("homes")
def get_absolute_url(self):
return reverse("organization-home")
| from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse, reverse_lazy
from mezzanine.core.models import Displayable, Slugged, Orderable
from organization.core.models import *
class CustomPage(Page, SubTitled, RichText):
class Meta:
verbose_name = 'custom page'
class PageBlock(Block):
page = models.ForeignKey(Page, verbose_name=_('page'), related_name='blocks', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _("block")
verbose_name_plural = _("blocks")
verbose_name = 'page block'
class PageImage(Image):
page = models.ForeignKey(Page, verbose_name=_('page'), related_name='images', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _("image")
verbose_name_plural = _("images")
order_with_respect_to = "page"
class DynamicContentHomeSlider(DynamicContent, Orderable):
home = models.ForeignKey("home", verbose_name=_('home'), blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = 'Dynamic Content Home Slider'
class DynamicContentHomeBody(DynamicContent, Orderable):
home = models.ForeignKey("home", verbose_name=_('home'), blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = 'Dynamic Content Home Body'
class Home(Displayable):
class Meta:
verbose_name = _('home')
verbose_name_plural = _("homes")
def get_absolute_url(self):
return reverse("organization-home", kwargs={"slug": self.slug})
| agpl-3.0 | Python |
8255eb399559f786eeec245c44877066ed878883 | change MAX_NUM_QUESTIONS_PER_ELECTION on eorchestra settings | agoravoting/agora-dev-box,agoravoting/agora-dev-box,agoravoting/agora-dev-box,agoravoting/agora-dev-box,agoravoting/agora-dev-box | eorchestra/templates/base_settings.py | eorchestra/templates/base_settings.py | # This file is part of agora-dev-box.
# Copyright (C) 2014-2016 Agora Voting SL <agora@agoravoting.com>
# agora-dev-box is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# agora-dev-box is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with agora-dev-box. If not, see <http://www.gnu.org/licenses/>.
# debug, set to false on production deployment
DEBUG = False
ROOT_URL = 'https://{{ config.host }}:{{ config.port }}/api/queues'
# URL to our HTTP server
VFORK_SERVER_URL = 'http://{{ config.host }}'
VFORK_SERVER_PORT_RANGE = {{ config.vfork_server_ports }}
# Socket address given as <hostname>:<port> to our hint server.
# A hint server is a simple UDP server that reduces latency and
# traffic on the HTTP servers.
VFORK_HINT_SERVER_SOCKET = '{{ config.host }}'
VFORK_HINT_SERVER_PORT_RANGE = {{ config.vfork_hint_server_ports }}
import os
ROOT_PATH = os.path.split(os.path.abspath(__file__))[0]
# SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/db.sqlite' % ROOT_PATH
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2:///eorchestra'
PRIVATE_DATA_PATH = os.path.join(ROOT_PATH, 'datastore/private')
PUBLIC_DATA_PATH = '/srv/election-orchestra/server1/public'
PUBLIC_DATA_BASE_URL = 'https://{{ config.host }}:{{ config.port }}/public_data'
# security configuration
SSL_CERT_PATH = '/srv/certs/selfsigned/cert.pem'
SSL_KEY_PATH = '/srv/certs/selfsigned/key-nopass.pem'
SSL_CALIST_PATH = '/srv/certs/selfsigned/calist'
ALLOW_ONLY_SSL_CONNECTIONS = True
AUTOACCEPT_REQUESTS = {{ config.auto_mode }}
KILL_ALL_VFORK_BEFORE_START_NEW = True
# Maximum number of questions per election
MAX_NUM_QUESTIONS_PER_ELECTION = {{ config.election_limits.max_num_questions }}
QUEUES_OPTIONS = {
'vfork_queue': {
'max_threads': 1,
}
}
| # This file is part of agora-dev-box.
# Copyright (C) 2014-2016 Agora Voting SL <agora@agoravoting.com>
# agora-dev-box is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# agora-dev-box is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with agora-dev-box. If not, see <http://www.gnu.org/licenses/>.
# debug, set to false on production deployment
DEBUG = False
ROOT_URL = 'https://{{ config.host }}:{{ config.port }}/api/queues'
# URL to our HTTP server
VFORK_SERVER_URL = 'http://{{ config.host }}'
VFORK_SERVER_PORT_RANGE = {{ config.vfork_server_ports }}
# Socket address given as <hostname>:<port> to our hint server.
# A hint server is a simple UDP server that reduces latency and
# traffic on the HTTP servers.
VFORK_HINT_SERVER_SOCKET = '{{ config.host }}'
VFORK_HINT_SERVER_PORT_RANGE = {{ config.vfork_hint_server_ports }}
import os
ROOT_PATH = os.path.split(os.path.abspath(__file__))[0]
# SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/db.sqlite' % ROOT_PATH
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2:///eorchestra'
PRIVATE_DATA_PATH = os.path.join(ROOT_PATH, 'datastore/private')
PUBLIC_DATA_PATH = '/srv/election-orchestra/server1/public'
PUBLIC_DATA_BASE_URL = 'https://{{ config.host }}:{{ config.port }}/public_data'
# security configuration
SSL_CERT_PATH = '/srv/certs/selfsigned/cert.pem'
SSL_KEY_PATH = '/srv/certs/selfsigned/key-nopass.pem'
SSL_CALIST_PATH = '/srv/certs/selfsigned/calist'
ALLOW_ONLY_SSL_CONNECTIONS = True
AUTOACCEPT_REQUESTS = {{ config.auto_mode }}
KILL_ALL_VFORK_BEFORE_START_NEW = True
QUEUES_OPTIONS = {
'vfork_queue': {
'max_threads': 1,
}
}
| agpl-3.0 | Python |
0dbbde4eebf4af51d03d1739e9d370885cec37dd | fix bad escaping | djrobstep/migra,djrobstep/migra | migra/statements.py | migra/statements.py | from __future__ import unicode_literals
import re
def check_for_drop(s):
return not not re.search(r"(drop\s+)", s, re.IGNORECASE)
class Statements(list):
def __init__(self, *args, **kwargs):
self.safe = True
super(Statements, self).__init__(*args, **kwargs)
@property
def sql(self):
if self.safe:
self.raise_if_unsafe()
if not self:
return ""
return "\n\n".join(self) + "\n\n"
def raise_if_unsafe(self):
if any(check_for_drop(s) for s in self):
raise UnsafeMigrationException(
"unsafe/destructive change being autogenerated, refusing to carry on further"
)
def __add__(self, other):
self += list(other)
return self
class UnsafeMigrationException(Exception):
pass
| from __future__ import unicode_literals
import re
def check_for_drop(s):
return not not re.search("(drop\s+)", s, re.IGNORECASE)
class Statements(list):
def __init__(self, *args, **kwargs):
self.safe = True
super(Statements, self).__init__(*args, **kwargs)
@property
def sql(self):
if self.safe:
self.raise_if_unsafe()
if not self:
return ""
return "\n\n".join(self) + "\n\n"
def raise_if_unsafe(self):
if any(check_for_drop(s) for s in self):
raise UnsafeMigrationException(
"unsafe/destructive change being autogenerated, refusing to carry on further"
)
def __add__(self, other):
self += list(other)
return self
class UnsafeMigrationException(Exception):
pass
| unlicense | Python |
2b7d50f9ac6d0f4f6a032e60053b5923c292a0a1 | Remove unused assert | zsloan/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2 | wqflask/utility/corr_result_helpers.py | wqflask/utility/corr_result_helpers.py | def normalize_values(a_values, b_values):
"""
Trim two lists of values to contain only the values they both share
Given two lists of sample values, trim each list so that it contains
only the samples that contain a value in both lists. Also returns
the number of such samples.
>>> normalize_values([2.3, None, None, 3.2, 4.1, 5], [3.4, 7.2, 1.3, None, 6.2, 4.1])
([2.3, 4.1, 5], [3.4, 6.2, 4.1], 3)
"""
min_length = min(len(a_values), len(b_values))
a_new = []
b_new = []
for a, b in zip(a_values, b_values):
if not (a == None or b == None):
a_new.append(a)
b_new.append(b)
return a_new, b_new, len(a_new)
def common_keys(a_samples, b_samples):
"""
>>> a = dict(BXD1 = 9.113, BXD2 = 9.825, BXD14 = 8.985, BXD15 = 9.300)
>>> b = dict(BXD1 = 9.723, BXD3 = 9.825, BXD14 = 9.124, BXD16 = 9.300)
>>> sorted(common_keys(a, b))
['BXD1', 'BXD14']
"""
return set(a_samples.keys()).intersection(set(b_samples.keys()))
def normalize_values_with_samples(a_samples, b_samples):
common_samples = common_keys(a_samples, b_samples)
a_new = {}
b_new = {}
for sample in common_samples:
a_new[sample] = a_samples[sample]
b_new[sample] = b_samples[sample]
return a_new, b_new, num_overlap
| def normalize_values(a_values, b_values):
"""
Trim two lists of values to contain only the values they both share
Given two lists of sample values, trim each list so that it contains
only the samples that contain a value in both lists. Also returns
the number of such samples.
>>> normalize_values([2.3, None, None, 3.2, 4.1, 5], [3.4, 7.2, 1.3, None, 6.2, 4.1])
([2.3, 4.1, 5], [3.4, 6.2, 4.1], 3)
"""
min_length = min(len(a_values), len(b_values))
a_new = []
b_new = []
for a, b in zip(a_values, b_values):
if not (a == None or b == None):
a_new.append(a)
b_new.append(b)
return a_new, b_new, len(a_new)
def common_keys(a_samples, b_samples):
"""
>>> a = dict(BXD1 = 9.113, BXD2 = 9.825, BXD14 = 8.985, BXD15 = 9.300)
>>> b = dict(BXD1 = 9.723, BXD3 = 9.825, BXD14 = 9.124, BXD16 = 9.300)
>>> sorted(common_keys(a, b))
['BXD1', 'BXD14']
"""
return set(a_samples.keys()).intersection(set(b_samples.keys()))
def normalize_values_with_samples(a_samples, b_samples):
common_samples = common_keys(a_samples, b_samples)
a_new = {}
b_new = {}
for sample in common_samples:
a_new[sample] = a_samples[sample]
b_new[sample] = b_samples[sample]
num_overlap = len(a_new)
assert num_overlap == len(b_new), "Lengths should be the same"
return a_new, b_new, num_overlap
| agpl-3.0 | Python |
6d2b0a7f34978fd77b906bb24f4056eeea779c9e | update release to 2.1.29-dev | kvick/aminator,coryb/aminator,bmoyles/aminator,Netflix/aminator | aminator/__init__.py | aminator/__init__.py | # -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator
========
Create images from packages for deployment in various cloud formations
"""
import logging
try:
from logging import NullHandler
except ImportError:
# py26
try:
from logutils import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__version__ = '2.1.29-dev'
__versioninfo__ = __version__.split('.')
__all__ = ()
logging.getLogger(__name__).addHandler(NullHandler())
| # -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator
========
Create images from packages for deployment in various cloud formations
"""
import logging
try:
from logging import NullHandler
except ImportError:
# py26
try:
from logutils import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__version__ = '2.1.28-dev'
__versioninfo__ = __version__.split('.')
__all__ = ()
logging.getLogger(__name__).addHandler(NullHandler())
| apache-2.0 | Python |
4955c9c797300ea3db5ef4c2083d2cb4379fc926 | allow specifying splitter for `camelcaseify` | tek/amino | amino/util/string.py | amino/util/string.py | import re
from functools import singledispatch # type: ignore
def snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
@singledispatch
def decode(value):
return value
@decode.register(bytes)
def decode_bytes(value):
return value.decode()
@decode.register(list)
def decode_list(value):
from amino import List
return List.wrap(value).map(decode)
@decode.register(dict)
def decode_dict(value):
from amino import Map
return Map.wrap(value)\
.keymap(decode)\
.valmap(decode)
@decode.register(Exception)
def decode_exc(value):
return decode_list(value.args).head | str(value)
def camelcaseify(name, sep='', splitter='_'):
return sep.join([n.capitalize() for n in re.split(splitter, name)])
def safe_string(value):
from amino import Try
return Try(str, value).or_else(lambda: Try(repr, value)) | 'invalid'
__all__ = ('snake_case', 'decode', 'camelcaseify')
| import re
from functools import singledispatch # type: ignore
def snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
@singledispatch
def decode(value):
return value
@decode.register(bytes)
def decode_bytes(value):
return value.decode()
@decode.register(list)
def decode_list(value):
from amino import List
return List.wrap(value).map(decode)
@decode.register(dict)
def decode_dict(value):
from amino import Map
return Map.wrap(value)\
.keymap(decode)\
.valmap(decode)
@decode.register(Exception)
def decode_exc(value):
return decode_list(value.args).head | str(value)
def camelcaseify(name, sep=''):
return sep.join([n.capitalize() for n in name.split('_')])
def safe_string(value):
from amino import Try, L
return Try(str, value).or_else(lambda: Try(repr, value)) | 'invalid'
__all__ = ('snake_case', 'decode', 'camelcaseify')
| mit | Python |
10822c71c04a8a2368b59012b88cbbccf9c65d7c | revert version bump up to 0.0.3.10: I merged many updates since then, so have to check carefully more before the next release | pmquang/python-anyconfig,ssato/python-anyconfig,ssato/python-anyconfig,pmquang/python-anyconfig | anyconfig/globals.py | anyconfig/globals.py | #
# Copyright (C) 2013 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
import logging
AUTHOR = 'Satoru SATOH <ssat@redhat.com>'
VERSION = "0.0.3.9"
_LOGGING_FORMAT = "%(asctime)s %(name)s: [%(levelname)s] %(message)s"
def getLogger(name="anyconfig", format=_LOGGING_FORMAT,
level=logging.WARNING, **kwargs):
"""
Initialize custom logger.
"""
logging.basicConfig(level=level, format=format)
logger = logging.getLogger(name)
h = logging.StreamHandler()
h.setLevel(level)
h.setFormatter(logging.Formatter(format))
logger.addHandler(h)
return logger
LOGGER = getLogger()
# vim:sw=4:ts=4:et:
| #
# Copyright (C) 2013 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
import logging
AUTHOR = 'Satoru SATOH <ssat@redhat.com>'
VERSION = "0.0.3.10"
_LOGGING_FORMAT = "%(asctime)s %(name)s: [%(levelname)s] %(message)s"
def getLogger(name="anyconfig", format=_LOGGING_FORMAT,
level=logging.WARNING, **kwargs):
"""
Initialize custom logger.
"""
logging.basicConfig(level=level, format=format)
logger = logging.getLogger(name)
h = logging.StreamHandler()
h.setLevel(level)
h.setFormatter(logging.Formatter(format))
logger.addHandler(h)
return logger
LOGGER = getLogger()
# vim:sw=4:ts=4:et:
| mit | Python |
069b60e5860b6af81c136f55c84b6d1930442285 | Fix plugin_test for workflow object | gareth8118/spreads,miloh/spreads,miloh/spreads,nafraf/spreads,jbaiter/spreads,gareth8118/spreads,gareth8118/spreads,nafraf/spreads,adongy/spreads,miloh/spreads,adongy/spreads,jbaiter/spreads,DIYBookScanner/spreads,DIYBookScanner/spreads,DIYBookScanner/spreads,nafraf/spreads,adongy/spreads,jbaiter/spreads | test/plugin_test.py | test/plugin_test.py | from mock import call, MagicMock as Mock, patch
from nose.tools import raises
import spreads.confit as confit
import spreads.plugin as plugin
from spreads.util import DeviceException
class TestPlugin(object):
def setUp(self):
pass
def test_get_driver(self):
cfg = confit.Configuration('test_plugin')
cfg["driver"] = u"dummy"
assert "dummy" in plugin.get_driver(cfg).names()
@patch('spreads.plugin.get_driver')
def test_get_devices(self, get_driver):
cfg = Mock()
driver = Mock()
usb_mock = Mock()
plugin.usb.core.find = Mock(return_value=[usb_mock])
get_driver.return_value = driver
plugin.get_driver = get_driver
plugin.get_devices(cfg)
assert call(cfg, usb_mock) in driver.driver.call_args_list
assert driver.driver.match.call_args_list == [call(usb_mock)]
@raises(DeviceException)
@patch('spreads.plugin.get_driver')
def test_no_devices(self, get_driver):
cfg = Mock()
driver = Mock()
driver.driver.match = Mock(return_value=False)
usb_mock = Mock()
plugin.usb.core.find = Mock(return_value=[usb_mock])
get_driver.return_value = driver
plugin.get_driver = get_driver
plugin.get_devices(cfg)
| from mock import call, MagicMock as Mock, patch
from nose.tools import raises
import spreads
import spreads.plugin as plugin
from spreads.util import DeviceException
class TestPlugin(object):
def setUp(self):
reload(plugin)
spreads.config.clear()
spreads.config.read(user=False)
spreads.config['plugins'] = []
def test_pluginmanager(self):
plugin.SpreadsNamedExtensionManager = Mock()
pm = plugin.get_pluginmanager()
pm_new = plugin.get_pluginmanager()
assert pm is pm_new
def test_get_devices(self):
device = Mock()
device.match = Mock(return_value=True)
device.__name__ = "Mock"
usb_mock = Mock()
extension_mock = Mock()
plugin.usb.core.find = Mock(return_value=[usb_mock])
plugin.DriverManager = Mock(return_value=extension_mock)
extension_mock.driver = device
plugin.get_devices()
assert call(spreads.config, usb_mock) in device.call_args_list
assert device.match.call_args_list == [call(usb_mock)]
@raises(DeviceException)
def test_no_devices(self):
device_a = Mock()
device_a.plugin.match = Mock(return_value=True)
device_b = Mock()
device_b.plugin.match = Mock(return_value=False)
plugin.usb.core.find = Mock(return_value=[])
dm = Mock()
dm.map = lambda x, y: [x(z, y) for z in [device_a, device_b]]
plugin.get_devicemanager = Mock(return_value=dm)
plugin.get_devices()
| agpl-3.0 | Python |
48f44fb2c499ac1ee6fed160d9d2e06a549fbcea | add __all__ string to scene/cameras/__init__.py | Eric89GXL/vispy,Eric89GXL/vispy,Eric89GXL/vispy | vispy/scene/cameras/__init__.py | vispy/scene/cameras/__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Cameras are responsible for determining which part of a scene is displayed
in a viewbox and for handling user input to change the view.
Several Camera subclasses are available to customize the projection of the
scene such as 3D perspective and orthographic projections, 2D
scale/translation, and other specialty cameras. A variety of user interaction
styles are available for each camera including arcball, turntable,
first-person, and pan/zoom interactions.
Internally, Cameras work by setting the transform of a SubScene object such
that a certain part of the scene is mapped to the bounding rectangle of the
ViewBox.
"""
__all__ = ['ArcballCamera', 'BaseCamera', 'FlyCamera', 'MagnifyCamera',
'Magnify1DCamera', 'PanZoomCamera', 'TurntableCamera']
from ._base import make_camera # noqa
from .base_camera import BaseCamera # noqa
from .panzoom import PanZoomCamera # noqa
from .arcball import ArcballCamera # noqa
from .turntable import TurntableCamera # noqa
from .fly import FlyCamera # noqa
from .magnify import MagnifyCamera, Magnify1DCamera # noqa
| # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Cameras are responsible for determining which part of a scene is displayed
in a viewbox and for handling user input to change the view.
Several Camera subclasses are available to customize the projection of the
scene such as 3D perspective and orthographic projections, 2D
scale/translation, and other specialty cameras. A variety of user interaction
styles are available for each camera including arcball, turntable,
first-person, and pan/zoom interactions.
Internally, Cameras work by setting the transform of a SubScene object such
that a certain part of the scene is mapped to the bounding rectangle of the
ViewBox.
"""
from ._base import make_camera # noqa
from .base_camera import BaseCamera # noqa
from .panzoom import PanZoomCamera # noqa
from .arcball import ArcballCamera # noqa
from .turntable import TurntableCamera # noqa
from .fly import FlyCamera # noqa
from .magnify import MagnifyCamera, Magnify1DCamera # noqa
| bsd-3-clause | Python |
b9fccc33f3fb1c6408701f11c6a894c9001f843a | Fix YoutubeScrape error handling | willkg/steve,pyvideo/steve,willkg/steve,pyvideo/steve,willkg/steve,pyvideo/steve,CarlFK/steve,CarlFK/steve,CarlFK/steve | steve/scrapers.py | steve/scrapers.py | #######################################################################
# This file is part of steve.
#
# Copyright (C) 2012-2015 Will Kahn-Greene
# Licensed under the Simplified BSD License. See LICENSE for full
# license.
#######################################################################
from datetime import datetime
from urlparse import urlparse
import json
import subprocess
class ScraperError(Exception):
pass
class Scraper(object):
def scrape(self, url):
"""Takes a url and returns list of dicts or None if not handled"""
raise NotImplemented
class YoutubeScraper(object):
def transform_item(self, item):
"""Converts youtube-dl output to richard fields"""
return {
'title': item['fulltitle'],
'summary': item['description'],
'description': '',
'state': 1,
'category': '',
'quality_notes': '',
'language': '',
'copyright_text': '',
'thumbnail_url': item['thumbnail'],
'duration': item['duration'],
'source_url': item['webpage_url'],
'whiteboard': '',
'recorded': datetime.strptime(item['upload_date'], '%Y%m%d'),
'slug': '',
'tags': item['categories'],
'speakers': []
}
def scrape(self, url):
"""Scrapes a url by passing it through youtube-dl"""
parts = urlparse(url)
# FIXME: This is a lousy test for whether this is a youtube
# url.
if not parts.netloc.endswith('youtube.com'):
return
try:
output = subprocess.check_output(
['youtube-dl', '-j', url],
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as cpe:
raise ScraperError('youtube-dl said "{0}".'.format(cpe.output))
except OSError:
raise ScraperError('youtube-dl not installed or not on PATH.')
# Each line is a single JSON object.
items = []
for line in output.splitlines():
items.append(json.loads(line))
items = [self.transform_item(item) for item in items]
return items
| #######################################################################
# This file is part of steve.
#
# Copyright (C) 2012-2015 Will Kahn-Greene
# Licensed under the Simplified BSD License. See LICENSE for full
# license.
#######################################################################
from datetime import datetime
from urlparse import urlparse
import json
import subprocess
class Scraper(object):
def scrape(self, url):
"""Takes a url and returns list of dicts or None if not handled"""
raise NotImplemented
class YoutubeScraper(object):
def transform_item(self, item):
"""Converts youtube-dl output to richard fields"""
return {
'title': item['fulltitle'],
'summary': item['description'],
'description': '',
'state': 1,
'category': '',
'quality_notes': '',
'language': '',
'copyright_text': '',
'thumbnail_url': item['thumbnail'],
'duration': item['duration'],
'source_url': item['webpage_url'],
'whiteboard': '',
'recorded': datetime.strptime(item['upload_date'], '%Y%m%d'),
'slug': '',
'tags': item['categories'],
'speakers': []
}
def scrape(self, url):
parts = urlparse(url)
# FIXME: This is a lousy test for whether this is a youtube
# url.
if not parts.netloc.endswith('youtube.com'):
return
# FIXME: hardcoded path for youtube-dl command.
# FIXME: needs better error handling.
output = subprocess.check_output(['youtube-dl', '-j', url])
# Each line is a single JSON object.
items = []
for line in output.splitlines():
items.append(json.loads(line))
items = [self.transform_item(item) for item in items]
return items
| bsd-2-clause | Python |
11b35c79ff8d2108d572929334e4e3f30c70eea5 | Allow debug and verbose modes to be set directly from config. | Agent-Isai/lykos,billion57/lykos,Diitto/lykos,Cr0wb4r/lykos | modules/__init__.py | modules/__init__.py | import argparse
import botconfig
from settings import wolfgame as var
# Todo: Allow game modes to be set via config
# Handle launch parameters
# Argument --debug means start in debug mode
# --verbose means to print a lot of stuff (when not in debug mode)
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--sabotage', action='store_true')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
debug_mode = args.debug
verbose = args.verbose
sabotage = args.sabotage
# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
if not setting.isupper():
continue # Not a setting
if setting == "DEBUG_MODE":
debug_mode = value
if setting == "VERBOSE_MODE":
verbose = value
if setting == "DEFAULT_MODULE":
sabotage = value
if not setting in var.__dict__.keys():
continue # Don't carry over config-only settings
# If we got that far, it's valid
setattr(var, setting, value)
botconfig.DEBUG_MODE = debug_mode if not botconfig.DISABLE_DEBUG_MODE else False
botconfig.VERBOSE_MODE = verbose
botconfig.DEFAULT_MODULE = "sabotage" if args.sabotage else "wolfgame"
# Initialize Database
var.init_db()
| import argparse
import botconfig
from settings import wolfgame as var
# Todo: Allow game modes to be set via config
# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
if not setting.isupper():
continue # Not a setting
if not setting in var.__dict__.keys():
continue # Don't carry over config-only settings
# If we got that far, it's valid
setattr(var, setting, value)
# Handle launch parameters
# Argument --debug means start in debug mode
# --verbose means to print a lot of stuff (when not in debug mode)
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--sabotage', action='store_true')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
botconfig.DEBUG_MODE = args.debug if not botconfig.DISABLE_DEBUG_MODE else False
botconfig.VERBOSE_MODE = args.verbose
botconfig.DEFAULT_MODULE = "sabotage" if args.sabotage else "wolfgame"
# Initialize Database
var.init_db()
| bsd-2-clause | Python |
af410659c8c9664c6e2e86d0c88fe9f1cfd2cceb | Fix karmamod for mongodb | billyvg/piebot | modules/karmamod.py | modules/karmamod.py | """Keeps track of karma counts.
@package ppbot
@syntax .karma <item>
"""
import re
from modules import *
from models import Model
class Karmamod(Module):
def __init__(self, *args, **kwargs):
"""Constructor"""
Module.__init__(self, kwargs=kwargs)
def _register_events(self):
self.add_command('karma', 'get_karma')
self.add_event('pubmsg', 'parsekarma')
@op
def get_karma(self, event):
karma = self.db.karma.find_one({'name': event['args'][0],
'source': event['target']})
try:
result = karma['count']
except KeyError:
result = 0
self.msg(event['target'], '%s has %d karma.' % (event['args'][0], result))
def parsekarma(self, event):
inc_pattern = re.compile('([^ ]{2,})\+\+')
m = inc_pattern.findall(event['message'])
for term in m:
self.change(event, term, 1)
dec_pattern = re.compile('([^ ]{2,})--')
m = dec_pattern.findall(event['message'])
for term in m:
self.change(event, term, -1)
def change(self, event, name, value):
"""Change karma count."""
karma = self.db.karma.find_one({'name': name,
'source': event['target']})
# TODO: find way to insert if doesn't exist or else update?
try:
count = karma['count'] + value
self.db.karma.update({'name': name,
'source': event['target']},
{'count': count})
except TypeError, KeyError:
count = value
self.db.karma.insert({'name': name,
'source': event['target'],
'count': count})
| """Keeps track of karma counts.
@package ppbot
@syntax .karma <item>
"""
import re
from modules import *
from models import Model
class Karmamod(Module):
def __init__(self, *args, **kwargs):
"""Constructor"""
Module.__init__(self, kwargs=kwargs)
def _register_events(self):
self.add_command('karma', 'get_karma')
self.add_event('pubmsg', 'parsekarma')
@op
def get_karma(self, event):
karma = self.db.karma.find_one({'name': event['args'][0],
'source': event['target']})
try:
result = karma['count']
except KeyError:
result = 0
self.msg(event['target'], '%s has %d karma.' % (event['args'][0], result))
def parsekarma(self, event):
inc_pattern = re.compile('([^ ]{2,})\+\+')
m = inc_pattern.findall(event['message'])
for term in m:
self.change(event, term, 1)
dec_pattern = re.compile('([^ ]{2,})--')
m = dec_pattern.findall(event['message'])
for term in m:
self.change(event, term, -1)
def change(self, event, name, value):
"""Change karma count."""
karma = self.db.karma.find_one({'name': event['args'][0],
'source': event['target']})
# TODO: find way to insert if doesn't exist or else update?
try:
count = karma['count'] + value
self.db.karma.update({'name': event['args'][0],
'source': event['target']},
{'count': count})
except KeyError:
count = value
self.db.karma.insert({'name': event['args'][0],
'source': event['target'],
'count': count})
| mit | Python |
bfb909281d567334e614452656bb4085f071262d | Use argument parser for hades-su | agdsn/hades,agdsn/hades,agdsn/hades,agdsn/hades,agdsn/hades | src/hades/bin/su.py | src/hades/bin/su.py | import grp
import logging
import os
import pwd
import sys
from hades.common.cli import ArgumentParser, parser as common_parser
logger = logging.getLogger(__name__)
def drop_privileges(passwd, group):
if os.geteuid() != 0:
logger.error("Can't drop privileges (EUID != 0)")
return
os.setgid(group.gr_gid)
os.initgroups(passwd.pw_name, group.gr_gid)
os.setuid(passwd.pw_uid)
def main():
parser = ArgumentParser(parents=[common_parser])
parser.add_argument('user')
parser.add_argument('command')
parser.add_argument('arguments', nargs='*')
args = parser.parse_args()
try:
passwd = pwd.getpwnam(args.user)
group = grp.getgrgid(passwd.pw_gid)
except KeyError:
print("No such user or group")
return os.EX_NOUSER
filename = args.command
try:
drop_privileges(passwd, group)
os.execvp(filename, [filename] + args.arguments)
except (FileNotFoundError, PermissionError):
print("Could not execute {}".format(filename), file=sys.stderr)
return os.EX_NOINPUT
except OSError:
logger.exception("An OSError occurred")
return os.EX_OSERR
if __name__ == '__main__':
sys.exit(main())
| import grp
import logging
import os
import pwd
import sys
logger = logging.getLogger(__name__)
def drop_privileges(passwd, group):
if os.geteuid() != 0:
logger.error("Can't drop privileges (EUID != 0)")
return
os.setgid(group.gr_gid)
os.initgroups(passwd.pw_name, group.gr_gid)
os.setuid(passwd.pw_uid)
def main():
args = sys.argv
if len(args) < 3:
print("Usage: {} USER COMMANDS [ARGS...]".format(args[0]))
return os.EX_USAGE
try:
passwd = pwd.getpwnam(args[1])
group = grp.getgrgid(passwd.pw_gid)
except KeyError:
print("No such user or group")
return os.EX_NOUSER
filename = args[2]
try:
drop_privileges(passwd, group)
os.execvp(filename, args[2:])
except (FileNotFoundError, PermissionError):
print("Could not execute {}".format(filename), file=sys.stderr)
return os.EX_NOINPUT
except OSError:
logger.exception("An OSError occurred")
return os.EX_OSERR
if __name__ == '__main__':
sys.exit(main())
| mit | Python |
8fe8717b4e2afe6329d2dd25210371df3eab2b4f | Test that we reject bad TLS versions | python-hyper/pep543 | test/test_stdlib.py | test/test_stdlib.py | # -*- coding: utf-8 -*-
"""
Tests for the standard library PEP 543 shim.
"""
import pep543
import pep543.stdlib
import pytest
from .backend_tests import SimpleNegotiation
CONTEXTS = (
pep543.stdlib.STDLIB_BACKEND.client_context,
pep543.stdlib.STDLIB_BACKEND.server_context
)
def assert_wrap_fails(context, exception):
"""
A convenient helper that calls wrap_buffers with the appropriate number of
arugments and asserts that it raises the appropriate error.
"""
if isinstance(context, pep543.stdlib.STDLIB_BACKEND.client_context):
with pytest.raises(exception):
context.wrap_buffers(server_hostname=None)
else:
with pytest.raises(exception):
context.wrap_buffers()
class TestSimpleNegotiationStdlib(SimpleNegotiation):
BACKEND = pep543.stdlib.STDLIB_BACKEND
class TestStdlibErrorHandling(object):
"""
Validate that the stdlib backend can do sensible error handling in specific
situations that it cannot handle.
"""
@pytest.mark.parametrize(
'lowest,highest', (
(object(), None), (None, object()), (object(), object())
)
)
@pytest.mark.parametrize('context', CONTEXTS)
def test_bad_values_for_versions_client(self, lowest, highest, context):
"""
Using TLSConfiguration objects with a bad value for their minimum
version raises a TLSError with Client contexts.
"""
config = pep543.TLSConfiguration(
validate_certificates=False,
lowest_supported_version=lowest,
highest_supported_version=highest
)
ctx = context(config)
assert_wrap_fails(ctx, pep543.TLSError)
| # -*- coding: utf-8 -*-
"""
Tests for the standard library PEP 543 shim.
"""
import pep543.stdlib
from .backend_tests import SimpleNegotiation
class TestSimpleNegotiationStdlib(SimpleNegotiation):
BACKEND = pep543.stdlib.STDLIB_BACKEND
| mit | Python |
d415eb84b699a8f31451734599e14c44d97d0c74 | fix for imgur album downloads | regosen/gallery_get | gallery_plugins/plugin_imgur_album.py | gallery_plugins/plugin_imgur_album.py | # Plugin for gallery_get.
# Each definition can be one of the following:
# - a string to match
# - a regex string to match
# - a function that takes source as a parameter and returns an array or a single match. (You may assume that re and urllib are already imported.)
# If you comment out a parameter, it will use the default defined in __init__.py
# identifier (default = name of this plugin after "plugin_") : If there's a match, we'll attempt to download images using this plugin.
identifier = "imgur.+album.css"
# title: parses the gallery page for a title. This will be the folder name of the output gallery.
title = r'data-title="(.*?)"'
# redirect: if the links in the gallery page go to an html instead of an image, use this to parse the gallery page.
# direct_links: if redirect is non-empty, this parses each redirect page for a single image. Otherwise, this parses the gallery page for all images.
# * if using regex, you can have two matches: the first will be the link and the second will be the basename of the file.
# if the matches need to be reversed, use named groups "link" and "basename"
def direct_links(source):
start = source.find("images :", source.find("Imgur.Album"))+14
end = source.find("]}", start) + 2
albumimages = []
rawAlbumdata = source[start:end].replace(":false,",":False,").replace(":true,",":True,")
if rawAlbumdata.strip():
albumdata = eval(rawAlbumdata)
for i in albumdata["items"]:
albumimages.append( "http://i.imgur.com/"+i["hash"]+i["ext"] )
return albumimages
# same_filename (default=False): if True, uses same filename from remote link. Otherwise, creates own filename with incremental index (or uses subtitle).
| # Plugin for gallery_get.
# Each definition can be one of the following:
# - a string to match
# - a regex string to match
# - a function that takes source as a parameter and returns an array or a single match. (You may assume that re and urllib are already imported.)
# If you comment out a parameter, it will use the default defined in __init__.py
# identifier (default = name of this plugin after "plugin_") : If there's a match, we'll attempt to download images using this plugin.
identifier = "imgur.+album.css"
# title: parses the gallery page for a title. This will be the folder name of the output gallery.
title = r'data-title="(.*?)"'
# redirect: if the links in the gallery page go to an html instead of an image, use this to parse the gallery page.
# direct_links: if redirect is non-empty, this parses each redirect page for a single image. Otherwise, this parses the gallery page for all images.
# * if using regex, you can have two matches: the first will be the link and the second will be the basename of the file.
# if the matches need to be reversed, use named groups "link" and "basename"
def direct_links(source):
start = source.find("images :", source.find("Imgur.Album"))+14
end = source.find("]}", start) + 2
albumimages = []
rawAlbumdata = source[start:end]
if rawAlbumdata.strip():
albumdata = eval(rawAlbumdata)
for i in albumdata["items"]:
albumimages.append( "http://i.imgur.com/"+i["hash"]+i["ext"] )
return albumimages
# same_filename (default=False): if True, uses same filename from remote link. Otherwise, creates own filename with incremental index (or uses subtitle). | mit | Python |
b8cf132bc4cbf4b7c17812c3429cc96a0a07a18e | update accounts admin | tarvitz/djtp,tarvitz/djtp,tarvitz/djtp,tarvitz/djtp | apps/accounts/admin.py | apps/accounts/admin.py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse_lazy as reverse
from django import forms
from django.forms.util import ErrorList
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from apps.accounts.models import User
# UserAdmin.list_display += ('field', )
class UserCreationForm(forms.ModelForm):
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class Meta:
model = User
fields = ('username', )
class UserAdminForm(forms.ModelForm):
class Meta:
model = User
UserAdmin.form = UserAdminForm
UserAdmin.add_form = UserCreationForm
UserAdmin.fieldsets[1][1]['fields'] = ('first_name', 'last_name', 'email', )
admin.site.register(User, UserAdmin)
| from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse_lazy as reverse
from django import forms
from django.forms.util import ErrorList
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from apps.accounts.models import User
# UserAdmin.list_display += ('field', )
class UserCreationForm(forms.ModelForm):
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class Meta:
model = User
fields = ('username', )
class UserAdminForm(forms.ModelForm):
class Meta:
model = User
UserAdmin.form = UserAdminForm
UserAdmin.add_form = UserCreationForm
UserAdmin.fieldsets[1][1]['fields'] = ('first_name', 'last_name', 'email', 'is_translator', 'is_manager', )
admin.site.register(User, UserAdmin)
| bsd-3-clause | Python |
774443b9d00f311bb656dec8fbc66378cfe876a9 | Make launch_testing.markers.retry_on_failure decorator more robust. (#352) | ros2/launch,ros2/launch,ros2/launch | launch_testing/launch_testing/markers.py | launch_testing/launch_testing/markers.py | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import unittest
def keep_alive(test_description):
"""Mark a test launch description to be kept alive after fixture processes' termination."""
if not hasattr(test_description, '__markers__'):
test_description.__markers__ = {}
test_description.__markers__['keep_alive'] = True
return test_description
def retry_on_failure(*, times):
"""Mark a test case to be retried up to `times` on AssertionError."""
assert times > 0
def _decorator(func):
assert 'self' == list(inspect.signature(func).parameters)[0]
@functools.wraps(func)
def _wrapper(self, *args, **kwargs):
n = times
while n > 1:
try:
ret = func(self, *args, **kwargs)
if isinstance(self, unittest.TestCase):
assert self._outcome.success
return ret
except AssertionError:
self._outcome.errors.clear()
self._outcome.success = True
n -= 1
return func(self, *args, **kwargs)
return _wrapper
return _decorator
| # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
def keep_alive(test_description):
"""Mark a test launch description to be kept alive after fixture processes' termination."""
if not hasattr(test_description, '__markers__'):
test_description.__markers__ = {}
test_description.__markers__['keep_alive'] = True
return test_description
def retry_on_failure(*, times):
"""Mark a test case to be retried up to `times` on AssertionError."""
assert times > 0
def _decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kwargs):
n = times
while n > 1:
try:
return func(*args, **kwargs)
except AssertionError:
n -= 1
return func(*args, **kwargs)
return _wrapper
return _decorator
| apache-2.0 | Python |
574912ab8b95a4e469a1b22ea0153e3755fcd505 | Rework admin of licenses app | TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker | apps/licenses/admin.py | apps/licenses/admin.py | """
Admin views for the licenses app.
"""
from django.contrib import admin
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from .models import License
class LicenseAdmin(admin.ModelAdmin):
"""
Admin form for the ``License`` data model.
"""
list_display = ('logo_img',
'name',
'view_on_site')
list_display_links = ('logo_img',
'name')
search_fields = ('name',
'description',
'usage',
'source_url')
prepopulated_fields = {'slug': ('name', )}
readonly_fields = ('logo_img',
'last_modification_date')
fieldsets = (
(_('Name and slug'), {
'fields': ('name',
'slug')
}),
(_('Iconography'), {
'fields': ('logo_img',
'logo')
}),
(_('License text'), {
'fields': ('description',
'usage',
'source_url')
}),
(_('Date and time'), {
'fields': ('last_modification_date',)
}),
)
def logo_img(self, obj):
"""
Return the current logo image as html ``<img>``.
:param obj: Current model object.
"""
return '<img src="%s" />' % obj.logo.url if obj.logo else ''
logo_img.short_description = _('Logo')
logo_img.allow_tags = True
def view_on_site(self, obj):
"""
Simple "view on site" inline callback.
:param obj: Current database object.
:return: HTML <a> link to the given object.
"""
return format_html('<a href="{0}" class="link">{1}</a>',
obj.get_absolute_url(),
_('View on site'))
view_on_site.short_description = ''
view_on_site.allow_tags = True
admin.site.register(License, LicenseAdmin)
| """
Admin views for the licenses app.
"""
from django.contrib import admin
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from .models import License
def view_issue_on_site(obj):
"""
Simple "view on site" inline callback.
:param obj: Current database object.
:return: HTML <a> link to the given object.
"""
return format_html('<a href="{0}" class="link">{1}</a>',
obj.get_absolute_url(),
_('View on site'))
view_issue_on_site.short_description = ''
view_issue_on_site.allow_tags = True
class LicenseAdmin(admin.ModelAdmin):
"""
Admin form for the ``License`` data model.
"""
list_display = ('logo_img',
'name',
view_issue_on_site)
list_display_links = ('logo_img',
'name')
search_fields = ('name',
'description',
'usage',
'source_url')
prepopulated_fields = {'slug': ('name',)}
readonly_fields = ('logo_img',
'last_modification_date')
fieldsets = (
(_('Name and slug'), {
'fields': ('name',
'slug')
}),
(_('Iconography'), {
'fields': ('logo_img',
'logo')
}),
(_('License text'), {
'fields': ('description',
'usage',
'source_url')
}),
(_('Date and time'), {
'fields': ('last_modification_date',)
}),
)
def logo_img(self, obj):
"""
Return the current logo image as html ``<img>``.
:param obj: Current model object.
"""
return '<img src="%s" />' % obj.logo.url if obj.logo else ''
logo_img.short_description = _('Logo')
logo_img.allow_tags = True
admin.site.register(License, LicenseAdmin)
| agpl-3.0 | Python |
a2a8f9a2bf9352a99b8ee3750851845f754f6c04 | Use raw_id_field for voucher sender in admin. | onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site | apps/vouchers/admin.py | apps/vouchers/admin.py | from babel.numbers import format_currency
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils import translation
from .models import CustomVoucherRequest, Voucher
class VoucherAdmin(admin.ModelAdmin):
list_filter = ('status',)
list_display = ('created', 'amount_override', 'status', 'sender_email', 'receiver_email')
raw_id_fields = ('sender', 'receiver')
readonly_fields = ('view_order',)
fields = readonly_fields + ('sender', 'receiver', 'status', 'amount', 'currency', 'code', 'sender_email',
'receiver_email', 'receiver_name', 'sender_name', 'message')
def view_order(self, obj):
url = reverse('admin:%s_%s_change' % (obj.order._meta.app_label, obj.order._meta.module_name), args=[obj.order.id])
return "<a href='%s'>View Order</a>" % (str(url))
view_order.allow_tags = True
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100.0, obj.currency, locale=language)
amount_override.short_description = 'amount'
admin.site.register(Voucher, VoucherAdmin)
class CustomVoucherRequestAdmin(admin.ModelAdmin):
list_filter = ('status', 'organization')
list_display = ('created', 'number', 'status', 'contact_name', 'contact_email', 'organization')
admin.site.register(CustomVoucherRequest, CustomVoucherRequestAdmin) | from babel.numbers import format_currency
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils import translation
from .models import CustomVoucherRequest, Voucher
class VoucherAdmin(admin.ModelAdmin):
list_filter = ('status',)
list_display = ('created', 'amount_override', 'status', 'sender_email', 'receiver_email')
raw_id_fields = ('receiver', 'receiver')
readonly_fields = ('view_order',)
fields = readonly_fields + ('sender', 'receiver', 'status', 'amount', 'currency', 'code', 'sender_email',
'receiver_email', 'receiver_name', 'sender_name', 'message')
def view_order(self, obj):
url = reverse('admin:%s_%s_change' % (obj.order._meta.app_label, obj.order._meta.module_name), args=[obj.order.id])
return "<a href='%s'>View Order</a>" % (str(url))
view_order.allow_tags = True
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100.0, obj.currency, locale=language)
amount_override.short_description = 'amount'
admin.site.register(Voucher, VoucherAdmin)
class CustomVoucherRequestAdmin(admin.ModelAdmin):
list_filter = ('status', 'organization')
list_display = ('created', 'number', 'status', 'contact_name', 'contact_email', 'organization')
admin.site.register(CustomVoucherRequest, CustomVoucherRequestAdmin) | bsd-3-clause | Python |
04608636f6e4fc004458560499338af4b871cddb | Make Message a subclass of bytes | meshy/framewirc | asyncio_irc/message.py | asyncio_irc/message.py | from .utils import to_bytes
class Message(bytes):
"""A message recieved from the IRC network."""
def __init__(self, raw_message_bytes_ignored):
super().__init__()
self.prefix, self.command, self.params, self.suffix = self._elements()
def _elements(self):
"""
Split the raw message into it's component parts.
Adapted from http://stackoverflow.com/a/930706/400691
"""
message = self.strip()
prefix = b''
# Odd slicing required for bytes to avoid getting int instead of char
# http://stackoverflow.com/q/28249597/400691
if message[0:1] == b':':
prefix, message = message[1:].split(b' ', 1)
suffix = b''
if b' :' in message:
message, suffix = message.split(b' :', 1)
command, *params = message.split()
params = list(filter(None, params))
return prefix, command, params, suffix
def message_bytes(command, prefix=b'', params=None, suffix=b''):
command = to_bytes(command)
prefix = to_bytes(prefix)
params = list(map(to_bytes, params or []))
suffix = to_bytes(suffix)
message = command
if prefix:
message = b':' + prefix + b' ' + message
if params:
params = b' '.join(params)
message = message + b' ' + params
if suffix:
message = message + b' :' + suffix
return message
| from .utils import to_bytes
class Message:
"""A message recieved from the IRC network."""
def __init__(self, raw_message):
self.raw = raw_message
self.prefix, self.command, self.params, self.suffix = self._elements()
def _elements(self):
"""
Split the raw message into it's component parts.
Adapted from http://stackoverflow.com/a/930706/400691
"""
message = self.raw.strip()
prefix = b''
# Odd slicing required for bytes to avoid getting int instead of char
# http://stackoverflow.com/q/28249597/400691
if message[0:1] == b':':
prefix, message = message[1:].split(b' ', 1)
suffix = b''
if b' :' in message:
message, suffix = message.split(b' :', 1)
command, *params = message.split()
params = list(filter(None, params))
return prefix, command, params, suffix
def message_bytes(command, prefix=b'', params=None, suffix=b''):
command = to_bytes(command)
prefix = to_bytes(prefix)
params = list(map(to_bytes, params or []))
suffix = to_bytes(suffix)
message = command
if prefix:
message = b':' + prefix + b' ' + message
if params:
params = b' '.join(params)
message = message + b' ' + params
if suffix:
message = message + b' :' + suffix
return message
| bsd-2-clause | Python |
8778a7b28030a0b185f006b62fe1305982cf8af0 | Handle unknown fields | cdumay/kser | src/kser/schemas.py | src/kser/schemas.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <cedric.dumay@gmail.com>
"""
from cdumay_error import ValidationError
import marshmallow.exceptions
from marshmallow import Schema, fields, EXCLUDE
from cdumay_result import ResultSchema, Result
class BaseSchema(Schema):
class Meta:
unknown = EXCLUDE
uuid = fields.String(required=True)
entrypoint = fields.String(required=True)
params = fields.Dict(default=dict)
class Base(object):
MARSHMALLOW_SCHEMA = BaseSchema()
def __init__(self, uuid, entrypoint, params=None):
self.uuid = uuid
self.entrypoint = entrypoint
self.params = params if params else dict()
def dump(self):
"""description of dump"""
return self.MARSHMALLOW_SCHEMA.dump(self)
def dumps(self):
"""description of dumps"""
return self.MARSHMALLOW_SCHEMA.dumps(self)
def __str__(self):
return str(self.dump())
class MessageSchema(BaseSchema):
result = fields.Nested(ResultSchema, missing=None)
metadata = fields.Dict()
class Message(Base):
MARSHMALLOW_SCHEMA = MessageSchema()
@classmethod
def loads(cls, json_data):
"""description of load"""
try:
return cls(**cls.MARSHMALLOW_SCHEMA.loads(json_data))
except marshmallow.exceptions.ValidationError as exc:
raise ValidationError("Failed to load message", extra=exc.args[0])
def __init__(self, uuid, entrypoint, params=None, result=None,
metadata=None):
Base.__init__(self, uuid, entrypoint, params)
if result:
if isinstance(result, Result):
self.result = result
else:
self.result = Result(**result)
else:
self.result = Result(uuid=uuid)
self.metadata = metadata or dict()
def __repr__(self):
""""""
return "Message<uuid='{}', entrypoint='{}', result={}>".format(
self.uuid, self.entrypoint, self.result
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <cedric.dumay@gmail.com>
"""
from cdumay_error import ValidationError
import marshmallow.exceptions
from marshmallow import Schema, fields
from cdumay_result import ResultSchema, Result
class BaseSchema(Schema):
uuid = fields.String(required=True)
entrypoint = fields.String(required=True)
params = fields.Dict(default=dict)
class Base(object):
MARSHMALLOW_SCHEMA = BaseSchema()
def __init__(self, uuid, entrypoint, params=None):
self.uuid = uuid
self.entrypoint = entrypoint
self.params = params if params else dict()
def dump(self):
"""description of dump"""
return self.MARSHMALLOW_SCHEMA.dump(self)
def dumps(self):
"""description of dumps"""
return self.MARSHMALLOW_SCHEMA.dumps(self)
def __str__(self):
return str(self.dump())
class MessageSchema(BaseSchema):
result = fields.Nested(ResultSchema, missing=None)
metadata = fields.Dict()
class Message(Base):
MARSHMALLOW_SCHEMA = MessageSchema()
@classmethod
def loads(cls, json_data):
"""description of load"""
try:
return cls(**cls.MARSHMALLOW_SCHEMA.loads(json_data))
except marshmallow.exceptions.ValidationError as exc:
raise ValidationError("Failed to load message", extra=exc.args[0])
def __init__(self, uuid, entrypoint, params=None, result=None,
metadata=None):
Base.__init__(self, uuid, entrypoint, params)
if result:
if isinstance(result, Result):
self.result = result
else:
self.result = Result(**result)
else:
self.result = Result(uuid=uuid)
self.metadata = metadata or dict()
def __repr__(self):
""""""
return "Message<uuid='{}', entrypoint='{}', result={}>".format(
self.uuid, self.entrypoint, self.result
)
| mit | Python |
73371de1d1d25c46063b8d3ffb708b98344abdd7 | fix accidental removal | erinspace/scrapi,fabianvf/scrapi,fabianvf/scrapi,CenterForOpenScience/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi | api/webview/views.py | api/webview/views.py | import json
from django.http import HttpResponse
from rest_framework import generics
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.views.decorators.clickjacking import xframe_options_exempt
from elasticsearch import Elasticsearch
from scrapi import settings
from api.webview.models import Document
from api.webview.serializers import DocumentSerializer
es = Elasticsearch(settings.ELASTIC_URI, request_timeout=settings.ELASTIC_TIMEOUT)
class DocumentList(generics.ListAPIView):
"""
List all documents in the SHARE API
"""
serializer_class = DocumentSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def perform_create(self, serializer):
serializer.save(source=self.request.user)
def get_queryset(self):
""" Return all documents
"""
return Document.objects.all()
class DocumentsFromSource(generics.ListAPIView):
"""
List all documents from a particular source
"""
serializer_class = DocumentSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def perform_create(self, serializer):
serializer.save(source=self.request.user)
def get_queryset(self):
""" Return queryset based on source
"""
return Document.objects.filter(source=self.kwargs['source'])
@api_view(['GET'])
@xframe_options_exempt
def document_detail(request, source, docID):
"""
Retrieve one particular document.
"""
try:
document = Document.objects.get(key=Document._make_key(source, docID))
except Document.DoesNotExist:
return Response(status=404)
serializer = DocumentSerializer(document)
return Response(serializer.data)
@api_view(['GET'])
@xframe_options_exempt
def status(request):
"""
Show the status of the API
"""
return HttpResponse(json.dumps({'status': 'ok'}), content_type='application/json', status=200)
@api_view(['POST'])
def institutions(request):
if not es:
return HttpResponse('No connection to elastic search', status=503)
query = request.data.get('query') or {}
es.indices.create(index='institutions', ignore=400)
res = es.search(index=settings.ELASTIC_INST_INDEX, body=query)
# validate query and grab whats wanted
try:
res = {
'results': [val['_source'] for val in res['hits']['hits']],
'aggregations': res.get('aggregations') or res.get('aggs'),
'count': res['hits']['total']
}
except IndexError:
return Response('Invalid query', status=400)
return Response(json.dumps(res), status=200)
| import json
from django.http import HttpResponse
from rest_framework import generics
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.views.decorators.clickjacking import xframe_options_exempt
from elasticsearch import Elasticsearch
from scrapi import settings
from api.webview.models import Document
from api.webview.serializers import DocumentSerializer
es = Elasticsearch(settings.ELASTIC_URI, request_timeout=settings.ELASTIC_TIMEOUT)
class DocumentList(generics.ListAPIView):
"""
List all documents in the SHARE API
"""
serializer_class = DocumentSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def perform_create(self, serializer):
serializer.save(source=self.request.user)
def get_queryset(self):
""" Return all documents
"""
return Document.objects.all()
class DocumentsFromSource(generics.ListAPIView):
"""
List all documents from a particular source
"""
serializer_class = DocumentSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def perform_create(self, serializer):
serializer.save(source=self.request.user)
def get_queryset(self):
""" Return queryset based on source
"""
return Document.objects.filter(source=self.kwargs['source'])
@api_view(['GET'])
@xframe_options_exempt
def document_detail(request, source, docID):
"""
Retrieve one particular document.
"""
try:
document = Document.objects.get(key=Document._make_key(source, docID))
except Document.DoesNotExist:
return Response(status=404)
serializer = DocumentSerializer(document)
return Response(serializer.data)
@api_view(['GET'])
@xframe_options_exempt
def status(request):
"""
Show the status of the API
"""
return HttpResponse(json.dumps({'status': 'ok'}), content_type='application/json', status=200)
@api_view(['POST'])
def institutions(request):
if not es:
return HttpResponse('No connection to elastic search', status=503)
query = request.data.get('query') or {}
res = es.search(index=settings.ELASTIC_INST_INDEX, body=query)
# validate query and grab whats wanted
try:
res = {
'results': [val['_source'] for val in res['hits']['hits']],
'aggregations': res.get('aggregations') or res.get('aggs'),
'count': res['hits']['total']
}
except IndexError:
return Response('Invalid query', status=400)
return Response(json.dumps(res), status=200)
| apache-2.0 | Python |
622495f16bd6fab3a5c76d18aaa4a3ec4ff6d590 | remove unused import. | constanthatz/data-structures | test_linked_list.py | test_linked_list.py | from linked_list import Node
from linked_list import LinkedList
def test_node_init():
n = Node(3)
assert n.val == 3
assert n.next is None
def test_linkedlist_init():
l = LinkedList()
assert l.head is None
def test_linkedlist_repr():
l = LinkedList()
assert repr(l) == '()'
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert repr(l) == "('Things', 32, 'Bob')"
def test_linkedlist_insert():
l = LinkedList()
l.insert('Bob')
assert l.head.val == 'Bob'
l.insert(32)
assert l.head.val == 32
assert l.head.next.val == 'Bob'
def test_linkedlist_pop():
l = LinkedList()
a = l.pop()
assert a is None
l.insert('Bob')
l.insert(32)
a = l.pop()
assert a == 32
assert l.head.val == 'Bob'
def test_linkedlist_size():
l = LinkedList()
assert l.size() == 0
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert l.size() == 3
l.pop()
assert l.size() == 2
def test_linkedlist_search():
l = LinkedList()
l.insert('Bob')
l.insert(32)
assert l.search(32) == l.head
assert l.search('Bob') == l.head.next
def test_linkedlist_remove():
l = LinkedList()
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert l.size() == 3
l.remove(l.search(32))
assert l.search(32) is None
assert l.size() == 2
l.remove(l.search('Things'))
assert l.search('Things') is None
assert l.size() == 1
l.remove(l.search('Bob'))
assert l.search('Bob') is None
assert l.size() == 0
def test_linkedlist_display(capsys):
l = LinkedList()
l.insert('Bob')
l.insert(32)
l.insert('Things')
l.display()
out, err = capsys.readouterr()
assert out == "('Things', 32, 'Bob')\n"
def test_repr():
l = LinkedList()
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert l.__repr__() == u"('Things', 32, 'Bob')"
| import pytest
from linked_list import Node
from linked_list import LinkedList
def test_node_init():
n = Node(3)
assert n.val == 3
assert n.next is None
def test_linkedlist_init():
l = LinkedList()
assert l.head is None
def test_linkedlist_repr():
l = LinkedList()
assert repr(l) == '()'
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert repr(l) == "('Things', 32, 'Bob')"
def test_linkedlist_insert():
l = LinkedList()
l.insert('Bob')
assert l.head.val == 'Bob'
l.insert(32)
assert l.head.val == 32
assert l.head.next.val == 'Bob'
def test_linkedlist_pop():
l = LinkedList()
a = l.pop()
assert a is None
l.insert('Bob')
l.insert(32)
a = l.pop()
assert a == 32
assert l.head.val == 'Bob'
def test_linkedlist_size():
l = LinkedList()
assert l.size() == 0
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert l.size() == 3
l.pop()
assert l.size() == 2
def test_linkedlist_search():
l = LinkedList()
l.insert('Bob')
l.insert(32)
assert l.search(32) == l.head
assert l.search('Bob') == l.head.next
def test_linkedlist_remove():
l = LinkedList()
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert l.size() == 3
l.remove(l.search(32))
assert l.search(32) is None
assert l.size() == 2
l.remove(l.search('Things'))
assert l.search('Things') is None
assert l.size() == 1
l.remove(l.search('Bob'))
assert l.search('Bob') is None
assert l.size() == 0
def test_linkedlist_display(capsys):
l = LinkedList()
l.insert('Bob')
l.insert(32)
l.insert('Things')
l.display()
out, err = capsys.readouterr()
assert out == "('Things', 32, 'Bob')\n"
def test_repr():
l = LinkedList()
l.insert('Bob')
l.insert(32)
l.insert('Things')
assert l.__repr__() == u"('Things', 32, 'Bob')"
| mit | Python |
b97d65a61ed3c0443ee857ef3d6308e18f962a7a | Fix addparam templatetag to resolve request variable correctly | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/templatetags/addparam.py | akvo/rsr/templatetags/addparam.py | # Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
# django snippet 840, see http://www.djangosnippets.org/snippets/840/
from django.template import Library, Node, TemplateSyntaxError, Variable
register = Library()
class AddParameters(Node):
def __init__(self, vars):
self.vars = vars
def render(self, context):
req = Variable('request').resolve(context)
params = req.GET.copy()
for i in range(0, len(self.vars), 2):
key = self.vars[i].resolve(context)
if key == '':
key = self.vars[i]
value = self.vars[i + 1].resolve(context)
if value == '':
value = self.vars[i + 1]
params[key] = value
if params:
params = '&'.join(sorted(params.urlencode().split('&')))
return '?%s' % params
else:
return ''
def addparam(parser, token):
"""
Add multiple parameters to current url
Usage:
{% addparam name1 value1 name2 value2 %}
or
{% addparam "name1" value1 "name2" value2 %}
variable can be use inplace of names and values
example: {% addparam "view" message.id %}
"""
bits = token.contents.split(' ')
if len(bits) < 2:
raise TemplateSyntaxError("'%s' tag requires atleast two arguments" % bits[0])
if len(bits) % 2 != 1:
raise TemplateSyntaxError("The arguments must be pairs")
vars = [parser.compile_filter(bit) for bit in bits[1:]]
return AddParameters(vars)
register.tag('addparam', addparam)
| # Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
# django snippet 840, see http://www.djangosnippets.org/snippets/840/
from django.template import Library, Node, resolve_variable, TemplateSyntaxError
register = Library()
class AddParameters(Node):
def __init__(self, vars):
self.vars = vars
def render(self, context):
req = resolve_variable('request', context)
params = req.GET.copy()
for i in range(0, len(self.vars), 2):
key = self.vars[i].resolve(context)
if key == '':
key = self.vars[i]
value = self.vars[i + 1].resolve(context)
if value == '':
value = self.vars[i + 1]
params[key] = value
if params:
params = '&'.join(sorted(params.urlencode().split('&')))
return '?%s' % params
else:
return ''
def addparam(parser, token):
"""
Add multiple parameters to current url
Usage:
{% addparam name1 value1 name2 value2 %}
or
{% addparam "name1" value1 "name2" value2 %}
variable can be use inplace of names and values
example: {% addparam "view" message.id %}
"""
bits = token.contents.split(' ')
if len(bits) < 2:
raise TemplateSyntaxError("'%s' tag requires atleast two arguments" % bits[0])
if len(bits) % 2 != 1:
raise TemplateSyntaxError("The arguments must be pairs")
vars = [parser.compile_filter(bit) for bit in bits[1:]]
return AddParameters(vars)
register.tag('addparam', addparam)
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.