commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
42a6b8a5c547cf678900bd1f9d4852361edfe75b | Update tests | billvsme/videoSpider | tests/test_start.py | tests/test_start.py | import pytest
import webs
class TestVideoSpider():
def test_entry_points(self):
webs.douban.parsers.movie
webs.douban.parsers.celebrity
webs.douban.parsers.douban_api
webs.douban.parsers.movie_photo
webs.douban.tasks.get_main_movies_base_data
webs.douban.tasks.get_main_movies_full_data
webs.douban.tasks.get_celebrities_full_data
webs.douban.tasks.down_video_images
webs.douban.tasks.down_celebrity_images
webs.bilibili.parsers.bilibili_api
webs.bilibili.parsers.animation
webs.bilibili.tasks.get_animations_base_data
webs.bilibili.tasks.get_animations_full_data
| def test_start():
pass
| mit | Python |
c894bf9754a44cb4a0de26bdc95da65db0d08401 | test for create_task | kacchan822/django-chatwork | tests/test_utils.py | tests/test_utils.py | from django.test import TestCase
from chatwork.utils import send_chatwork, create_task
class SendChatworkTests(TestCase):
def test_send_chatwork(self):
response = send_chatwork('test', 1234)
self.assertEqual(response, {'message_id': '1234'})
class CreateTaskTests(TestCase):
def test_create_task(self):
response = create_task('test', 1234, [123, 124])
self.assertEqual(response, {'task_ids': [123, 124]})
| from django.test import TestCase
from chatwork.utils import send_chatwork
class SendChatworkTests(TestCase):
def test_send_chatwork(self):
response = send_chatwork('test', 1234)
self.assertEqual(response, {'message_id': '1234'})
| mit | Python |
207d4c71fbc40dd30c0099769d6f12fcb63f826e | Add missing username conf for mercurial. | thedrow/pytest-benchmark,SectorLabs/pytest-benchmark,ionelmc/pytest-benchmark,aldanor/pytest-benchmark | tests/test_utils.py | tests/test_utils.py | import subprocess
from pytest import mark
from pytest_benchmark.utils import clonefunc, get_commit_info
pytest_plugins = 'pytester',
f1 = lambda a: a
def f2(a):
return a
@mark.parametrize('f', [f1, f2])
def test_clonefunc(f):
assert clonefunc(f)(1) == f(1)
assert clonefunc(f)(1) == f(1)
def test_clonefunc_not_function():
assert clonefunc(1) == 1
@mark.parametrize('scm', ['git', 'hg'])
def test_get_commit_info(scm, testdir):
subprocess.check_call([scm, 'init', '.'])
if scm == 'git':
subprocess.check_call('git config user.email you@example.com'.split())
subprocess.check_call('git config user.name you'.split())
else:
testdir.tmpdir.join('.hg', 'hgrc').write("""
[ui]
username = you <you@example.com>
""")
testdir.makepyfile('asdf')
subprocess.check_call([scm, 'add', 'test_get_commit_info.py'])
subprocess.check_call([scm, 'commit', '-m', 'asdf'])
out = get_commit_info()
assert out.get('dirty') == False
assert 'id' in out
testdir.makepyfile('sadf')
out = get_commit_info()
assert out.get('dirty') == True
assert 'id' in out
| import subprocess
from pytest import mark
from pytest_benchmark.utils import clonefunc, get_commit_info
f1 = lambda a: a
def f2(a):
return a
@mark.parametrize('f', [f1, f2])
def test_clonefunc(f):
assert clonefunc(f)(1) == f(1)
assert clonefunc(f)(1) == f(1)
def test_clonefunc_not_function():
assert clonefunc(1) == 1
@mark.parametrize('scm', ['git', 'hg'])
def test_get_commit_info(scm, testdir):
subprocess.check_call([scm, 'init', '.'])
if scm == 'git':
subprocess.check_call('git config user.email you@example.com'.split())
subprocess.check_call('git config user.name you'.split())
testdir.makepyfile('asdf')
subprocess.check_call([scm, 'add', 'test_get_commit_info.py'])
subprocess.check_call([scm, 'commit', '-m', 'asdf'])
out = get_commit_info()
assert out.get('dirty') == False
assert 'id' in out
testdir.makepyfile('sadf')
out = get_commit_info()
assert out.get('dirty') == True
assert 'id' in out
| bsd-2-clause | Python |
51d0daa1535b4a1730d2122d8178fb1047a6ab9e | Update import test. | infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore | tests/top/IMPORT.py | tests/top/IMPORT.py | #!/usr/bin/env python
'''Test that all public modules are accessible after importing just 'pyglet'.
This _must_ be the first test run.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: TICK.py 310 2006-12-23 15:56:35Z Alex.Holkner $'
import unittest
import pyglet
__noninteractive = True
modules = [
'app',
'clock',
'event',
'font',
'font.base',
'gl',
'gl.gl_info',
'gl.glu_info',
'graphics',
'graphics.allocation',
'graphics.vertexattribute',
'graphics.vertexbuffer',
'graphics.vertexdomain',
'image',
'image.atlas',
'media',
'resource',
'sprite',
'text',
'text.caret',
'text.document',
'text.layout',
'text.runlist',
'window',
'window.event',
'window.key',
'window.mouse',
]
def add_module_tests(name, bases, dict):
for module in modules:
components = module.split('.')
def create_test(components):
def test_module(self):
top = pyglet
for component in components:
self.assertTrue(hasattr(top, component),
'Cannot access "%s" in "%s"' % (component, top.__name__))
top = getattr(top, component)
return test_module
test_module = create_test(components)
test_name = 'test_%s' % module.replace('.', '_')
test_module.__name__ = test_name
dict[test_name] = test_module
return type.__new__(type, name, bases, dict)
class TEST_CASE(unittest.TestCase):
__metaclass__ = add_module_tests
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
'''Test that all public modules are accessible after importing just 'pyglet'.
This _must_ be the first test run.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: TICK.py 310 2006-12-23 15:56:35Z Alex.Holkner $'
import unittest
import pyglet
__noninteractive = True
modules = [
'app',
'clock',
'event',
'font',
'font.base',
'gl',
'gl.gl_info',
'gl.glu_info',
'graphics',
'graphics.allocation',
'graphics.vertexattribute',
'graphics.vertexbuffer',
'graphics.vertexdomain',
'image',
'media',
'resource',
'sprite',
'text',
'text.caret',
'text.document',
'text.layout',
'text.runlist',
'window',
'window.event',
'window.key',
'window.mouse',
]
def add_module_tests(name, bases, dict):
for module in modules:
components = module.split('.')
def create_test(components):
def test_module(self):
top = pyglet
for component in components:
self.assertTrue(hasattr(top, component),
'Cannot access "%s" in "%s"' % (component, top.__name__))
top = getattr(top, component)
return test_module
test_module = create_test(components)
test_name = 'test_%s' % module.replace('.', '_')
test_module.__name__ = test_name
dict[test_name] = test_module
return type.__new__(type, name, bases, dict)
class TEST_CASE(unittest.TestCase):
__metaclass__ = add_module_tests
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
d18d8f2deb43a55b5898899dcfb2fd6d7fee45fe | Update label in test to match internal changes | aaronc-bixly/notifications,coldmind/pinax-notifications,affiliprint/pinax-notifications,synasius/pinax-notifications,pinax/pinax-notifications,affiliprint/pinax-notifications,aaronc-bixly/notifications,coldmind/pinax-notifications,pinax/pinax-notifications,sherzberg/django-notification,synasius/pinax-notifications,sherzberg/django-notification | pinax/notifications/tests/test_views.py | pinax/notifications/tests/test_views.py | from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from ..compat import get_user_model
from ..models import NoticeType, NoticeSetting
from ..views import NoticeSettingsView
from . import get_backend_id
class TestViews(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user(username="test_user", email="test@user.com", password="123456")
def test_notice_settings(self):
NoticeType.create("label_1", "display", "description")
notice_type_1 = NoticeType.objects.get(label="label_1")
NoticeType.create("label_2", "display", "description")
notice_type_2 = NoticeType.objects.get(label="label_2")
email_id = get_backend_id("email")
setting = NoticeSetting.for_user(self.user, notice_type_2, email_id, scoping=None)
setting.send = False
setting.save()
url = reverse("notification_notice_settings")
request = self.factory.get(url)
request.user = self.user
response = NoticeSettingsView.as_view()(request)
self.assertEqual(response.status_code, 200) # pylint: disable-msg=E1103
label = "setting-{0}-{1}".format(
notice_type_2.pk,
email_id
)
post_data = {
label: "on",
}
request = self.factory.post(url, data=post_data)
request.user = self.user
response = NoticeSettingsView.as_view()(request)
self.assertEqual(response.status_code, 302) # pylint: disable-msg=E1103
self.assertFalse(NoticeSetting.for_user(self.user, notice_type_1, email_id, scoping=None).send)
self.assertTrue(NoticeSetting.for_user(self.user, notice_type_2, email_id, scoping=None).send)
| from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from ..compat import get_user_model
from ..models import NoticeType, NoticeSetting
from ..views import NoticeSettingsView
from . import get_backend_id
class TestViews(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user(username="test_user", email="test@user.com", password="123456")
def test_notice_settings(self):
NoticeType.create("label_1", "display", "description")
notice_type_1 = NoticeType.objects.get(label="label_1")
NoticeType.create("label_2", "display", "description")
notice_type_2 = NoticeType.objects.get(label="label_2")
email_id = get_backend_id("email")
setting = NoticeSetting.for_user(self.user, notice_type_2, email_id, scoping=None)
setting.send = False
setting.save()
url = reverse("notification_notice_settings")
request = self.factory.get(url)
request.user = self.user
response = NoticeSettingsView.as_view()(request)
self.assertEqual(response.status_code, 200) # pylint: disable-msg=E1103
label = "setting-{0}-{1}-{2}".format(
notice_type_2.pk,
notice_type_2.label,
email_id
)
post_data = {
label: "on",
}
request = self.factory.post(url, data=post_data)
request.user = self.user
response = NoticeSettingsView.as_view()(request)
self.assertEqual(response.status_code, 302) # pylint: disable-msg=E1103
self.assertFalse(NoticeSetting.for_user(self.user, notice_type_1, email_id, scoping=None).send)
self.assertTrue(NoticeSetting.for_user(self.user, notice_type_2, email_id, scoping=None).send)
| mit | Python |
5f7d845fe65b9febc45d754a346dd749ab4e13bf | set AUTOSYNTH_MULTIPLE_COMMITS=true for context aware commits (#402) | googleapis/google-cloud-node,googleapis/google-cloud-node,googleapis/google-cloud-node,googleapis/google-cloud-node | packages/google-cloud-language/synth.py | packages/google-cloud-language/synth.py | import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess
logging.basicConfig(level=logging.DEBUG)
AUTOSYNTH_MULTIPLE_COMMITS = True
gapic = gcp.GAPICMicrogenerator()
# tasks has two product names, and a poorly named artman yaml
for version in ['v1', 'v1beta2']:
library = gapic.typescript_library(
'language',
generator_args={
"grpc-service-config": f"google/cloud/language/{version}/language_grpc_service_config.json",
"package-name":f"@google-cloud/language"
},
proto_path=f'/google/cloud/language/{version}',
version=version)
# skip index, protos, package.json, and README.md
s.copy(
library,
excludes=['package.json', 'README.md', 'src/index.ts'])
# Update common templates
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'fix'])
subprocess.run(['npx', 'compileProtos', 'src'])
| import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICMicrogenerator()
# tasks has two product names, and a poorly named artman yaml
for version in ['v1', 'v1beta2']:
library = gapic.typescript_library(
'language',
generator_args={
"grpc-service-config": f"google/cloud/language/{version}/language_grpc_service_config.json",
"package-name":f"@google-cloud/language"
},
proto_path=f'/google/cloud/language/{version}',
version=version)
# skip index, protos, package.json, and README.md
s.copy(
library,
excludes=['package.json', 'README.md', 'src/index.ts'])
# Update common templates
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'fix'])
subprocess.run(['npx', 'compileProtos', 'src'])
| apache-2.0 | Python |
1068eb81a1e3bb6031cce536450694d159d55034 | bump version number to 0.0.4 | Christophe31/django-tickets,Christophe31/django-tickets,byteweaver/django-tickets,byteweaver/django-tickets | tickets/__init__.py | tickets/__init__.py | __version__ = '0.0.4'
| __version__ = '0.0.3'
| bsd-3-clause | Python |
4e2b1c78102079c2138cd951289b5bb9f66205fc | Update __openerp__.py | elmonitor/workshop-td | product_price_margin_div/__openerp__.py | product_price_margin_div/__openerp__.py | {
"name": "Product price based on margin",
"description": "Product price based on margin with formula (sale_price=cost_price/margin)"
"version": "8.0.0.1",
"author": "3nodus",
'category': 'Product',
"website": "http://www.3nodus.com/",
"license": "AGPL-3",
"depends": [
"product",
],
"demo": [
],
"data": [
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
}
| {
"name": "Product price based on margin with formula sale_price=cost_price/margin",
"version": "8.0.0.1",
"author": "3nodus",
'category': 'Product',
"website": "http://www.3nodus.com/",
"license": "AGPL-3",
"depends": [
"product",
],
"demo": [
],
"data": [
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
}
| agpl-3.0 | Python |
151cfae24b650b74a9a216be61c3e440369274d9 | Fix 'website´ manifest key | kmee/department,OCA/department,acsone/department | project_issue_department/__openerp__.py | project_issue_department/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Issue with Department',
'version': '8.0.1.1.0',
"category": "Project Management",
'description': """\
Add Department field to Project Issues.
Selecting a Project for an issue will automatically populate this with the
Project's defined Department.
""",
'author': "Daniel Reis,Odoo Community Association (OCA)",
'website': 'https://odoo-community.org',
'license': 'AGPL-3',
'depends': [
'project_issue',
'project_department',
],
'data': [
'project_issue_view.xml',
'security/ir.model.access.csv',
],
'installable': True,
'application': False,
'auto_install': True,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Issue with Department',
'version': '8.0.1.1.0',
"category": "Project Management",
'description': """\
Add Department field to Project Issues.
Selecting a Project for an issue will automatically populate this with the
Project's defined Department.
""",
'author': "Daniel Reis,Odoo Community Association (OCA)",
'website': 'daniel.reis@securitas.pt',
'license': 'AGPL-3',
'depends': [
'project_issue',
'project_department',
],
'data': [
'project_issue_view.xml',
'security/ir.model.access.csv',
],
'installable': True,
'application': False,
'auto_install': True,
}
| agpl-3.0 | Python |
c2ea66556d52513c087f31401aacee907b2a6742 | unify kernels | okuta/chainer,keisuke-umezawa/chainer,aonotas/chainer,okuta/chainer,tkerola/chainer,hvy/chainer,wkentaro/chainer,niboshi/chainer,chainer/chainer,ktnyt/chainer,okuta/chainer,jnishi/chainer,niboshi/chainer,chainer/chainer,niboshi/chainer,rezoo/chainer,ronekko/chainer,niboshi/chainer,chainer/chainer,wkentaro/chainer,jnishi/chainer,ktnyt/chainer,keisuke-umezawa/chainer,wkentaro/chainer,pfnet/chainer,ktnyt/chainer,keisuke-umezawa/chainer,jnishi/chainer,anaruse/chainer,jnishi/chainer,hvy/chainer,hvy/chainer,wkentaro/chainer,ktnyt/chainer,chainer/chainer,okuta/chainer,keisuke-umezawa/chainer,hvy/chainer | chainer/functions/math/maximum.py | chainer/functions/math/maximum.py | import numpy
import chainer
from chainer import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Maximum(function_node.FunctionNode):
"""Element-wise maximum of input variables."""
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 2,
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x1, x2 = inputs
y = numpy.maximum(x1, x2)
return utils.force_array(y),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x1, x2 = inputs
return cuda.cupy.maximum(x1, x2),
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
return MaximumGrad(
utils.force_array(x1.data >= x2.data)).apply((grad_outputs[0],))
class MaximumGrad(function_node.FunctionNode):
def __init__(self, cond):
self.cond = cond
def forward_cpu(self, inputs):
gy, = inputs
gx1 = numpy.where(self.cond, gy, gy.dtype.type(0))
gx2 = numpy.where(self.cond, gy.dtype.type(0), gy)
return utils.force_array(gx1), utils.force_array(gx2)
def forward_gpu(self, inputs):
gy, = inputs
gx1, gx2 = cuda.elementwise(
'S cond, T gy', 'T gx1, T gx2',
'''
gx1 = cond ? gy : (T)0.0;
gx2 = cond ? (T)0.0 : gy;
''',
'maximum_bwd1')(self.cond, gy)
return gx1, gx2
def backward(self, indexes, grad_outputs):
return chainer.functions.where(
self.cond, grad_outputs[0], grad_outputs[1]),
def maximum(x1, x2):
"""Element-wise maximum of input variables.
Args:
x1 (~chainer.Variable): Input variables to be compared.
x2 (~chainer.Variable): Input variables to be compared.
Returns:
~chainer.Variable: Output variable.
"""
return Maximum().apply((x1, x2))[0]
| import numpy
import chainer
from chainer import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Maximum(function_node.FunctionNode):
"""Element-wise maximum of input variables."""
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 2,
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x1, x2 = inputs
y = numpy.maximum(x1, x2)
return utils.force_array(y),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x1, x2 = inputs
return cuda.cupy.maximum(x1, x2),
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
return MaximumGrad(
utils.force_array(x1.data >= x2.data)).apply((grad_outputs[0],))
class MaximumGrad(function_node.FunctionNode):
def __init__(self, cond):
self.cond = cond
def forward_cpu(self, inputs):
gy, = inputs
gx1 = numpy.where(self.cond, gy, gy.dtype.type(0))
gx2 = numpy.where(self.cond, gy.dtype.type(0), gy)
return utils.force_array(gx1), utils.force_array(gx2)
def forward_gpu(self, inputs):
gy, = inputs
gx1 = cuda.elementwise(
'S cond, T gy', 'T gx1',
'gx1 = cond ? gy : (T)0.0',
'maximum_bwd1')(self.cond, gy)
gx2 = cuda.elementwise(
'S cond, T gy', 'T gx1',
'gx1 = cond ? (T)0.0 : gy',
'maximum_bwd2')(self.cond, gy)
return gx1, gx2
def backward(self, indexes, grad_outputs):
return chainer.functions.where(
self.cond, grad_outputs[0], grad_outputs[1]),
def maximum(x1, x2):
"""Element-wise maximum of input variables.
Args:
x1 (~chainer.Variable): Input variables to be compared.
x2 (~chainer.Variable): Input variables to be compared.
Returns:
~chainer.Variable: Output variable.
"""
return Maximum().apply((x1, x2))[0]
| mit | Python |
9b974a6395bc2ee35271d5f250684d54eab87c63 | fix startDate | CERT-BDF/TheHive4py | TheHive4py/models.py | TheHive4py/models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
class CustomJsonEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, JSONSerializable):
return o.__dict__
else:
return json.JSONEncoder.encode(self, o)
class JSONSerializable(object):
# def __repr__(self):
# return json.dumps(self.__dict__)
def jsonify(self):
return json.dumps(self, sort_keys=True, cls=CustomJsonEncoder)
# return json.dumps(self.__dict__)
class Case(JSONSerializable):
def __init__(self, **attributes):
self.title = attributes['title'] if attributes.get('title') else None
self.description = attributes['description'] if attributes.get('description') else None
self.tlp = attributes['tlp'] if attributes.get('tlp') else 2
self.severity = attributes['severity'] if attributes.get('severity') else 2
self.flag = attributes['flag'] if attributes.get('flag') else False
self.tags = attributes['tags'] if attributes.get('tags') else []
self.tasks = attributes['tasks'] if attributes.get('tasks') else []
self.metrics = attributes['metrics'] if attributes.get('metrics') else {}
self.startDate = attributes['startDate'] if attributes.get('startDate') else int(time.time())*1000
class CaseTask(JSONSerializable):
def __init__(self, **attributes):
self.title = attributes['title'] if attributes.get('title') else None
self.status = attributes['status'] if attributes.get('status') else 'Waiting'
self.flag = attributes['flag'] if attributes.get('flag') else False
self.description = attributes['description'] if attributes.get('description') else None
self.owner = attributes['owner'] if attributes.get('owner') else None
self.startDate = attributes['startDate'] if attributes.get('startDate') else None
class CaseTaskLog(JSONSerializable):
def __init__(self, **attributes):
self.message = attributes['message'] if attributes.get('message') else None
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
class CustomJsonEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, JSONSerializable):
return o.__dict__
else:
return json.JSONEncoder.encode(self, o)
class JSONSerializable(object):
# def __repr__(self):
# return json.dumps(self.__dict__)
def jsonify(self):
return json.dumps(self, sort_keys=True, cls=CustomJsonEncoder)
# return json.dumps(self.__dict__)
class Case(JSONSerializable):
def __init__(self, **attributes):
self.title = attributes['title'] if attributes.get('title') else None
self.description = attributes['description'] if attributes.get('description') else None
self.tlp = attributes['tlp'] if attributes.get('tlp') else 2
self.severity = attributes['severity'] if attributes.get('severity') else 2
self.flag = attributes['flag'] if attributes.get('flag') else False
self.tags = attributes['tags'] if attributes.get('tags') else []
self.tasks = attributes['tasks'] if attributes.get('tasks') else []
self.metrics = attributes['metrics'] if attributes.get('metrics') else {}
self.startDate = attributes['startDate'] if attributes.get('startDate') else int(time.time())
class CaseTask(JSONSerializable):
def __init__(self, **attributes):
self.title = attributes['title'] if attributes.get('title') else None
self.status = attributes['status'] if attributes.get('status') else 'Waiting'
self.flag = attributes['flag'] if attributes.get('flag') else False
self.description = attributes['description'] if attributes.get('description') else None
self.owner = attributes['owner'] if attributes.get('owner') else None
self.startDate = attributes['startDate'] if attributes.get('startDate') else None
class CaseTaskLog(JSONSerializable):
def __init__(self, **attributes):
self.message = attributes['message'] if attributes.get('message') else None
| agpl-3.0 | Python |
a172a1d4c0458fa0c1e145b7d44971d631ac30c8 | Bump version. | datajoint/datajoint-python,dimitri-yatsenko/datajoint-python | datajoint/version.py | datajoint/version.py | __version__ = "0.13.dev6"
assert len(__version__) <= 10 # The log table limits version to the 10 characters
| __version__ = "0.13.dev5"
assert len(__version__) <= 10 # The log table limits version to the 10 characters
| lgpl-2.1 | Python |
0549ad0bd541f4b32e522e62a007358f580faedd | Fix blueprints helper | DataViva/dataviva-api | app/helpers/blueprints_helper.py | app/helpers/blueprints_helper.py | from os import getcwd, path
from importlib import import_module
from pkgutil import walk_packages
def register_blueprints(flask, package):
package_dir = path.join(getcwd(), flask.name, package)
blueprints = [import_module(f"{flask.name}.{package}.{module.name}").blueprint
for module in walk_packages([package_dir])]
for blueprint in blueprints:
flask.register_blueprint(blueprint)
| from os import getcwd, path
from importlib import import_module
from pkgutil import walk_packages
def register_blueprints(flask, package):
package_dir = path.join(getcwd(), flask.name, package)
blueprints = [import_module(f"{__name__}.{module.name}").blueprint
for module in walk_packages(package_dir)]
for blueprint in blueprints:
flask.register_blueprint(blueprint)
| mit | Python |
e3dcb3497b921786db581571ef129cba80536c2e | Bump to 3.1.0 | ZuluPro/django-dbbackup,mjs7231/django-dbbackup,django-dbbackup/django-dbbackup,mjs7231/django-dbbackup,django-dbbackup/django-dbbackup,ZuluPro/django-dbbackup | dbbackup/__init__.py | dbbackup/__init__.py | "Management commands to help backup and restore a project database and media"
VERSION = (3, 1, 0)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Michael Shepanski'
__email__ = 'mjs7231@gmail.com'
__url__ = 'https://github.com/django-dbbackup/django-dbbackup'
default_app_config = 'dbbackup.apps.DbbackupConfig'
| "Management commands to help backup and restore a project database and media"
VERSION = (3, 0, 4)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Michael Shepanski'
__email__ = 'mjs7231@gmail.com'
__url__ = 'https://github.com/django-dbbackup/django-dbbackup'
default_app_config = 'dbbackup.apps.DbbackupConfig'
| bsd-3-clause | Python |
f86b502746d303900061ba33b58135f5730988e6 | Update doc for Snake activation to match literature and return statement (#2572) | tensorflow/addons,tensorflow/addons,tensorflow/addons | tensorflow_addons/activations/snake.py | tensorflow_addons/activations/snake.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow_addons.utils import types
@tf.keras.utils.register_keras_serializable(package="Addons")
def snake(x: types.TensorLike, frequency: types.Number = 1) -> tf.Tensor:
r"""Snake activation to learn periodic functions.
Computes snake activation:
$$
\mathrm{snake}(x) = \mathrm{x} + \frac{1 - \cos(2 \cdot \mathrm{frequency} \cdot x)}{2 \cdot \mathrm{frequency}}.
$$
See [Neural Networks Fail to Learn Periodic Functions and How to Fix It](https://arxiv.org/abs/2006.08195).
Usage:
>>> x = tf.constant([-1.0, 0.0, 1.0])
>>> tfa.activations.snake(x)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.29192656, 0. , 1.7080734 ], dtype=float32)>
Args:
x: A `Tensor`.
frequency: A scalar, frequency of the periodic part.
Returns:
A `Tensor`. Has the same type as `x`.
"""
x = tf.convert_to_tensor(x)
frequency = tf.cast(frequency, x.dtype)
return x + (1 - tf.cos(2 * frequency * x)) / (2 * frequency)
| # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow_addons.utils import types
@tf.keras.utils.register_keras_serializable(package="Addons")
def snake(x: types.TensorLike, frequency: types.Number = 1) -> tf.Tensor:
r"""Snake activation to learn periodic functions.
Computes snake activation:
$$
\mathrm{snake}(x) = \frac{x + (1 - \cos(2 \cdot \mathrm{frequency} \cdot x))}{2 \cdot \mathrm{frequency}}.
$$
See [Neural Networks Fail to Learn Periodic Functions and How to Fix It](https://arxiv.org/abs/2006.08195).
Usage:
>>> x = tf.constant([-1.0, 0.0, 1.0])
>>> tfa.activations.snake(x)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.29192656, 0. , 1.7080734 ], dtype=float32)>
Args:
x: A `Tensor`.
frequency: A scalar, frequency of the periodic part.
Returns:
A `Tensor`. Has the same type as `x`.
"""
x = tf.convert_to_tensor(x)
frequency = tf.cast(frequency, x.dtype)
return x + (1 - tf.cos(2 * frequency * x)) / (2 * frequency)
| apache-2.0 | Python |
5b6935487f0740b56cdeb6d85d5d95d4702e10a6 | Update filtering.py | krzjoa/sciquence,krzjoa/sciquence | sciquence/sequences/filtering.py | sciquence/sequences/filtering.py | def parallel_filter(condition, *lists):
'''
Parallelly filter multiple lists.
Parameters
----------
condition: callable
A function, which has as many arguments as the number of lists
lists: list of list
Returns
-------
filtered_lists:
Filtered accordingly some criterion
'''
# TODO: check length
if isinstance(lists[0], list) and len(lists) == 1:
lists = lists[0]
output = [[] for _ in xrange(len(lists))]
for d in zip(*lists):
if condition(*list(d)):
multi_append(output, *list(d))
print output
return output
| def parallel_filter(condition, *lists):
'''
Parallelly filter multiple lists.
Parameters
----------
condition: callable
A function, which has as many arguments as the number of lists
lists: list of list
Returns
-------
filtered_lists:
Filtered accordingly some criterion
'''
# TODO: check length
output = [[] for _ in xrange(len(lists))]
for d in zip(*lists):
if condition(*list(d)):
multi_append(output, *list(d))
print output
return output
| mit | Python |
ebe21ae853f8fafa6fbbe07dfbf5dc98c3b887ee | Add Persian Wikivoyage | magul/pywikibot-core,darthbhyrava/pywikibot-local,hasteur/g13bot_tools_new,happy5214/pywikibot-core,npdoty/pywikibot,xZise/pywikibot-core,Darkdadaah/pywikibot-core,hasteur/g13bot_tools_new,happy5214/pywikibot-core,valhallasw/pywikibot-core,smalyshev/pywikibot-core,h4ck3rm1k3/pywikibot-core,jayvdb/pywikibot-core,hasteur/g13bot_tools_new,magul/pywikibot-core,PersianWikipedia/pywikibot-core,wikimedia/pywikibot-core,wikimedia/pywikibot-core,TridevGuha/pywikibot-core,icyflame/batman,VcamX/pywikibot-core,npdoty/pywikibot,h4ck3rm1k3/pywikibot-core,Darkdadaah/pywikibot-core,trishnaguha/pywikibot-core,jayvdb/pywikibot-core,emijrp/pywikibot-core | pywikibot/families/wikivoyage_family.py | pywikibot/families/wikivoyage_family.py | # -*- coding: utf-8 -*-
"""Family module for Wikivoyage."""
__version__ = '$Id$'
# The new wikivoyage family that is hosted at wikimedia
from pywikibot import family
class Family(family.WikimediaFamily):

    """Family class for Wikivoyage."""

    def __init__(self):
        """Constructor."""
        super(Family, self).__init__()
        self.name = 'wikivoyage'
        # Language codes ordered by wiki size.
        self.languages_by_size = [
            'en', 'de', 'fr', 'it', 'pt', 'nl', 'pl', 'ru', 'es', 'vi', 'sv',
            'zh', 'he', 'ro', 'uk', 'el', 'fa'
        ]
        # Map each language code to its wiki hostname; a dict comprehension
        # replaces the less idiomatic dict() over a list of pairs.
        self.langs = {lang: '%s.wikivoyage.org' % lang
                      for lang in self.languages_by_size}

        # Global bot allowed languages on https://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
        self.cross_allowed = ['es', 'ru', ]

    def shared_data_repository(self, code, transcluded=False):
        """Return the shared data repository for this site."""
        return ('wikidata', 'wikidata')
| # -*- coding: utf-8 -*-
"""Family module for Wikivoyage."""
__version__ = '$Id$'
# The new wikivoyage family that is hosted at wikimedia
from pywikibot import family
class Family(family.WikimediaFamily):

    """Family class for Wikivoyage."""

    def __init__(self):
        """Constructor."""
        super(Family, self).__init__()
        self.name = 'wikivoyage'
        # Language codes ordered by wiki size.
        self.languages_by_size = [
            'en', 'de', 'fr', 'it', 'pt', 'nl', 'pl', 'ru', 'es', 'vi', 'sv',
            'zh', 'he', 'ro', 'uk', 'el',
        ]
        # Map each language code to its wiki hostname; a dict comprehension
        # replaces the less idiomatic dict() over a list of pairs.
        self.langs = {lang: '%s.wikivoyage.org' % lang
                      for lang in self.languages_by_size}

        # Global bot allowed languages on https://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
        self.cross_allowed = ['es', 'ru', ]

    def shared_data_repository(self, code, transcluded=False):
        """Return the shared data repository for this site."""
        return ('wikidata', 'wikidata')
| mit | Python |
967cf8774a5033f310ca69e7ad86fc79b2628882 | Tag provisioning instances to make them easier to identify | bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox | infrastructure/aws/trigger-provision.py | infrastructure/aws/trigger-provision.py | # trigger-provision.py <indexer-provision.sh | web-server-provision.sh>
import boto3
from datetime import datetime, timedelta
import sys
import os.path
# Provisioning shell scripts named on the command line.
# NOTE(review): only sys.argv[1] is used in the tag below, even when
# several provisioners are concatenated -- confirm that is intended.
provisioners = sys.argv[1:]

ec2 = boto3.resource('ec2')
client = boto3.client('ec2')

# Concatenate every provisioner into one shell script.
script = ''
for provisioner in provisioners:
    script += open(provisioner).read() + '\n'

# Cloud-init user data: write the combined script to ~ubuntu/provision.sh
# and run it as the ubuntu user on first boot.
user_data = '''#!/usr/bin/env bash
cat > ~ubuntu/provision.sh <<"FINAL"
{script}
FINAL
chmod +x ~ubuntu/provision.sh
sudo -i -u ubuntu ~ubuntu/provision.sh
'''.format(script=script)

# ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20160815 (ami-f701cb97)
image_id = 'ami-f701cb97'

launch_spec = {
    'ImageId': image_id,
    'KeyName': 'Main Key Pair',
    'SecurityGroups': ['indexer'],
    'UserData': user_data,
    'InstanceType': 'c3.2xlarge',
    'BlockDeviceMappings': [],
    # Tag the instance with the provisioner that built it so it is easy
    # to identify in the EC2 console.
    'TagSpecifications': [{
        'ResourceType': 'instance',
        'Tags': [{
            'Key': 'provisioner',
            'Value': sys.argv[1],
        }],
    }],
}

# Launch exactly one instance with the spec above.
client.run_instances(MinCount=1, MaxCount=1, **launch_spec)
| # trigger-provision.py <indexer-provision.sh | web-server-provision.sh>
import boto3
from datetime import datetime, timedelta
import sys
import os.path
# Provisioning shell scripts named on the command line.
provisioners = sys.argv[1:]

ec2 = boto3.resource('ec2')
client = boto3.client('ec2')

# Concatenate every provisioner into one shell script.
script = ''
for provisioner in provisioners:
    script += open(provisioner).read() + '\n'

# Cloud-init user data: write the combined script to ~ubuntu/provision.sh
# and run it as the ubuntu user on first boot.
user_data = '''#!/usr/bin/env bash
cat > ~ubuntu/provision.sh <<"FINAL"
{script}
FINAL
chmod +x ~ubuntu/provision.sh
sudo -i -u ubuntu ~ubuntu/provision.sh
'''.format(script=script)

# ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20160815 (ami-f701cb97)
image_id = 'ami-f701cb97'

launch_spec = {
    'ImageId': image_id,
    'KeyName': 'Main Key Pair',
    'SecurityGroups': ['indexer'],
    'UserData': user_data,
    'InstanceType': 'c3.2xlarge',
    'BlockDeviceMappings': []
}

# Launch exactly one instance with the spec above.
client.run_instances(MinCount=1, MaxCount=1, **launch_spec)
| mpl-2.0 | Python |
1fd6d7243cbbba95a747327d9c21ec0972b97e16 | Update tests | josuemontano/API-platform,josuemontano/pyramid-angularjs-starter,josuemontano/pyramid-angularjs-starter,josuemontano/API-platform,josuemontano/pyramid-angularjs-starter,josuemontano/api-starter,josuemontano/API-platform,josuemontano/api-starter,josuemontano/API-platform,josuemontano/api-starter | demonstrare/tests.py | demonstrare/tests.py | import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
    """Unit tests for the view callables."""

    def setUp(self):
        """Build a fresh Pyramid testing configuration for each test."""
        self.config = testing.setUp()

    def tearDown(self):
        """Tear the testing configuration back down."""
        testing.tearDown()

    def test_my_view(self):
        """``index`` should expose the expected year in its result dict."""
        from .views import index
        dummy_request = testing.DummyRequest()
        result = index(dummy_request)
        self.assertEqual(result['year'], 2015)
| import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
    """Unit tests for the view callables."""

    def setUp(self):
        """Build a fresh Pyramid testing configuration for each test."""
        self.config = testing.setUp()

    def tearDown(self):
        """Tear the testing configuration back down."""
        testing.tearDown()

    def test_my_view(self):
        """``my_view`` should expose the project name in its result dict."""
        from .views import my_view
        dummy_request = testing.DummyRequest()
        result = my_view(dummy_request)
        self.assertEqual(result['project'], 'demonstrare')
| mit | Python |
15d4f03471c7b34feab05ec725ccb536923aae6b | add methods to call ruby library | SmartDeveloperHub/gitlab-api-generator,SmartDeveloperHub/gitlab-api-generator,SmartDeveloperHub/gitlab-api-generator,SmartDeveloperHub/gitlab-api-generator | utils/repository.py | utils/repository.py | """
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import os
import shutil
import settings
from subprocess import call
__author__ = 'Alejandro F. Carrera'
def get_branches_with_filter(repository, fil):
    """Return the repository's branch names, keeping only those containing
    ``fil`` when a filter is given; return all names when ``fil`` is None."""
    branch_names = repository.branches.keys()
    if fil is not None:
        return [name for name in branch_names if fil in name]
    return branch_names
def remove_info_from_branches(branches_list, fil, branches_to_remove):
    """Drop ``branches_to_remove`` from ``branches_list`` (in place, matching
    the original behaviour) and strip the ``-<fil>`` suffix from the rest."""
    for unwanted in branches_to_remove:
        branches_list.remove(unwanted)
    suffix = "-" + fil
    return map(lambda name: name.replace(suffix, ""), branches_list)
def move_to_specific_branch(repository, branch):
    """Switch the repository wrapper to ``branch`` via its switch_branch API."""
    repository.switch_branch(branch)
def generate_doc(branch):
    """Clear previous output and run the ruby generator for *branch*."""
    doc_path = settings.GEN_DOC_DISK_PATH
    # Remove any previous output, whether it is a directory tree or a file.
    if os.path.isdir(doc_path):
        shutil.rmtree(doc_path, True)
    elif os.path.exists(doc_path):
        os.remove(doc_path)
    settings.print_message(" - Generating branch: %s." % branch)
    call(["./gitlab-docs/generate.rb"])
| """
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
__author__ = 'Alejandro F. Carrera'
def get_branches_with_filter(repository, fil):
    """Return the repository's branch names, keeping only those containing
    ``fil`` when a filter is given; return all names when ``fil`` is None."""
    branch_names = repository.branches.keys()
    if fil is not None:
        return [name for name in branch_names if fil in name]
    return branch_names
def remove_info_from_branches(branches_list, fil, branches_to_remove):
    """Drop ``branches_to_remove`` from ``branches_list`` (in place, matching
    the original behaviour) and strip the ``-<fil>`` suffix from the rest."""
    for unwanted in branches_to_remove:
        branches_list.remove(unwanted)
    suffix = "-" + fil
    return map(lambda name: name.replace(suffix, ""), branches_list)
| apache-2.0 | Python |
eceacfc149948c6d5c618e8ec609c05de66d939d | Update largest-number.py | kamyu104/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,jaredkoontz/leetcode,githubutilities/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,githubutilities/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/largest-number.py | Python/largest-number.py | # Time: O(nlogn)
# Space: O(1)
#
# Given a list of non negative integers, arrange them such that they form the largest number.
#
# For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
#
# Note: The result may be very large, so you need to return a string instead of an integer.
#
class Solution:
    # @param num, a list of integers
    # @return a string
    def largestNumber(self, num):
        """Arrange the integers so their concatenation is numerically largest."""
        from functools import cmp_to_key

        def compare(x, y):
            # Same ordering as the removed Python-2-only cmp(y + x, x + y):
            # x comes first when x + y beats y + x.
            a, b = y + x, x + y
            return (a > b) - (a < b)

        strs = [str(x) for x in num]
        # key= works on Python 2.7 and 3, unlike sort(cmp=...).
        strs.sort(key=cmp_to_key(compare))
        largest = ''.join(strs)
        # All zeros (or empty input) collapse to a single '0'.
        return largest.lstrip('0') or '0'
# Ad-hoc manual check (Python 2 print statement).
if __name__ == "__main__":
    num = [3, 30, 34, 5, 9]
    print Solution().largestNumber(num)
| # Time: O(n^2)
# Space: O(n)
#
# Given a list of non negative integers, arrange them such that they form the largest number.
#
# For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
#
# Note: The result may be very large, so you need to return a string instead of an integer.
#
class Solution:
    # @param num, a list of integers
    # @return a string
    def largestNumber(self, num):
        """Arrange the integers so their concatenation is numerically largest."""
        from functools import cmp_to_key

        def compare(x, y):
            # Same ordering as the removed Python-2-only cmp(y + x, x + y):
            # x comes first when x + y beats y + x.
            a, b = y + x, x + y
            return (a > b) - (a < b)

        strs = [str(x) for x in num]
        # key= works on Python 2.7 and 3, unlike sort(cmp=...).
        strs.sort(key=cmp_to_key(compare))
        largest = ''.join(strs)
        # All zeros (or empty input) collapse to a single '0'.
        return largest.lstrip('0') or '0'
# Ad-hoc manual check (Python 2 print statement).
if __name__ == "__main__":
    num = [3, 30, 34, 5, 9]
    print Solution().largestNumber(num)
| mit | Python |
2f31af1ea22e0bfefe2042f8d2ba5ad6ce365116 | Clear S3 API key from code | jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate | RG/drivers/s3/secrets.py | RG/drivers/s3/secrets.py | #!/usr/bin/python
SECRETS = {
"AWS_ACCESS_KEY_ID": "XXX",
"AWS_SECRET_ACCESS_KEY": "XXX"
}
| #!/usr/bin/python
SECRETS = {
"AWS_ACCESS_KEY_ID": "AKIAI2TRFQV2HZIHUD4A",
"AWS_SECRET_ACCESS_KEY": "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"
}
| apache-2.0 | Python |
9f1a546a4a2b044b9b2513c4a653faaab299a76c | Add queryset for create channel API (#3534) | mitodl/micromasters,mitodl/micromasters,mitodl/micromasters,mitodl/micromasters | discussions/views.py | discussions/views.py | """APIs for discussions"""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from rest_framework import status
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from discussions.permissions import CanCreateChannel
from discussions.serializers import ChannelSerializer
from discussions.utils import get_token_for_request
def _set_jwt_cookie(response, token):
    """
    Attach the discussions JWT to the response as an http-only cookie.

    Args:
        response (django.http.Response): the response to set the cookie on
        token (str): the JWT token
    """
    cookie_options = {
        'domain': settings.OPEN_DISCUSSIONS_COOKIE_DOMAIN,
        'httponly': True,
    }
    response.set_cookie(
        settings.OPEN_DISCUSSIONS_COOKIE_NAME,
        token,
        **cookie_options
    )
@api_view()
def discussions_token(request):
    """
    API view for setting discussions JWT token
    """
    token = get_token_for_request(request)
    # No token means the user may not access discussions.
    if token is None:
        return Response({'has_token': False}, status=status.HTTP_403_FORBIDDEN)
    response = Response({'has_token': True})
    _set_jwt_cookie(response, token)
    return response
@login_required
def discussions_redirect(request):
    """
    View for setting discussions JWT token and redirecting to discussions
    """
    token = get_token_for_request(request)
    # No token means we cannot hand the user over to discussions.
    if token is None:
        return HttpResponse('', status=status.HTTP_409_CONFLICT)
    response = redirect(settings.OPEN_DISCUSSIONS_REDIRECT_URL)
    _set_jwt_cookie(response, token)
    return response
class ChannelsView(CreateAPIView):
    """
    View for discussions channels. Used to create new channels
    """
    # Session auth for browser clients, token auth for API clients.
    authentication_classes = (
        SessionAuthentication,
        TokenAuthentication,
    )
    # Requesting user must be logged in and allowed to create channels.
    permission_classes = (
        IsAuthenticated,
        CanCreateChannel,
    )
    serializer_class = ChannelSerializer
    # Make django-rest-framework happy
    queryset = []
| """APIs for discussions"""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from rest_framework import status
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from discussions.permissions import CanCreateChannel
from discussions.serializers import ChannelSerializer
from discussions.utils import get_token_for_request
def _set_jwt_cookie(response, token):
    """
    Attach the discussions JWT to the response as an http-only cookie.

    Args:
        response (django.http.Response): the response to set the cookie on
        token (str): the JWT token
    """
    cookie_options = {
        'domain': settings.OPEN_DISCUSSIONS_COOKIE_DOMAIN,
        'httponly': True,
    }
    response.set_cookie(
        settings.OPEN_DISCUSSIONS_COOKIE_NAME,
        token,
        **cookie_options
    )
@api_view()
def discussions_token(request):
    """
    API view for setting discussions JWT token
    """
    token = get_token_for_request(request)
    # No token means the user may not access discussions.
    if token is None:
        return Response({'has_token': False}, status=status.HTTP_403_FORBIDDEN)
    response = Response({'has_token': True})
    _set_jwt_cookie(response, token)
    return response
@login_required
def discussions_redirect(request):
    """
    View for setting discussions JWT token and redirecting to discussions
    """
    token = get_token_for_request(request)
    # No token means we cannot hand the user over to discussions.
    if token is None:
        return HttpResponse('', status=status.HTTP_409_CONFLICT)
    response = redirect(settings.OPEN_DISCUSSIONS_REDIRECT_URL)
    _set_jwt_cookie(response, token)
    return response
class ChannelsView(CreateAPIView):
    """
    View for discussions channels. Used to create new channels
    """
    # Session auth for browser clients, token auth for API clients.
    authentication_classes = (
        SessionAuthentication,
        TokenAuthentication,
    )
    # Requesting user must be logged in and allowed to create channels.
    permission_classes = (
        IsAuthenticated,
        CanCreateChannel,
    )
    serializer_class = ChannelSerializer
| bsd-3-clause | Python |
6bdc9af31bda7a429a457445c16b0df7cc800239 | Allow get_new_id to take sets and lists | igboyes/virtool,igboyes/virtool,virtool/virtool,virtool/virtool | virtool/db/utils.py | virtool/db/utils.py | import virtool.utils
def apply_projection(document, projection):
    """
    Apply a Mongo-style projection to a document and return it.

    Unlike the previous implementation, the caller's ``projection`` argument
    is never mutated (it used to gain an ``"_id"`` entry as a side effect).

    :param document: the document to project
    :type document: dict

    :param projection: the projection to apply
    :type projection: Union[dict,list]

    :return: the projected document
    :rtype: dict

    """
    if isinstance(projection, list):
        # Work on a copy so the caller's list is untouched.
        keys = list(projection)
        if "_id" not in keys:
            keys.append("_id")
        return {key: document[key] for key in keys}

    if not isinstance(projection, dict):
        raise TypeError("Invalid type for projection: {}".format(type(projection)))

    # Special case: {"_id": False} means "everything except _id".
    if projection == {"_id": False}:
        return {key: document[key] for key in document if key != "_id"}

    # Copy before forcing the implicit "_id" inclusion.
    projection = dict(projection)
    if "_id" not in projection:
        projection["_id"] = True

    return {key: document[key] for key in document if projection.get(key, False)}
async def get_new_id(collection, excluded=None):
    """
    Returns a new, unique, id that can be used for inserting a new document. Will not return any id that is included
    in ``excluded``.

    :param collection: the Mongo collection to get a new _id for
    :type collection: :class:`motor.motor_asyncio.AsyncIOMotorCollection`

    :param excluded: ids to exclude from the search
    :type excluded: Union[list, set]

    :return: an id unique to the collection
    :rtype: str

    """
    reserved = set(excluded) if excluded else set()
    reserved.update(await collection.distinct("_id"))
    return virtool.utils.random_alphanumeric(length=8, excluded=reserved)
async def get_one_field(collection, field, query):
    """Fetch ``field`` from the first document matching ``query``, or ``None``."""
    document = await collection.find_one(query, [field])
    return None if document is None else document[field]
async def get_non_existent_ids(collection, id_list):
    """Return the subset of ``id_list`` with no matching ``_id`` in the collection."""
    existing = await collection.distinct("_id", {"_id": {"$in": id_list}})
    return set(id_list) - set(existing)
async def id_exists(collection, _id):
    """
    Check if the document id exists in the collection.

    :param collection: the Mongo collection to check the _id against
    :type collection: :class:`motor.motor_asyncio.AsyncIOMotorCollection`

    :param _id: the _id to check for
    :type _id: str

    :return: ``bool`` indicating if the id exists
    :rtype: bool

    """
    matching = await collection.count({"_id": _id})
    return bool(matching)
async def ids_exist(collection, id_list):
    """
    Check if all of the ids passed in ``id_list`` exist in the collection.

    :param collection: the Mongo collection to check ``id_list`` against
    :type collection: :class:`motor.motor_asyncio.AsyncIOMotorCollection`

    :param id_list: the ids to check for
    :type id_list: str

    :return: ``bool`` indicating if the ids exist
    :rtype: bool

    """
    found = await collection.count({"_id": {"$in": id_list}})
    return found == len(id_list)
| import virtool.utils
def apply_projection(document, projection):
    """
    Apply a Mongo-style projection to a document and return it.

    Unlike the previous implementation, the caller's ``projection`` argument
    is never mutated (it used to gain an ``"_id"`` entry as a side effect).

    :param document: the document to project
    :type document: dict

    :param projection: the projection to apply
    :type projection: Union[dict,list]

    :return: the projected document
    :rtype: dict

    """
    if isinstance(projection, list):
        # Work on a copy so the caller's list is untouched.
        keys = list(projection)
        if "_id" not in keys:
            keys.append("_id")
        return {key: document[key] for key in keys}

    if not isinstance(projection, dict):
        raise TypeError("Invalid type for projection: {}".format(type(projection)))

    # Special case: {"_id": False} means "everything except _id".
    if projection == {"_id": False}:
        return {key: document[key] for key in document if key != "_id"}

    # Copy before forcing the implicit "_id" inclusion.
    projection = dict(projection)
    if "_id" not in projection:
        projection["_id"] = True

    return {key: document[key] for key in document if projection.get(key, False)}
async def get_new_id(collection, excluded=None):
    """
    Returns a new, unique, id that can be used for inserting a new document. Will not return any id that is included
    in ``excluded``.

    :param collection: the Mongo collection to get a new _id for
    :type collection: :class:`motor.motor_asyncio.AsyncIOMotorCollection`

    :param excluded: ids to exclude from the search
    :type excluded: Union[None, list, set]

    :return: an id unique to the collection
    :rtype: str

    """
    # Build a local set so the caller's ``excluded`` argument is never
    # mutated and sets as well as lists are accepted (the old
    # ``excluded += ...`` only worked on lists and modified them in place).
    excluded = set(excluded or set())
    excluded.update(await collection.distinct("_id"))
    return virtool.utils.random_alphanumeric(length=8, excluded=excluded)
async def get_one_field(collection, field, query):
    """Fetch ``field`` from the first document matching ``query``, or ``None``."""
    document = await collection.find_one(query, [field])
    return None if document is None else document[field]
async def get_non_existent_ids(collection, id_list):
    """Return the subset of ``id_list`` with no matching ``_id`` in the collection."""
    existing = await collection.distinct("_id", {"_id": {"$in": id_list}})
    return set(id_list) - set(existing)
async def id_exists(collection, _id):
    """
    Check if the document id exists in the collection.

    :param collection: the Mongo collection to check the _id against
    :type collection: :class:`motor.motor_asyncio.AsyncIOMotorCollection`

    :param _id: the _id to check for
    :type _id: str

    :return: ``bool`` indicating if the id exists
    :rtype: bool

    """
    matching = await collection.count({"_id": _id})
    return bool(matching)
async def ids_exist(collection, id_list):
    """
    Check if all of the ids passed in ``id_list`` exist in the collection.

    :param collection: the Mongo collection to check ``id_list`` against
    :type collection: :class:`motor.motor_asyncio.AsyncIOMotorCollection`

    :param id_list: the ids to check for
    :type id_list: str

    :return: ``bool`` indicating if the ids exist
    :rtype: bool

    """
    found = await collection.count({"_id": {"$in": id_list}})
    return found == len(id_list)
| mit | Python |
1a26df87bdae0b8ccc93047b535f87d9daa2bf84 | update incident script | FireCARES/fire-risk,garnertb/fire-risk,FireCARES/fire-risk,garnertb/fire-risk | Scripts/fire_incident.py | Scripts/fire_incident.py | #Weinschenk
#12-14
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import random
# Load the incident export; each row carries an incident_class_code.
incident = pd.read_csv('../Data/arlington_incidents.csv', header=0)

total_incidents = len(incident['incident_class_code'])

# Count class-code-1 incidents (presumably fires -- TODO confirm code meaning).
total_fires = 0
for i in incident['incident_class_code']:
    if i == 1:
        total_fires = total_fires + 1

# Sample ~1000 random incidents and print the index of each one that is a
# fire (Python 2 print statement).
for num in range(1,1000):
    rand = random.randrange(1,len(incident['incident_class_code']),1)
    if incident['incident_class_code'][rand] == 1:
        fire = True
        print rand,fire
    else:
        fire = False
| #Weinschenk
#12-14
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
# Load the incident export; each row carries an incident_class_code.
incident = pd.read_csv('../Data/arlington_incidents.csv', header=0)

total_incidents = len(incident['incident_class_code'])

# Count class-code-1 incidents and print their percentage of all incidents
# (Python 2 print; true division is enabled by the __future__ import above).
total_fires = 0
for i in incident['incident_class_code']:
    if i == 1:
        total_fires = total_fires + 1

print 100*(total_fires/total_incidents)
| mit | Python |
7835b9e6d6c35fcfd38b0fdebe6cdf92a38e3c3a | Make necessary directories | PixxxeL/django-ffmpeg | django_ffmpeg/app.py | django_ffmpeg/app.py | import os
from django.apps import AppConfig
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django_ffmpeg import defaults as ffmpeg_settings
class DjangoFfmpegConfig(AppConfig):
    """App config that ensures the ffmpeg media directories exist at startup."""

    name = 'django_ffmpeg'
    verbose_name = _('Videos')

    def ready(self):
        self._make_directories()

    def _make_directories(self):
        """Create the original/thumb/converted video dirs under MEDIA_ROOT."""
        dirs = (
            ffmpeg_settings.FFMPEG_ORIG_VIDEO,
            ffmpeg_settings.FFMPEG_THUMB_VIDEO,
            ffmpeg_settings.FFMPEG_CONV_VIDEO,
        )
        # Descriptive loop name instead of ``_``, which shadows the
        # ugettext_lazy alias imported at module level.
        for subdir in dirs:
            os.makedirs(os.path.join(
                settings.MEDIA_ROOT,
                ffmpeg_settings.FFMPEG_PRE_DIR,
                subdir
            ), exist_ok=True)
| from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class DjangoFfmpegConfig(AppConfig):
    # App label and translated human-readable name shown in the admin.
    name = 'django_ffmpeg'
    verbose_name = _('Videos')
| mit | Python |
1375cece723b9627be1413f2eb25aa53b6a918ec | Update __init__.py | Koed00/django-q | django_q/__init__.py | django_q/__init__.py | from .tasks import async, schedule, result, fetch
from .models import Task, Schedule
# Package version tuple.  The original line read ``(0, 3, q)`` where ``q``
# is an undefined name and raises NameError on import; restored to a
# numeric patch component -- TODO confirm the intended number against the
# release notes (the prior release was (0, 3, 0)).
VERSION = (0, 3, 1)

default_app_config = 'django_q.apps.DjangoQConfig'
| from .tasks import async, schedule, result, fetch
from .models import Task, Schedule
# Released version tuple and the default AppConfig path used by Django.
VERSION = (0, 3, 0)

default_app_config = 'django_q.apps.DjangoQConfig'
| mit | Python |
5c1833f8d056015d86ef1ef726a8097269db9d84 | bump version -> 0.3.0-dev | jmcarp/webargs,stas/webargs,nealrs/webargs,sloria/webargs,jmcarp/webargs,yufeiminds/webargs,Basis/webargs,hyunchel/webargs | webargs/__init__.py | webargs/__init__.py | # -*- coding: utf-8 -*-
# Package metadata; ``-dev`` marks an unreleased development version.
__version__ = '0.3.0-dev'
__author__ = 'Steven Loria'
__license__ = "MIT"
from webargs.core import Arg, WebargsError, ValidationError
| # -*- coding: utf-8 -*-
# Package metadata for the released 0.2.0 version.
__version__ = '0.2.0'
__author__ = 'Steven Loria'
__license__ = "MIT"
from webargs.core import Arg, WebargsError, ValidationError
| mit | Python |
0af39adef5a4c3453b7adb95a793f3d2720a67b2 | fix import bug | BeanYoung/beanstalkw | beanstalkw/client.py | beanstalkw/client.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import thread
import beanstalkc
import job
class Client(object):
def __init__(self, host, port):
self.connection = beanstalkc.Connection(host, port)
self.current_tube = 'default'
self.write_lock = thread.allocate_lock()
def _put(self, job):
if self.current_tube != job.tube:
self.connection.use(job.tube)
self.tube = job.tube
return self.connection.put(
json.dumps(job.body),
job.priority,
job.delay,
job.ttr)
def put(self, job):
with self.write_lock:
try:
return self._put(job)
except beanstalkc.SocketError, e:
self.connection.reconnect()
self.current_tube = 'default'
return self._put(job)
def enqueue(self, func, args, kwargs, tube, priority, delay, ttr):
return self.put(job.Job(func, args, kwargs, priority, delay, ttr))
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import thread.allocate_lock
import beanstalkc
import job
class Client(object):
def __init__(self, host, port):
self.connection = beanstalkc.Connection(host, port)
self.current_tube = 'default'
self.write_lock = thread.allocate_lock()
def _put(self, job):
if self.current_tube != job.tube:
self.connection.use(job.tube)
self.tube = job.tube
return self.connection.put(
json.dumps(job.body),
job.priority,
job.delay,
job.ttr)
def put(self, job):
with self.write_lock:
try:
return self._put(job)
except beanstalkc.SocketError, e:
self.connection.reconnect()
self.current_tube = 'default'
return self._put(job)
def enqueue(self, func, args, kwargs, tube, priority, delay, ttr):
return self.put(job.Job(func, args, kwargs, priority, delay, ttr))
| mit | Python |
195032a3ece262bcec2a8bebc7f443979caa0d58 | move function | qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | corehq/apps/change_feed/topics.py | corehq/apps/change_feed/topics.py | from kafka.common import OffsetRequest
from corehq.apps.change_feed.connection import get_kafka_client
from .document_types import CASE, FORM, DOMAIN, META, APP
# this is redundant but helps avoid import warnings until nothing references these
CASE = CASE
FORM = FORM
DOMAIN = DOMAIN
META = META
APP = APP

# new models
CASE_SQL = 'case-sql'
FORM_SQL = 'form-sql'
SMS = 'sms'
LEDGER = 'ledger'
COMMCARE_USER = 'commcare-user'
GROUP = 'group'
WEB_USER = 'web-user'

# Every kafka topic this module knows about; used for the validation
# assert and for fetching offsets across all topics.
ALL = (
    CASE,
    CASE_SQL,
    COMMCARE_USER,
    DOMAIN,
    FORM,
    FORM_SQL,
    GROUP,
    LEDGER,
    META,
    SMS,
    WEB_USER,
    APP,
)
def get_topic(document_type_object):
    """Return the kafka topic for a document type (its primary type)."""
    return document_type_object.primary_type
def get_topic_offset(topic):
    """
    :returns: The kafka offset for the topic."""
    offsets = get_multi_topic_offset([topic])
    return offsets[topic]
def get_all_offsets():
    """
    :returns: A dict of offsets keyed by topic"""
    # Delegates to get_multi_topic_offset over every known topic.
    return get_multi_topic_offset(ALL)
def get_multi_topic_offset(topics):
    """
    :returns: A dict of offsets keyed by topic"""
    # Only topics this module knows about may be queried.
    assert set(topics) <= set(ALL)
    kafka_client = get_kafka_client()
    requests = [OffsetRequest(topic_name, 0, -1, 1) for topic_name in topics]
    responses = kafka_client.send_offset_request(requests)
    return {response.topic: response.offsets[0] for response in responses}
| from kafka.common import OffsetRequest
from corehq.apps.change_feed.connection import get_kafka_client
from .document_types import CASE, FORM, DOMAIN, META, APP
# this is redundant but helps avoid import warnings until nothing references these
CASE = CASE
FORM = FORM
DOMAIN = DOMAIN
META = META
APP = APP

# new models
CASE_SQL = 'case-sql'
FORM_SQL = 'form-sql'
SMS = 'sms'
LEDGER = 'ledger'
COMMCARE_USER = 'commcare-user'
GROUP = 'group'
WEB_USER = 'web-user'

# Every kafka topic this module knows about; used for the validation
# assert and for fetching offsets across all topics.
ALL = (
    CASE,
    CASE_SQL,
    COMMCARE_USER,
    DOMAIN,
    FORM,
    FORM_SQL,
    GROUP,
    LEDGER,
    META,
    SMS,
    WEB_USER,
    APP,
)
def get_topic(document_type_object):
    """Return the kafka topic for a document type (its primary type)."""
    return document_type_object.primary_type
def get_topic_offset(topic):
    """
    :returns: The kafka offset for the topic."""
    offsets = get_multi_topic_offset([topic])
    return offsets[topic]
def get_multi_topic_offset(topics):
    """
    :returns: A dict of offsets keyed by topic"""
    # Only topics this module knows about may be queried.
    assert set(topics) <= set(ALL)
    kafka_client = get_kafka_client()
    requests = [OffsetRequest(topic_name, 0, -1, 1) for topic_name in topics]
    responses = kafka_client.send_offset_request(requests)
    return {response.topic: response.offsets[0] for response in responses}
def get_all_offsets():
    """
    :returns: A dict of offsets keyed by topic"""
    # Delegates to get_multi_topic_offset over every known topic.
    return get_multi_topic_offset(ALL)
| bsd-3-clause | Python |
1093fe0328fb722d05a5d84afff7494e5df21216 | Update clean_mac_info_plist.py | TacoCoin/tacocoin,TacoCoin/tacocoin,TacoCoin/tacocoin,TacoCoin/tacocoin,TacoCoin/tacocoin | share/qt/clean_mac_info_plist.py | share/qt/clean_mac_info_plist.py | #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Tacocoin-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";

inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Tacocoin-Qt.app/Contents/Info.plist"

# Scrape the VERSION value out of the qmake project file.
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"tacocoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=");
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "");

# Substitute $VERSION / $YEAR into the plist template and write the result
# (Python 2 script; file handles are left for the interpreter to close).
fIn = open(inFile, "r")
fileContent = fIn.read()

s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)

fOut = open(outFile, "w");
fOut.write(newFileContent);

print "Info.plist fresh created"
| #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Litecoin-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";

inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Litecoin-Qt.app/Contents/Info.plist"

# Scrape the VERSION value out of the qmake project file.
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=");
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "");

# Substitute $VERSION / $YEAR into the plist template and write the result
# (Python 2 script; file handles are left for the interpreter to close).
fIn = open(inFile, "r")
fileContent = fIn.read()

s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)

fOut = open(outFile, "w");
fOut.write(newFileContent);

print "Info.plist fresh created"
| mit | Python |
9cb694e3ca0f5c0296579f427c59558ac7bfa59b | Update python example | xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet | snippet/example/python/gunicorn-conf.py | snippet/example/python/gunicorn-conf.py | # -*- encoding: utf-8 -*-
# Notice: This config file is a Python module file.
import multiprocessing
# Placeholder substituted at deploy time with the real application name.
appname = "{APPNAME}"

# [Gunicorn Setting]
daemon = True
bind = "0.0.0.0:10100"
errorlog = "/log/{appname}/{appname}.log".format(appname=appname)
pidfile = "/log/{appname}/{appname}.pid".format(appname=appname)
proc_name = appname
worker_class = "gevent"
# Max simultaneous clients per gevent worker.
worker_connections = 10000
workers = multiprocessing.cpu_count() * 2
# Expose this config module's path to the app through the environment.
raw_env = "APP_CONFIG={0}".format(__file__)

# [App Setting]
| # -*- encoding: utf-8 -*-
# Notice: This config file is a Python module file.
import multiprocessing
appname = "sgapi"

# [Gunicorn Setting]
daemon = True
bind = "0.0.0.0:10100"
errorlog = "/log/sgapi/{}.log".format(appname)
pidfile = "/log/sgapi/{}.pid".format(appname)
proc_name = appname
worker_class = "gevent"
# Max simultaneous clients per gevent worker.
worker_connections = 10000
workers = multiprocessing.cpu_count() * 2
# Expose this config module's path to the app through the environment.
raw_env = "APP_CONFIG={}".format(__file__)

# [App Setting]
| mit | Python |
f63a174dd35731b6737e4f653139d92bd2f57aef | Add a location destroyed hook | sangoma/pytestlab | lab/hookspec.py | lab/hookspec.py | import pytest
@pytest.hookspec
def pytest_lab_configure(envmanager):
    """pytestlab startup: called once with the environment manager."""


@pytest.hookspec(historic=True)
def pytest_lab_addroles(config, rolemanager):
    """Historic hook: a new role was registered with the role manager."""
    # TODO: Hook for publishing new role **should not** be historic - this
    # no longer makes sense. Roles can now disappear before the historic
    # hook can be triggered. Any plugin that cares about having a complete
    # canonical list of roles should talk directly to the role manager
    # instead.


@pytest.hookspec(historic=True)
def pytest_lab_role_created(config, ctl):
    """Called when a new role controller is created (and loaded) at a
    location.
    """


@pytest.hookspec
def pytest_lab_role_destroyed(config, ctl):
    """Called when a role controller is destroyed.
    """


@pytest.hookspec
def pytest_lab_location_destroyed(config, location):
    """Called when a location is released by the environment manager.
    """


@pytest.hookspec
def pytest_lab_add_providers(config, providermanager):
    """Called to enable adding additional/external environment providers.
    """
| import pytest
@pytest.hookspec
def pytest_lab_configure(envmanager):
"""pytestlab startup"""
@pytest.hookspec(historic=True)
def pytest_lab_addroles(config, rolemanager):
"""new role registered"""
# TODO: Hook for publishing new role **should not** be historic - this
# no longer makes sense. Roles can now disappear before the historic
# hook can be triggered. Any plugin that cares about having a complete
# canonical list of roles should talk directly to the role manager
# instead.
@pytest.hookspec(historic=True)
def pytest_lab_role_created(config, ctl):
"""Called when a new role controller is created (and loaded) at a
location.
"""
@pytest.hookspec
def pytest_lab_role_destroyed(config, ctl):
"""Called when a role controller is destroyed.
"""
@pytest.hookspec
def pytest_lab_add_providers(config, providermanager):
"""Called to enable adding addtional/external environment providers.
"""
| mpl-2.0 | Python |
fbe7b34c575e30114c54587952c9aa919bc28d81 | Add import of django-annoying patch | theatlantic/django-south,theatlantic/django-south | south/introspection_plugins/__init__.py | south/introspection_plugins/__init__.py | # This module contains built-in introspector plugins for various common
# Django apps.
# These imports trigger the lower-down files
import south.introspection_plugins.geodjango
import south.introspection_plugins.django_tagging
import south.introspection_plugins.django_taggit
import south.introspection_plugins.django_objectpermissions
import south.introspection_plugins.annoying_autoonetoone
| # This module contains built-in introspector plugins for various common
# Django apps.
# These imports trigger the lower-down files
import south.introspection_plugins.geodjango
import south.introspection_plugins.django_tagging
import south.introspection_plugins.django_taggit
import south.introspection_plugins.django_objectpermissions
| apache-2.0 | Python |
b94286de30fdd154cfcd1c88889819c047693f7a | Fix regression test | explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,recognai/spaCy,oroszgy/spaCy.hu,raphael0202/spaCy,recognai/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,honnibal/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,Gregory-Howard/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,spacy-io/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,raphael0202/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,explosion/spaCy,recognai/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,explosion/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,honnibal/spaCy,recognai/spaCy,recognai/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,explosion/spaCy | spacy/tests/regression/test_issue595.py | spacy/tests/regression/test_issue595.py | # coding: utf-8
from __future__ import unicode_literals
from ...symbols import POS, VERB, VerbForm_inf
from ...vocab import Vocab
from ...lemmatizer import Lemmatizer
from ..util import get_doc
import pytest
def test_issue595():
"""Test lemmatization of base forms"""
words = ["Do", "n't", "feed", "the", "dog"]
tag_map = {'VB': {POS: VERB, VerbForm_inf: True}}
rules = {"verb": [["ed", "e"]]}
lemmatizer = Lemmatizer({'verb': {}}, {'verb': {}}, rules)
vocab = Vocab(lemmatizer=lemmatizer, tag_map=tag_map)
doc = get_doc(vocab, words)
doc[2].tag_ = 'VB'
assert doc[2].text == 'feed'
assert doc[2].lemma_ == 'feed'
| # coding: utf-8
from __future__ import unicode_literals
from ...symbols import POS, VERB, VerbForm_inf
from ...vocab import Vocab
from ...lemmatizer import Lemmatizer
from ..util import get_doc
import pytest
def test_issue595():
"""Test lemmatization of base forms"""
words = ["Do", "n't", "feed", "the", "dog"]
tag_map = {'VB': {POS: VERB, 'morph': VerbForm_inf}}
rules = {"verb": [["ed", "e"]]}
lemmatizer = Lemmatizer({'verb': {}}, {'verb': {}}, rules)
vocab = Vocab(lemmatizer=lemmatizer, tag_map=tag_map)
doc = get_doc(vocab, words)
doc[2].tag_ = 'VB'
assert doc[2].text == 'feed'
assert doc[2].lemma_ == 'feed'
| mit | Python |
79a78eed1198b83a785682f0adcbf1a6b9bf09ae | Fix Dinner donor subscription names. | astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin | astrobin_apps_donations/utils.py | astrobin_apps_donations/utils.py | from subscription.models import UserSubscription
SUBSCRIPTION_NAMES = (
'AstroBin Donor Coffee Monthly',
'AstroBin Donor Snack Monthly',
'AstroBin Donor Pizza Monthly',
'AstroBin Donor Movie Monthly',
'AstroBin Donor Dinner Monthly',
'AstroBin Donor Coffee Yearly',
'AstroBin Donor Snack Yearly',
'AstroBin Donor Pizza Yearly',
'AstroBin Donor Movie Yearly',
'AstroBin Donor Dinner Yearly',
)
def donations_user_get_subscription(user):
try:
return UserSubscription.objects.get(user = user, subscription__name__in = SUBSCRIPTION_NAMES)
except UserSubscription.DoesNotExist:
return None
except UserSubscription.MultipleObjectsReturned:
return UserSubscription.objects.filter(user = user, subscription__name__in = SUBSCRIPTION_NAMES)[0]
def donations_user_get_active_subscription(user):
try:
us = UserSubscription.objects.get(user = user, subscription__name__in = SUBSCRIPTION_NAMES, active = True, cancelled = False)
except UserSubscription.DoesNotExist:
return None
except UserSubscription.MultipleObjectsReturned:
us = UserSubscription.objects.filter(user = user, subscription__name__in = SUBSCRIPTION_NAMES, active = True, cancelled = False)[0]
if us.expired():
return None
return us
def donations_user_has_subscription(user):
try:
donations_user_get_subscription(user)
except UserSubscription.DoesNotExist:
return False
return True
def donations_user_has_active_subscription(user):
try:
us = donations_user_get_active_subscription(user)
except UserSubscription.DoesNotExist:
return False
if us:
return us.active and not us.cancelled and not us.expired()
False
def donations_user_has_inactive_subscription(user):
active = donations_user_has_active_subscription(user)
if active:
return False
try:
us = donations_user_get_subscription(user)
except UserSubscription.DoesNotExist:
return False
return not us.active or us.cancelled or us.expired()
| from subscription.models import UserSubscription
SUBSCRIPTION_NAMES = (
'AstroBin Donor Coffee Monthly',
'AstroBin Donor Snack Monthly',
'AstroBin Donor Pizza Monthly',
'AstroBin Donor Movie Monthly',
'AstorBin Donor Dinner Monthly',
'AstroBin Donor Coffee Yearly',
'AstroBin Donor Snack Yearly',
'AstroBin Donor Pizza Yearly',
'AstroBin Donor Movie Yearly',
'AstorBin Donor Dinner Yearly',
)
def donations_user_get_subscription(user):
try:
return UserSubscription.objects.get(user = user, subscription__name__in = SUBSCRIPTION_NAMES)
except UserSubscription.DoesNotExist:
return None
except UserSubscription.MultipleObjectsReturned:
return UserSubscription.objects.filter(user = user, subscription__name__in = SUBSCRIPTION_NAMES)[0]
def donations_user_get_active_subscription(user):
try:
us = UserSubscription.objects.get(user = user, subscription__name__in = SUBSCRIPTION_NAMES, active = True, cancelled = False)
except UserSubscription.DoesNotExist:
return None
except UserSubscription.MultipleObjectsReturned:
us = UserSubscription.objects.filter(user = user, subscription__name__in = SUBSCRIPTION_NAMES, active = True, cancelled = False)[0]
if us.expired():
return None
return us
def donations_user_has_subscription(user):
try:
donations_user_get_subscription(user)
except UserSubscription.DoesNotExist:
return False
return True
def donations_user_has_active_subscription(user):
try:
us = donations_user_get_active_subscription(user)
except UserSubscription.DoesNotExist:
return False
if us:
return us.active and not us.cancelled and not us.expired()
False
def donations_user_has_inactive_subscription(user):
active = donations_user_has_active_subscription(user)
if active:
return False
try:
us = donations_user_get_subscription(user)
except UserSubscription.DoesNotExist:
return False
return not us.active or us.cancelled or us.expired()
| agpl-3.0 | Python |
1947d222830803093a37f7ca90ff889908692fcb | add explicit check against Python 3.0/3.1. | jwilk/anorack,jwilk/anorack | lib/__init__.py | lib/__init__.py | '''
anorack's private modules
'''
import sys
type(...) # Python >= 3 is required
if sys.version_info < (3, 2):
raise RuntimeError('Python >= 3.2 is required')
__all__ = []
# vim:ts=4 sts=4 sw=4 et
| '''
anorack's private modules
'''
type(...) # Python >= 3 is required
| mit | Python |
23ac4ade910b664f3fbb9fd634da6a8a32b620c7 | Add specific failing string detail to datetime format test (#148) | cdubz/babybuddy,cdubz/babybuddy,cdubz/babybuddy | babybuddy/tests/tests_formats.py | babybuddy/tests/tests_formats.py | # -*- coding: utf-8 -*-
import datetime
from django.core.exceptions import ValidationError
from django.forms.fields import DateTimeField
from django.test import TestCase
class FormatsTestCase(TestCase):
def test_datetime_input_formats(self):
field = DateTimeField()
supported_custom_examples = [
'01/20/2020 9:30 AM',
'01/20/2020 9:30:03 AM',
'10/01/2020 11:30 PM',
'10/01/2020 11:30:03 AM',
]
for example in supported_custom_examples:
try:
result = field.to_python(example)
self.assertIsInstance(result, datetime.datetime)
except ValidationError:
self.fail('Format of "{}" not recognized!'.format(example))
with self.assertRaises(ValidationError):
field.to_python('invalid date string!')
| # -*- coding: utf-8 -*-
import datetime
from django.core.exceptions import ValidationError
from django.forms.fields import DateTimeField
from django.test import TestCase
class FormatsTestCase(TestCase):
def test_datetime_input_formats(self):
field = DateTimeField()
supported_custom_examples = [
'01/20/2020 9:30 AM',
'01/20/2020 9:30:03 AM',
'10/01/2020 11:30 PM',
'10/01/2020 11:30:03 AM',
]
for example in supported_custom_examples:
result = field.to_python(example)
self.assertIsInstance(result, datetime.datetime)
with self.assertRaises(ValidationError):
field.to_python('invalid date string!')
| bsd-2-clause | Python |
b8526da20e215e883da78a42d8034b2d9a4d2f82 | update simple_client.py | cit/libtorrent-peer-idol,cit/libtorrent-peer-idol,cit/libtorrent-peer-idol,kuro/libtorrent,kuro/libtorrent,kuro/libtorrent,cscheid/libtorrent,cit/libtorrent-peer-idol,kuro/libtorrent,cscheid/libtorrent,cscheid/libtorrent,cscheid/libtorrent,cit/libtorrent-peer-idol,cscheid/libtorrent,cit/libtorrent-peer-idol,cscheid/libtorrent | bindings/python/simple_client.py | bindings/python/simple_client.py | #!/bin/python
# Copyright Arvid Norberg 2008. Use, modification and distribution is
# subject to the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import libtorrent as lt
import time
ses = lt.session()
ses.listen_on(6881, 6891)
e = lt.bdecode(open("test.torrent", 'rb').read())
info = lt.torrent_info(e)
atp = {}
atp["ti"] = info
atp["save_path"] = "./"
atp["storage_mode"] = lt.storage_mode_t(1)
atp["paused"] = False
atp["auto_managed"] = True
atp["duplicate_is_error"] = True
h = ses.add_torrent(atp)
while (not h.is_seed()):
s = h.status()
state_str = ['queued', 'checking', 'downloading metadata', \
'downloading', 'finished', 'seeding', 'allocating']
print '\r%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d) %s' % \
(s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
s.num_peers, state_str[s.state]),
time.sleep(1)
| #!/bin/python
# Copyright Arvid Norberg 2008. Use, modification and distribution is
# subject to the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import libtorrent as lt
import time
ses = lt.session()
ses.listen_on(6881, 6891)
e = lt.bdecode(open("test.torrent", 'rb').read())
info = lt.torrent_info(e)
h = ses.add_torrent(info, "./")
while (not h.is_seed()):
s = h.status()
state_str = ['queued', 'checking', 'connecting', 'downloading metadata', \
'downloading', 'finished', 'seeding', 'allocating']
print '\r%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d) %s' % \
(s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
s.num_peers, state_str[s.state]),
time.sleep(1)
| bsd-3-clause | Python |
0d8585a2ab57ca4d8f3f4ee2429a2137f2045c6a | Return 0 as an int rather than a string. | tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status | bumblebee/modules/arch-update.py | bumblebee/modules/arch-update.py | """Check updates to Arch Linux."""
import subprocess
import bumblebee.input
import bumblebee.output
import bumblebee.engine
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
widget = bumblebee.output.Widget(full_text=self.utilization)
super(Module, self).__init__(engine, config, widget)
self.packages = self.check_updates()
def check_updates(self):
p = subprocess.Popen(
"checkupdates", stdout=subprocess.PIPE, shell=True)
p_status = p.wait()
if p_status == 0:
(output, err) = p.communicate()
output = output.decode('utf-8')
packages = output.split('\n')
packages.pop()
return len(packages)
return 0
def utilization(self, widget):
return 'Update Arch: {}'.format(self.packages)
def hidden(self):
return self.check_updates() == 0
def update(self, widgets):
self.packages = self.check_updates()
def state(self, widget):
return self.threshold_state(self.packages, 1, 100)
| """Check updates to Arch Linux."""
import subprocess
import bumblebee.input
import bumblebee.output
import bumblebee.engine
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
widget = bumblebee.output.Widget(full_text=self.utilization)
super(Module, self).__init__(engine, config, widget)
self.packages = self.check_updates()
def check_updates(self):
p = subprocess.Popen(
"checkupdates", stdout=subprocess.PIPE, shell=True)
p_status = p.wait()
if p_status == 0:
(output, err) = p.communicate()
output = output.decode('utf-8')
packages = output.split('\n')
packages.pop()
return len(packages)
return '0'
def utilization(self, widget):
return 'Update Arch: {}'.format(self.packages)
def hidden(self):
return self.check_updates() == 0
def update(self, widgets):
self.packages = self.check_updates()
def state(self, widget):
return self.threshold_state(self.packages, 1, 100)
| mit | Python |
5d89191538f2fb2a45e8fbb2c50c1500b3cd1a4a | Bump version to 3.1.1 | mar10/wsgidav,mar10/wsgidav,mar10/wsgidav,mar10/wsgidav | wsgidav/__init__.py | wsgidav/__init__.py | # -*- coding: utf-8 -*-
"""
Current WsgiDAV version number.
See https://www.python.org/dev/peps/pep-0440
Examples
Pre-releases (alpha, beta, release candidate):
'3.0.0a1', '3.0.0b1', '3.0.0rc1'
Final Release:
'3.0.0'
Developmental release (to mark 3.0.0 as 'used'. Don't publish this):
'3.0.0.dev1'
NOTE:
When pywin32 is installed, number must be a.b.c for MSI builds?
"3.0.0a4" seems not to work in this case!
"""
__version__ = "3.1.1"
# make version accessible as 'wsgidav.__version__'
# from wsgidav._version import __version__ # noqa: F401
# Initialize a silent 'wsgidav' logger
# http://docs.python-guide.org/en/latest/writing/logging/#logging-in-a-library
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
import logging
_base_logger = logging.getLogger(__name__)
_base_logger.addHandler(logging.NullHandler())
_base_logger.propagate = False
_base_logger.setLevel(logging.INFO)
| # -*- coding: utf-8 -*-
"""
Current WsgiDAV version number.
See https://www.python.org/dev/peps/pep-0440
Examples
Pre-releases (alpha, beta, release candidate):
'3.0.0a1', '3.0.0b1', '3.0.0rc1'
Final Release:
'3.0.0'
Developmental release (to mark 3.0.0 as 'used'. Don't publish this):
'3.0.0.dev1'
NOTE:
When pywin32 is installed, number must be a.b.c for MSI builds?
"3.0.0a4" seems not to work in this case!
"""
__version__ = "3.1.1-a1"
# make version accessible as 'wsgidav.__version__'
# from wsgidav._version import __version__ # noqa: F401
# Initialize a silent 'wsgidav' logger
# http://docs.python-guide.org/en/latest/writing/logging/#logging-in-a-library
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
import logging
_base_logger = logging.getLogger(__name__)
_base_logger.addHandler(logging.NullHandler())
_base_logger.propagate = False
_base_logger.setLevel(logging.INFO)
| mit | Python |
0c43c2305ea5ba81011dfc95593d7c725212760a | Bump version | python-hyper/wsproto | wsproto/__init__.py | wsproto/__init__.py | # -*- coding: utf-8 -*-
"""
wsproto
~~~
A WebSocket implementation.
"""
__version__ = "0.11.0"
| # -*- coding: utf-8 -*-
"""
wsproto
~~~
A WebSocket implementation.
"""
__version__ = "0.10.0"
| mit | Python |
7e5fd0ad1f9e6f77e602d0bb9e2757d239df46f6 | Allow empty location. | cfengine/documentation-generator,michaelclelland/documentation-generator,michaelclelland/documentation-generator,nickanderson/documentation-generator,stweil/documentation-generator,cfengine/documentation-generator,stweil/documentation-generator,cfengine/documentation-generator,stweil/documentation-generator,nickanderson/documentation-generator,stweil/documentation-generator,michaelclelland/documentation-generator,cfengine/documentation-generator,cfengine/documentation-generator,michaelclelland/documentation-generator,stweil/documentation-generator,nickanderson/documentation-generator,michaelclelland/documentation-generator,nickanderson/documentation-generator,nickanderson/documentation-generator | _scripts/cfdoc_qa.py | _scripts/cfdoc_qa.py | # The MIT License (MIT)
#
# Copyright (c) 2013 CFEngine AS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
from time import gmtime, strftime
def OpenLogFile(config):
logfilename = config["log_file"]
write_header = not os.path.exists(logfilename)
logfile = open(logfilename, "a")
if write_header:
logfile.write("---\n")
logfile.write("layout = printable\n")
logfile.write("title = Documentation Issues\n")
logfile.write("publised = true\n")
logfile.write("alias = cfdoc_log.html\n")
logfile.write("---\n")
logfile.write("\n")
logfile.write("Documentation generated at %s GMT\n" % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
logfile.write("\n")
return logfile
def LogProcessStart(config, string):
logfile = OpenLogFile(config)
logfile.write("\n")
logfile.write("### %s\n" % string)
logfile.write("\n")
def LogMissingDocumentation(config, element, strings, location):
logfile = OpenLogFile(config)
logfile.write("* `" + element + "`: ")
for string in strings:
logfile.write("`" + string + "` ")
logfile.write("\n")
if len(location):
logfile.write(" * Source location: %s\n" % location)
logfile.write(" * Triggered by: %s (%d)\n" % (os.path.relpath(config["context_current_file"]), config["context_current_line_number"]))
logfile.close()
| # The MIT License (MIT)
#
# Copyright (c) 2013 CFEngine AS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
from time import gmtime, strftime
def OpenLogFile(config):
logfilename = config["log_file"]
write_header = not os.path.exists(logfilename)
logfile = open(logfilename, "a")
if write_header:
logfile.write("---\n")
logfile.write("layout = printable\n")
logfile.write("title = Documentation Issues\n")
logfile.write("publised = true\n")
logfile.write("alias = cfdoc_log.html\n")
logfile.write("---\n")
logfile.write("\n")
logfile.write("Documentation generated at %s GMT\n" % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
logfile.write("\n")
return logfile
def LogProcessStart(config, string):
logfile = OpenLogFile(config)
logfile.write("\n")
logfile.write("### %s\n" % string)
logfile.write("\n")
def LogMissingDocumentation(config, element, strings, location):
logfile = OpenLogFile(config)
logfile.write("* `" + element + "`: ")
for string in strings:
logfile.write("`" + string + "` ")
logfile.write("\n")
logfile.write(" * Source location: %s\n" % location)
logfile.write(" * Triggered by: %s (%d)\n" % (os.path.relpath(config["context_current_file"]), config["context_current_line_number"]))
logfile.close()
| mit | Python |
89ebf52fa951091304fe18b8cdfb939b2da9f602 | remove extra quotes in bullet_train key | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/utils.py | accelerator/utils.py | from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from bullet_train import BulletTrain
from django.conf import settings
def create_mc_permission(permission):
ct, _ = ContentType.objects.get_or_create(
app_label="mc",
model=permission.content_type.model)
new_perm, _ = Permission.objects.get_or_create(
name=permission.name,
content_type=ct,
codename=permission.codename)
for group in permission.group_set.all():
group.permissions.add(new_perm)
for user in permission.user_set.all():
user.user_permissions.add(new_perm)
def bullet_train_has_feature(feature_name):
if settings.BULLET_TRAIN_API_KEY:
bullet_tran_key = settings.BULLET_TRAIN_API_KEY.strip('"')
else:
bullet_tran_key = ''
bt = BulletTrain(environment_id=bullet_tran_key)
if bt:
if bt.has_feature(feature_name):
return bt.feature_enabled(feature_name)
return False
| from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
from bullet_train import BulletTrain
from django.conf import settings
def create_mc_permission(permission):
ct, _ = ContentType.objects.get_or_create(
app_label="mc",
model=permission.content_type.model)
new_perm, _ = Permission.objects.get_or_create(
name=permission.name,
content_type=ct,
codename=permission.codename)
for group in permission.group_set.all():
group.permissions.add(new_perm)
for user in permission.user_set.all():
user.user_permissions.add(new_perm)
def bullet_train_has_feature(feature_name):
bt = BulletTrain(environment_id=settings.BULLET_TRAIN_API_KEY)
if bt:
if bt.has_feature(feature_name):
return bt.feature_enabled(feature_name)
return False
| mit | Python |
5c828795ad9b3322cad817914826dfc8cbe82b22 | fix import bug | Answeror/aip,Answeror/aip | aip/imfs/__init__.py | aip/imfs/__init__.py | from .base import ImfsError, NotFoundError, ConnectionError
| mit | Python | |
5f422542135a5009a02815ece5c1cade2fbe9ba7 | fix up normalize_utf8 method | nikken1/patentprocessor,funginstitute/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor,funginstitute/patentprocessor,yngcan/patentprocessor,funginstitute/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor | lib/xml_util.py | lib/xml_util.py | #!/usr/bin/env python
"""
Collection of useful functions and tools for working with XML documents
"""
import re
from itertools import chain, izip
from unicodedata import normalize
def flatten(ls_of_ls):
"""
Takes in a list of lists, returns a new list of lists
where list `i` contains the `i`th element from the original
set of lists.
"""
return map(list, list(izip(*ls_of_ls)))
def extend_padding(ls_of_ls, padding=''):
"""
Takes in a lists of lists, returns a new list of lists
where each list is padded up to the length of the longest
list by [padding] (defaults to the empty string)
"""
maxlen = max(map(len, ls_of_ls))
newls = []
for ls in ls_of_ls:
if len(ls) != maxlen:
ls.extend([padding]*(maxlen - len(ls)))
newls.append(ls)
return newls
def escape_html_nosub(string):
"""
Escapes html sequences (e.g. <b></b>) that are not the known idiom
for subscript: <sub>...</sub>
"""
lt = re.compile('<(?!/?sub>)')
gt = re.compile('(?=.)*(?<!sub)>')
string = string.replace('&','&')
string = re.sub(lt,"<",string)
string = re.sub(gt,">",string)
return string
def has_content(l):
"""
Returns true if list [l] contains any non-null objects
"""
return any(filter(lambda x: x, l))
def normalize_utf8(string):
"""
Normalizes [string] to be UTF-8 encoded. Accepts both unicode and normal
Python strings.
"""
if isinstance(string, unicode):
return normalize('NFC', string)
else:
return normalize('NFC', string.decode('utf-8'))
| #!/usr/bin/env python
"""
Collection of useful functions and tools for working with XML documents
"""
import re
from itertools import chain, izip
from unicodedata import normalize
def flatten(ls_of_ls):
"""
Takes in a list of lists, returns a new list of lists
where list `i` contains the `i`th element from the original
set of lists.
"""
return map(list, list(izip(*ls_of_ls)))
def extend_padding(ls_of_ls, padding=''):
"""
Takes in a lists of lists, returns a new list of lists
where each list is padded up to the length of the longest
list by [padding] (defaults to the empty string)
"""
maxlen = max(map(len, ls_of_ls))
newls = []
for ls in ls_of_ls:
if len(ls) != maxlen:
ls.extend([padding]*(maxlen - len(ls)))
newls.append(ls)
return newls
def escape_html_nosub(string):
"""
Escapes html sequences (e.g. <b></b>) that are not the known idiom
for subscript: <sub>...</sub>
"""
lt = re.compile('<(?!/?sub>)')
gt = re.compile('(?=.)*(?<!sub)>')
string = string.replace('&','&')
string = re.sub(lt,"<",string)
string = re.sub(gt,">",string)
return string
def has_content(l):
"""
Returns true if list [l] contains any non-null objects
"""
return any(filter(lambda x: x, l))
def normalize_utf8(string):
"""
Normalizes [string] to be UTF-8 encoded. Accepts both unicode and normal
Python strings.
"""
if isinstance(string,unicode):
return normalize('NFC', string)
else:
return normalize('NFC', unicode(string))
| bsd-2-clause | Python |
21b405f1968dd75a663b253feb19d730af34d380 | Revert to old entry point. | danmcp/pman,FNNDSC/pman,danmcp/pman,FNNDSC/pman | docker-entrypoint.py | docker-entrypoint.py | #!/usr/bin/env python3
# Single entry point / dispatcher for simplified running of 'pman'
import os
from argparse import RawTextHelpFormatter
from argparse import ArgumentParser
str_desc = """
NAME
docker-entrypoint.py
SYNOPSIS
docker-entrypoint.py [optional cmd args for pman]
DESCRIPTION
'docker-entrypoint.py' is the main entrypoint for running the pman container.
"""
def pman_do(args, unknown):
str_otherArgs = ' '.join(unknown)
str_CMD = "/usr/local/bin/pman %s" % (str_otherArgs)
# str_CMD = "/usr/local/pman/bin/pman %s" % (str_otherArgs)
return str_CMD
parser = ArgumentParser(description = str_desc, formatter_class = RawTextHelpFormatter)
parser.add_argument(
'--msg',
action = 'store',
dest = 'msg',
default = '',
help = 'JSON msg payload'
)
args, unknown = parser.parse_known_args()
if __name__ == '__main__':
try:
fname = 'pman_do(args, unknown)'
str_cmd = eval(fname)
print(str_cmd)
os.system(str_cmd)
except:
print("Misunderstood container app... exiting.") | #!/usr/bin/env python3
# Single entry point / dispatcher for simplified running of 'pman'
import os
from argparse import RawTextHelpFormatter
from argparse import ArgumentParser
str_desc = """
NAME
docker-entrypoint.py
SYNOPSIS
docker-entrypoint.py [optional cmd args for pman]
DESCRIPTION
'docker-entrypoint.py' is the main entrypoint for running the pman container.
"""
def pman_do(args, unknown):
str_otherArgs = ' '.join(unknown)
str_CMD = "/usr/local/bin/pman %s" % (str_otherArgs)
str_CMD = "/usr/local/pman/bin/pman %s" % (str_otherArgs)
return str_CMD
parser = ArgumentParser(description = str_desc, formatter_class = RawTextHelpFormatter)
parser.add_argument(
'--msg',
action = 'store',
dest = 'msg',
default = '',
help = 'JSON msg payload'
)
args, unknown = parser.parse_known_args()
if __name__ == '__main__':
try:
fname = 'pman_do(args, unknown)'
str_cmd = eval(fname)
print(str_cmd)
os.system(str_cmd)
except:
print("Misunderstood container app... exiting.") | mit | Python |
624430e4571ee85e935c24b39cfe9520ef5499ce | Use new basekey location | mindw/libnacl,johnttan/libnacl,saltstack/libnacl,coinkite/libnacl,RaetProtocol/libnacl,cachedout/libnacl | libnacl/dual.py | libnacl/dual.py | '''
The dual key system allows for the creation of keypairs that contain both
cryptographic and signing keys
'''
# import libnacl libs
import libnacl
import libnacl.base
import libnacl.public
import libnacl.sign
class DualSecret(libnacl.base.BaseKey):
'''
Manage crypt and sign keys in one object
'''
def __init__(self, crypt=None, sign=None):
self.crypt = libnacl.public.SecretKey(crypt)
self.signer = libnacl.sign.Signer(sign)
self.sk = self.crypt.sk
self.seed = self.signer.seed
def sign(self, msg):
'''
Sign the given message
'''
return self.signer.sign(msg)
def signature(self, msg):
'''
Return just the signature for the message
'''
return self.signer.signature(msg)
| '''
The dual key system allows for the creation of keypairs that contain both
cryptographic and signing keys
'''
# import libnacl libs
import libnacl
import libnacl.utils
import libnacl.public
import libnacl.sign
class DualSecret(libnacl.utils.BaseKey):
'''
Manage crypt and sign keys in one object
'''
def __init__(self, crypt=None, sign=None):
self.crypt = libnacl.public.SecretKey(crypt)
self.signer = libnacl.sign.Signer(sign)
self.sk = self.crypt.sk
self.seed = self.signer.seed
def sign(self, msg):
'''
Sign the given message
'''
return self.signer.sign(msg)
def signature(self, msg):
'''
Return just the signature for the message
'''
return self.signer.signature(msg)
| apache-2.0 | Python |
548b740bc34b502fb965a93dc66382e1dd9f623c | Remove unused import | ASCIT/donut-python,ASCIT/donut,ASCIT/donut-python,ASCIT/donut,ASCIT/donut | donut/email_utils.py | donut/email_utils.py | import smtplib
from email.mime.text import MIMEText
def send_email(to, text, subject, use_prefix=True, group=None):
"""
Sends an email to a user. Expects 'to' to be a comma separated string of
emails, and for 'msg' and 'subject' to be strings. If group
is not none, the email is sent to a newsgroup and the to emails are hidden.
"""
msg = MIMEText(text)
if use_prefix and '[ASCIT Donut]' not in subject:
subject = '[ASCIT Donut] ' + subject
msg['Subject'] = subject
msg['From'] = 'auto@donut.caltech.edu'
if group:
msg['To'] = group.lower().replace(' ', '_')
else:
msg['To'] = to
with smtplib.SMTP('localhost') as s:
s.sendmail('auto@donut.caltech.edu', [to], msg.as_string())
| import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def send_email(to, text, subject, use_prefix=True, group=None):
"""
Sends an email to a user. Expects 'to' to be a comma separated string of
emails, and for 'msg' and 'subject' to be strings. If group
is not none, the email is sent to a newsgroup and the to emails are hidden.
"""
msg = MIMEText(text, "html")
if use_prefix and '[ASCIT Donut]' not in subject:
subject = '[ASCIT Donut] ' + subject
msg['Subject'] = subject
msg['From'] = 'auto@donut.caltech.edu'
if group:
msg['To'] = group.lower().replace(' ', '_')
else:
msg['To'] = to
with smtplib.SMTP('localhost') as s:
s.sendmail('auto@donut.caltech.edu', [to], msg.as_string())
| mit | Python |
6b98eb3a036f470eccb31bb12c378c55e56fdeb4 | Update __init__.py | gfilla/dsxtools | dsxtools/__init__.py | dsxtools/__init__.py |
from io import StringIO
from dsxtools.objectStore import objectStore
|
from dsxtools.objectStore import objectStore
| apache-2.0 | Python |
ff882c0b55bb46836d86189f2353a3806bbfa4ec | Bump version to 0.4 | grampajoe/django-tenant-templates | django_tenant_templates/__init__.py | django_tenant_templates/__init__.py | """
Django Tenant Templates
"""
from threading import local as _local
version = '0.4'
local = _local()
local.tenant_slug = None
| """
Django Tenant Templates
"""
from threading import local as _local
version = '0.3'
local = _local()
local.tenant_slug = None
| mit | Python |
151b78036067e2b8cec7bd0eabaad5d27b2c37c1 | Handle upstream timeouts gracefully | urfonline/api,urfonline/api,urfonline/api | api/streams/views.py | api/streams/views.py | from api.streams.models import StreamConfiguration
from django.http import JsonResponse, Http404
from django.http.request import HttpRequest
import requests
def get_stream_status(request: HttpRequest, stream_slug: str):
try:
stream = StreamConfiguration.objects.get(slug=stream_slug)
r = requests.get('http://{stream.host}:{stream.port}/status-json.xsl'.format(stream=stream), timeout=5)
except StreamConfiguration.DoesNotExist:
raise Http404("Stream with slug {0} does not exist.".format(stream_slug))
except requests.exceptions.Timeout:
return JsonResponse({ "error": "Upstream request timed out" }, status=504)
if r.status_code != requests.codes.ok:
return JsonResponse({ "error": "Upstream request failed" }, status=502)
return JsonResponse(r.json())
| from api.streams.models import StreamConfiguration
from django.http import JsonResponse, Http404
from django.http.request import HttpRequest
import requests
def get_stream_status(request: HttpRequest, stream_slug: str):
try:
stream = StreamConfiguration.objects.get(slug=stream_slug)
except StreamConfiguration.DoesNotExist:
raise Http404("Stream with slug {0} does not exist.".format(stream_slug))
r = requests.get('http://{stream.host}:{stream.port}/status-json.xsl'.format(stream=stream), timeout=5)
if r.status_code != requests.codes.ok:
return JsonResponse({ "error": "Upstream request failed" }, status=500)
return JsonResponse(r.json())
| mit | Python |
85106ebec37381abadc508f3495a92ca06cb8aa3 | Implement embedded property functionality | VoxelDavid/elixir | elixir/processors.py | elixir/processors.py | import os.path
from elixir import rbxmx
class BaseProcessor:
"""The primary processor class.
A processor is what compilers use to determine what happens when they
encounter a file or folder. All of the `process` methods return a new
instance from `elixir.rbx`.
For example, when processing a file, we return a new Script. The XML of
these instances is then appended into the hierarchy when compiling.
"""
def process_folder(self, name):
"""Processing for folders in the source directory.
name : str
The name of the folder to process. Excluding the extension.
"""
return rbxmx.ContainerElement(name=name)
def process_model(self, content):
"""Processing for ROBLOX Model files (.rbxmx).
content : str
The contents of the Model file.
"""
return rbxmx.ModelElement(content)
def _get_script_class(self, content):
if rbxmx.is_module(content):
return "ModuleScript"
else:
return "Script"
def process_script(self, name, content):
"""Processing for Lua files in the source directory.
name : str
The name of the Script.
content : str
The Lua source code.
"""
class_name = self._get_script_class(content)
script = rbxmx.ScriptElement(class_name, name=name, source=content)
script.use_embedded_properties()
return script
class NevermoreProcessor(BaseProcessor):
"""Processor for NevermoreEngine (Legacy).
This should be only used on or before commit b9b5a8 (linked below).
Nevermore was refactored and no longer requries this special handling.
This processor is kept here for legacy support.
https://github.com/Quenty/NevermoreEngine/tree/b9b5a836e4b5801ba19abfa2a5eab79921076542
"""
def process_script(self, name, content):
if name == "NevermoreEngineLoader":
return rbxmx.ScriptElement(name=name, source=content)
elif ".main" in name.lower():
return rbxmx.ScriptElement(name=name, source=content, disabled=True)
else:
return rbxmx.ScriptElement("ModuleScript", name=name, source=content)
| import os.path
from elixir import rbxmx
class BaseProcessor:
"""The primary processor class.
A processor is what compilers use to determine what happens when they
encounter a file or folder. All of the `process` methods return a new
instance from `elixir.rbx`.
For example, when processing a file, we return a new Script. The XML of
these instances is then appended into the hierarchy when compiling.
"""
def process_folder(self, name):
"""Processing for folders in the source directory.
name : str
The name of the folder to process. Excluding the extension.
"""
return rbxmx.ContainerElement(name=name)
def process_model(self, content):
"""Processing for ROBLOX Model files (.rbxmx).
content : str
The contents of the Model file.
"""
return rbxmx.ModelElement(content)
def _get_script_class(self, content):
if rbxmx.is_module(content):
return "ModuleScript"
else:
return "Script"
def process_script(self, name, content):
"""Processing for Lua files in the source directory.
name : str
The name of the Script.
content : str
The Lua source code.
"""
class_name = self._get_script_class(content)
return rbxmx.ScriptElement(class_name, name=name, source=content)
class NevermoreProcessor(BaseProcessor):
"""Processor for NevermoreEngine (Legacy).
This should be only used on or before commit b9b5a8 (linked below).
Nevermore was refactored and no longer requries this special handling.
This processor is kept here for legacy support.
https://github.com/Quenty/NevermoreEngine/tree/b9b5a836e4b5801ba19abfa2a5eab79921076542
"""
def process_script(self, name, content):
if name == "NevermoreEngineLoader":
return rbxmx.ScriptElement(name=name, source=content)
elif ".main" in name.lower():
return rbxmx.ScriptElement(name=name, source=content, disabled=True)
else:
return rbxmx.ScriptElement("ModuleScript", name=name, source=content)
| mit | Python |
ef63d85d887bc350f9df14acf8ea952384da4017 | Improve discriminator command formatting | Harmon758/Harmonbot,Harmon758/Harmonbot | Discord/cogs/user.py | Discord/cogs/user.py |
import discord
from discord.ext import commands
import inspect
from typing import Optional
from modules import utilities
from utilities import checks
def setup(bot):
bot.add_cog(User(bot))
class User(commands.Cog):
def __init__(self, bot):
self.bot = bot
for name, command in inspect.getmembers(self):
if isinstance(command, commands.Command) and command.parent is None and name != "user":
self.bot.add_command(command)
self.user.add_command(command)
# TODO: add commands
# TODO: role removal
@commands.group(aliases = ["member"], invoke_without_command = True, case_insensitive = True)
@checks.not_forbidden()
async def user(self, ctx):
'''
User
All user subcommands are also commands
'''
await ctx.send_help(ctx.command)
@commands.command(aliases = ["addrole"])
@commands.guild_only()
@checks.has_permissions_and_capability(manage_roles = True)
async def add_role(self, ctx, member : discord.Member, *, role : discord.Role):
'''Gives a user a role'''
await member.add_roles(role)
await ctx.embed_reply("I gave the role, {}, to {}".format(role, member))
@commands.command()
@checks.not_forbidden()
async def avatar(self, ctx, *, user: Optional[discord.Member]):
'''
See a bigger version of an avatar
Your own or someone else's avatar
'''
if not user:
await ctx.embed_reply(title = "Your avatar", image_url = ctx.author.avatar_url)
else:
await ctx.embed_reply(title = f"{user}'s avatar", image_url = user.avatar_url)
@commands.command()
@checks.not_forbidden()
async def discriminator(self, ctx, *, user: Optional[discord.Member]):
'''
Get a discriminator
Your own or someone else's discriminator
'''
if not user:
await ctx.embed_reply("Your discriminator: #" + ctx.author.discriminator)
else:
await ctx.embed_reply(f"{user.mention}'s discriminator: #{user.discriminator}",
footer_text = str(user), footer_icon_url = user.avatar_url)
@commands.command(name = "id")
@checks.not_forbidden()
async def id_command(self, ctx, *, user : discord.Member):
'''Get ID of user'''
# Include mention?
await ctx.embed_reply(user.id, footer_text = str(user), footer_icon_url = user.avatar_url)
@commands.command()
@checks.not_forbidden()
async def name(self, ctx, *, user : discord.Member):
'''The name of a user'''
await ctx.embed_reply(user.mention, footer_text = str(user), footer_icon_url = user.avatar_url)
|
import discord
from discord.ext import commands
import inspect
from typing import Optional
from modules import utilities
from utilities import checks
def setup(bot):
bot.add_cog(User(bot))
class User(commands.Cog):
def __init__(self, bot):
self.bot = bot
for name, command in inspect.getmembers(self):
if isinstance(command, commands.Command) and command.parent is None and name != "user":
self.bot.add_command(command)
self.user.add_command(command)
# TODO: add commands
# TODO: role removal
@commands.group(aliases = ["member"], invoke_without_command = True, case_insensitive = True)
@checks.not_forbidden()
async def user(self, ctx):
'''
User
All user subcommands are also commands
'''
await ctx.send_help(ctx.command)
@commands.command(aliases = ["addrole"])
@commands.guild_only()
@checks.has_permissions_and_capability(manage_roles = True)
async def add_role(self, ctx, member : discord.Member, *, role : discord.Role):
'''Gives a user a role'''
await member.add_roles(role)
await ctx.embed_reply("I gave the role, {}, to {}".format(role, member))
@commands.command()
@checks.not_forbidden()
async def avatar(self, ctx, *, user: Optional[discord.Member]):
'''
See a bigger version of an avatar
Your own or someone else's avatar
'''
if not user:
await ctx.embed_reply(title = "Your avatar", image_url = ctx.author.avatar_url)
else:
await ctx.embed_reply(title = f"{user}'s avatar", image_url = user.avatar_url)
@commands.command()
@checks.not_forbidden()
async def discriminator(self, ctx, *, user: Optional[discord.Member]):
'''
Get a discriminator
Your own or someone else's discriminator
'''
if not user:
await ctx.embed_reply("Your discriminator: #" + ctx.author.discriminator)
else:
await ctx.embed_reply(f"{user.mention}'s discriminator: #{user.discriminator}", footer_text = str(user), footer_icon_url = user.avatar_url)
@commands.command(name = "id")
@checks.not_forbidden()
async def id_command(self, ctx, *, user : discord.Member):
'''Get ID of user'''
# Include mention?
await ctx.embed_reply(user.id, footer_text = str(user), footer_icon_url = user.avatar_url)
@commands.command()
@checks.not_forbidden()
async def name(self, ctx, *, user : discord.Member):
'''The name of a user'''
await ctx.embed_reply(user.mention, footer_text = str(user), footer_icon_url = user.avatar_url)
| mit | Python |
67dea90ed39225f600988ea8eaa2906caca5e915 | Use cog check for wows cog | Harmon758/Harmonbot,Harmon758/Harmonbot | Discord/cogs/wows.py | Discord/cogs/wows.py |
from discord.ext import commands
import datetime
from utilities import checks
def setup(bot):
bot.add_cog(WoWS(bot))
class WoWS(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.api_urls = {"asia": "https://api.worldofwarships.asia/wows/",
"eu": "https://api.worldofwarships.eu/wows/",
"na": "https://api.worldofwarships.com/wows/",
"ru": "https://api.worldofwarships.ru/wows/"}
def cog_check(self, ctx):
return checks.not_forbidden_predicate(ctx)
@commands.group(aliases = ["worldofwarships", "world_of_warships"],
invoke_without_command = True, case_insensitive = True)
async def wows(self, ctx):
'''
World of Warships
Realms/Regions: Asia, EU, NA, RU (Default: NA)
'''
await ctx.send_help(ctx.command)
@wows.group(invoke_without_command = True, case_insensitive = True)
async def player(self, ctx, player : str, region : str = "NA"):
'''Player details'''
api_url = self.api_urls.get(region.lower(), "na")
params = {"application_id": ctx.bot.WARGAMING_APPLICATION_ID, "search": player, "limit": 1}
async with ctx.bot.aiohttp_session.get(api_url + "account/list/", params = params) as resp:
data = await resp.json()
if data["status"] == "error":
return await ctx.embed_reply(f":no_entry: Error: {data['error']['message']}")
if data["status"] != "ok":
return await ctx.embed_reply(":no_entry: Error")
if not data["meta"]["count"]:
return await ctx.embed_reply(":no_entry: Error: Player not found")
account_id = data["data"][0]["account_id"]
params = {"application_id": ctx.bot.WARGAMING_APPLICATION_ID, "account_id": account_id}
async with ctx.bot.aiohttp_session.get(api_url + "account/info/", params = params) as resp:
data = await resp.json()
if data["status"] == "error":
return await ctx.embed_reply(f":no_entry: Error: {data['error']['message']}")
if data["status"] != "ok":
return await ctx.embed_reply(":no_entry: Error")
data = data["data"][str(account_id)]
# TODO: handle hidden profile?
fields = [("ID", account_id), ("Account Level", data["leveling_tier"])]
fields.append(("Account XP", f"{data['leveling_points']:,}"))
fields.append(("Battles Fought", data["statistics"]["battles"]))
fields.append(("Miles Travelled", data["statistics"]["distance"]))
created_at = datetime.datetime.utcfromtimestamp(data["created_at"])
await ctx.embed_reply(title = data["nickname"], fields = fields,
footer_text = "Account Created", timestamp = created_at)
|
from discord.ext import commands
import datetime
from utilities import checks
def setup(bot):
bot.add_cog(WoWS(bot))
class WoWS(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.api_urls = {"asia": "https://api.worldofwarships.asia/wows/",
"eu": "https://api.worldofwarships.eu/wows/",
"na": "https://api.worldofwarships.com/wows/",
"ru": "https://api.worldofwarships.ru/wows/"}
@commands.group(aliases = ["worldofwarships", "world_of_warships"],
invoke_without_command = True, case_insensitive = True)
@checks.not_forbidden()
async def wows(self, ctx):
'''
World of Warships
Realms/Regions: Asia, EU, NA, RU (Default: NA)
'''
await ctx.send_help(ctx.command)
@wows.group(invoke_without_command = True, case_insensitive = True)
@checks.not_forbidden()
async def player(self, ctx, player : str, region : str = "NA"):
'''Player details'''
api_url = self.api_urls.get(region.lower(), "na")
params = {"application_id": ctx.bot.WARGAMING_APPLICATION_ID, "search": player, "limit": 1}
async with ctx.bot.aiohttp_session.get(api_url + "account/list/", params = params) as resp:
data = await resp.json()
if data["status"] == "error":
return await ctx.embed_reply(f":no_entry: Error: {data['error']['message']}")
if data["status"] != "ok":
return await ctx.embed_reply(":no_entry: Error")
if not data["meta"]["count"]:
return await ctx.embed_reply(":no_entry: Error: Player not found")
account_id = data["data"][0]["account_id"]
params = {"application_id": ctx.bot.WARGAMING_APPLICATION_ID, "account_id": account_id}
async with ctx.bot.aiohttp_session.get(api_url + "account/info/", params = params) as resp:
data = await resp.json()
if data["status"] == "error":
return await ctx.embed_reply(f":no_entry: Error: {data['error']['message']}")
if data["status"] != "ok":
return await ctx.embed_reply(":no_entry: Error")
data = data["data"][str(account_id)]
# TODO: handle hidden profile?
fields = [("ID", account_id), ("Account Level", data["leveling_tier"])]
fields.append(("Account XP", f"{data['leveling_points']:,}"))
fields.append(("Battles Fought", data["statistics"]["battles"]))
fields.append(("Miles Travelled", data["statistics"]["distance"]))
created_at = datetime.datetime.utcfromtimestamp(data["created_at"])
await ctx.embed_reply(title = data["nickname"], fields = fields,
footer_text = "Account Created", timestamp = created_at)
| mit | Python |
3dede50c099bfc340114c9b7448b8644e59112e1 | define cache_date_format in backend | ekeih/OmNomNom,ekeih/OmNomNom,sattelite/OmNomNom | backend/backend.py | backend/backend.py | from celery import Celery
from celery.utils.log import get_task_logger
from os import environ
from redis import Redis
logger = get_task_logger(__name__)
redis_host = environ.get('OMNOMNOM_REDIS_HOST') or 'localhost'
redis_port = environ.get('OMNOMNOM_REDIS_PORT') or 6379
cache_interval = environ.get('OMNOMNOM_CACHE_INTERVAL') or 60 * 60
cache_database = environ.get('OMNOMNOM_CACHE_DATABASE') or 0
celery_database = environ.get('OMNOMNOM_CELERY_DATABASE') or 1
cache = Redis(host=redis_host, port=redis_port, db=cache_database)
cache_date_format = '%Y-%m-%d'
app = Celery('backend',
broker='redis://%s:%s/%s' % (redis_host, redis_port, celery_database),
include=[
'canteens.cafenero',
'canteens.personalkantine',
'canteens.singh',
'canteens.studierendenwerk',
'omnomgram.tasks',
'stats.tasks'
]
)
app.conf.timezone = 'Europe/Berlin'
app.conf.beat_schedule = {
'update cafenero': {
'task': 'canteens.cafenero.update_cafenero',
'schedule': cache_interval
},
'update singh': {
'task': 'canteens.singh.update_singh',
'schedule': cache_interval
},
'update personalkantine': {
'task': 'canteens.personalkantine.update_personalkantine',
'schedule': cache_interval
},
'update en canteen': {
'task': 'canteens.personalkantine.update_en_canteen',
'schedule': cache_interval
},
'update studierendenwerk': {
'task': 'canteens.studierendenwerk.update_all_studierendenwerk_canteens',
'schedule': cache_interval
}
}
if __name__ == '__main__':
app.start()
| from celery import Celery
from celery.utils.log import get_task_logger
from os import environ
from redis import Redis
logger = get_task_logger(__name__)
redis_host = environ.get('OMNOMNOM_REDIS_HOST') or 'localhost'
redis_port = environ.get('OMNOMNOM_REDIS_PORT') or 6379
cache_interval = environ.get('OMNOMNOM_CACHE_INTERVAL') or 60 * 60
cache_database = environ.get('OMNOMNOM_CACHE_DATABASE') or 0
celery_database = environ.get('OMNOMNOM_CELERY_DATABASE') or 1
cache = Redis(host=redis_host, port=redis_port, db=cache_database)
app = Celery('backend',
broker='redis://%s:%s/%s' % (redis_host, redis_port, celery_database),
include=[
'canteens.cafenero',
'canteens.personalkantine',
'canteens.singh',
'canteens.studierendenwerk',
'omnomgram.tasks',
'stats.tasks'
]
)
app.conf.timezone = 'Europe/Berlin'
app.conf.beat_schedule = {
'update cafenero': {
'task': 'canteens.cafenero.update_cafenero',
'schedule': cache_interval
},
'update singh': {
'task': 'canteens.singh.update_singh',
'schedule': cache_interval
},
'update personalkantine': {
'task': 'canteens.personalkantine.update_personalkantine',
'schedule': cache_interval
},
'update en canteen': {
'task': 'canteens.personalkantine.update_en_canteen',
'schedule': cache_interval
},
'update studierendenwerk': {
'task': 'canteens.studierendenwerk.update_all_studierendenwerk_canteens',
'schedule': cache_interval
}
}
if __name__ == '__main__':
app.start()
| agpl-3.0 | Python |
64652f603c89d8871ce11af938ec3bf8a872f5d0 | use Template.set_metadata instead of add_metadata (#1864) | cloudtools/troposphere,cloudtools/troposphere | examples/Metadata.py | examples/Metadata.py | from troposphere import Template
t = Template()
t.set_description("Example to show adding a Metadata section to the template")
t.set_metadata({
"Comments": "Initial Draft",
"LastUpdated": "Jan 1st 2015",
"UpdatedBy": "First Last",
"Version": "V1.0",
})
print(t.to_json())
| from troposphere import Template
t = Template()
t.set_description("Example to show adding a Metadata section to the template")
t.add_metadata({
"Comments": "Initial Draft",
"LastUpdated": "Jan 1st 2015",
"UpdatedBy": "First Last",
"Version": "V1.0",
})
print(t.to_json())
| bsd-2-clause | Python |
045ef7b801e99a07b179694285aa104881f307bf | Add docstring to satisfy pylint | mininet/mininet,mininet/mininet,mininet/mininet | examples/__init__.py | examples/__init__.py | """
Mininet Examples
See README for details
"""
| # Mininet Examples
| bsd-3-clause | Python |
4228574868a2ab72214ed5d2f3fab55134d9c1bb | add traited example to debug script | fprados/nipype,carlohamalainen/nipype,grlee77/nipype,carolFrohlich/nipype,mick-d/nipype_source,iglpdc/nipype,wanderine/nipype,gerddie/nipype,rameshvs/nipype,carolFrohlich/nipype,fprados/nipype,glatard/nipype,FCP-INDI/nipype,carlohamalainen/nipype,mick-d/nipype,Leoniela/nipype,Leoniela/nipype,dgellis90/nipype,blakedewey/nipype,carolFrohlich/nipype,Leoniela/nipype,dgellis90/nipype,blakedewey/nipype,pearsonlab/nipype,arokem/nipype,arokem/nipype,rameshvs/nipype,gerddie/nipype,mick-d/nipype_source,iglpdc/nipype,mick-d/nipype,JohnGriffiths/nipype,grlee77/nipype,gerddie/nipype,grlee77/nipype,mick-d/nipype,JohnGriffiths/nipype,FCP-INDI/nipype,FredLoney/nipype,sgiavasis/nipype,dgellis90/nipype,mick-d/nipype_source,rameshvs/nipype,carlohamalainen/nipype,glatard/nipype,christianbrodbeck/nipype,pearsonlab/nipype,sgiavasis/nipype,dmordom/nipype,dmordom/nipype,glatard/nipype,FredLoney/nipype,wanderine/nipype,blakedewey/nipype,satra/NiPypeold,FCP-INDI/nipype,dmordom/nipype,wanderine/nipype,satra/NiPypeold,pearsonlab/nipype,sgiavasis/nipype,glatard/nipype,JohnGriffiths/nipype,christianbrodbeck/nipype,rameshvs/nipype,dgellis90/nipype,wanderine/nipype,sgiavasis/nipype,FCP-INDI/nipype,pearsonlab/nipype,arokem/nipype,mick-d/nipype,iglpdc/nipype,blakedewey/nipype,grlee77/nipype,carolFrohlich/nipype,arokem/nipype,FredLoney/nipype,JohnGriffiths/nipype,fprados/nipype,gerddie/nipype,iglpdc/nipype | examples/pe_debug.py | examples/pe_debug.py | import nipype.pipeline.engine as pe
import nipype.interfaces.spm as spm
import nipype.interfaces.fsl as fsl
reload(pe)
realign = pe.Node(spm.Realign(), name = 'spmrealign')
coreg = pe.Node(spm.Coregister(), name = 'coreg')
realign2 = pe.Node(spm.Realign(), name = 'spmrealign2')
bet = pe.MapNode(fsl.Bet(), iterfield=['infile'], name='bet')
w1 = pe.Workflow(name='spm')
w1.connect([(realign, coreg, [('realigned_files', 'source')])])
w1.inputs.spmrealign.fwhm = 0.5
assert(realign.inputs.fwhm == 0.5)
w2 = pe.Workflow(name='cplx')
w2.connect(w1, 'coreg.coregistered_files', realign2, 'infile')
inputs = w2.inputs
w2._generate_execgraph()
| import nipype.pipeline.engine as pe
import nipype.interfaces.spm as spm
import nipype.interfaces.fsl as fsl
reload(pe)
realign = pe.Node(spm.Realign(), name = 'spmrealign')
coreg = pe.Node(spm.Coregister(), name = 'coreg')
realign2 = pe.Node(spm.Realign(), name = 'spmrealign2')
bet = pe.MapNode(fsl.Bet(), iterfield=['infile'], name='bet')
w1 = pe.Workflow(name='spm')
w1.connect([(realign, coreg, [('realigned_files', 'source')])])
w2 = pe.Workflow(name='cplx')
w2.connect(w1, 'coreg.coregistered_files', realign2, 'infile')
inputs = w2.inputs
w2._generate_execgraph()
| bsd-3-clause | Python |
de209b4add50c900a78b828d1df6a3b6d0d58ff2 | fix flake8 error | jakevdp/altair,altair-viz/altair | altair/utils/__init__.py | altair/utils/__init__.py | from .core import (
infer_vegalite_type,
sanitize_dataframe,
parse_shorthand,
use_signature,
update_subtraits,
update_nested,
display_traceback,
SchemaBase,
Undefined
)
from .html import spec_to_html
from .plugin_registry import PluginRegistry
__all__ = (
'infer_vegalite_type',
'sanitize_dataframe',
'spec_to_html',
'parse_shorthand',
'use_signature',
'update_subtraits',
'update_nested',
'display_traceback',
'SchemaBase',
'Undefined',
'PluginRegistry'
)
| from .core import (
infer_vegalite_type,
sanitize_dataframe,
parse_shorthand,
use_signature,
update_subtraits,
update_nested,
display_traceback,
SchemaBase,
Undefined
)
from .html import spec_to_html
from .plugin_registry import PluginRegistry
__all__ = (
'infer_vegalite_type',
'sanitize_dataframe',
'parse_shorthand',
'use_signature',
'update_subtraits',
'update_nested',
'display_traceback',
'SchemaBase',
'Undefined',
'PluginRegistry'
)
| bsd-3-clause | Python |
9ff63d002293da44871307960d5b439b5e6ba48f | Remove 'lights' command for a while | alwye/spark-pi,alwye/spark-pi | app/commands/help.py | app/commands/help.py | def proc(command, message):
return {
"data": {
"status": "ok",
"html": """
<p>
Hi! I can control your Raspberry Pi. Send me the commands <b>in bold</b> to make me do stuff.<br><br>
📷 camera controls<br>
<b>camera photo</b>: I will take a photo and send it back<br>
⚙ subscription to events<br>
<b>event subscribe security</b>: if I detect motion, I'll send you a photo<br>
<b>event unsubscribe security</b>: I will stop sending photos<br>
</p>
"""
},
"response_required": True
}
| def proc(command, message):
return {
"data": {
"status": "ok",
"html": """
<p>
Hi! I can control your Raspberry Pi. Send me the commands <b>in bold</b> to make me do stuff.<br><br>
📷 camera controls<br>
<b>camera photo</b>: I will take a photo and send it back<br>
💡 light controls<br>
<b>lights on <i>color</i></b>: I will shine with the specified <i>color</i> (red, green, blue)<br>
<b>lights off</b>: I will stop shining!<br><br>
⚙ subscription to events<br>
<b>event subscribe security</b>: if I detect motion, I'll send you a photo<br>
<b>event unsubscribe security</b>: I will stop sending photos<br>
</p>
"""
},
"response_required": True
}
| mit | Python |
0a6b43f2202cb63aad18c119d7d46916b4d54873 | Make it easier to load the initial page | chadnickbok/librtcdcpp,chadnickbok/librtcdcpp | examples/site-api.py | examples/site-api.py | #!/usr/bin/env python
# Sets up a basic site that can allow two browsers to connect to each
# other via WebRTC DataChannels, sending connection events via WebSockets.
from flask import Flask, send_from_directory
from flask_sockets import Sockets
import json
app = Flask(__name__)
sockets = Sockets(app)
channels = {}
@sockets.route('/channel/<name>')
def channel_socket(ws, name):
if name in channels:
channels[name].append(ws)
else:
channels[name] = [ws]
while not ws.closed:
message = ws.receive()
print "Got msg:", message
if message is None:
continue
for other_ws in channels[name]:
if ws is not other_ws:
other_ws.send(message)
channels[name].remove(ws)
for other_ws in channels[name]:
other_ws.send(json.dumps({"type": "client_disconnected", "msg": {}}))
@app.route('/static/<path:path>')
def send_static(path):
return app.send_from_directory('static', path)
@app.route('/')
def serve_site():
return app.send_static_file("index.html")
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
server.serve_forever()
| #!/usr/bin/env python
# Sets up a basic site that can allow two browsers to connect to each
# other via WebRTC DataChannels, sending connection events via WebSockets.
from flask import Flask, send_from_directory
from flask_sockets import Sockets
import json
app = Flask(__name__)
sockets = Sockets(app)
channels = {}
@sockets.route('/channel/<name>')
def channel_socket(ws, name):
if name in channels:
channels[name].append(ws)
else:
channels[name] = [ws]
while not ws.closed:
message = ws.receive()
print "Got msg:", message
if message is None:
continue
for other_ws in channels[name]:
if ws is not other_ws:
other_ws.send(message)
channels[name].remove(ws)
for other_ws in channels[name]:
other_ws.send(json.dumps({"type": "client_disconnected", "msg": {}}))
@app.route('/static/<path:path>')
def send_static(path):
return app.send_from_directory('static', path)
@app.route('/index.html')
def serve_site():
return app.send_static_file("index.html")
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
server.serve_forever()
| bsd-3-clause | Python |
38f9b888539996fdfa7dfa3ca26f8545d535b7fd | Print statement as function call | goshippo/shippo-python-client | examples/tracking.py | examples/tracking.py | import shippo
'''
In this tutorial we have an order with a sender address,
recipient address and parcel information that we need to ship.
'''
# Replace <API-KEY> with your key
shippo.config.api_key = "<API-KEY>"
# Tracking based on a Shippo transaction
transaction_id = '<TRANSACTION-ID>'
transaction = shippo.Transaction.retrieve(transaction_id)
if transaction:
print(transaction.get('tracking_status'))
print(transaction.get('tracking_history'))
# Tracking based on carrier and tracking number
tracking_number = '9205590164917337534322'
# For full list of carrier tokens see https://goshippo.com/docs/reference#carriers
carrier_token = 'usps'
tracking = shippo.Track.get_status(carrier_token, tracking_number)
print(tracking)
# Registering a tracking webhook
webhook_response = shippo.Track.create(
carrier=carrier_token,
tracking_number=tracking_number,
metadata='optional, up to 100 characters'
)
print(webhook_response)
# For more tutorals of address validation, tracking, returns, refunds, and other functionality, check out our
# complete documentation: https://goshippo.com/docs/
| import shippo
'''
In this tutorial we have an order with a sender address,
recipient address and parcel information that we need to ship.
'''
# Replace <API-KEY> with your key
shippo.config.api_key = "<API-KEY>"
# Tracking based on a Shippo transaction
transaction_id = '<TRANSACTION-ID>'
transaction = shippo.Transaction.retrieve(transaction_id)
if transaction:
print(transaction.get('tracking_status'))
print(transaction.get('tracking_history'))
# Tracking based on carrier and tracking number
tracking_number = '9205590164917337534322'
# For full list of carrier tokens see https://goshippo.com/docs/reference#carriers
carrier_token = 'usps'
tracking = shippo.Track.get_status(carrier_token, tracking_number)
print(tracking)
# Registering a tracking webhook
webhook_response = shippo.Track.create(
carrier=carrier_token,
tracking_number=tracking_number,
metadata='optional, up to 100 characters'
)
print webhook_response
#For more tutorals of address validation, tracking, returns, refunds, and other functionality, check out our
#complete documentation: https://goshippo.com/docs/
| mit | Python |
13b65b7ddcb84d6a214cb3858070c6706b034e5b | Add transaction verification API | achamely/omniwallet,OmniLayer/omniwallet,Nevtep/omniwallet,Nevtep/omniwallet,VukDukic/omniwallet,habibmasuro/omniwallet,OmniLayer/omniwallet,habibmasuro/omniwallet,OmniLayer/omniwallet,habibmasuro/omniwallet,Nevtep/omniwallet,achamely/omniwallet,achamely/omniwallet,habibmasuro/omniwallet,VukDukic/omniwallet,OmniLayer/omniwallet,VukDukic/omniwallet,achamely/omniwallet,Nevtep/omniwallet | api/mastercoin_verify.py | api/mastercoin_verify.py | import os
import glob
from flask import Flask, request, jsonify, abort, json
data_dir_root = os.environ.get('DATADIR')
app = Flask(__name__)
app.debug = True
@app.route('/addresses')
def addresses():
currency_id = request.args.get('currency_id')
response = []
addr_glob = glob.glob(data_dir_root + '/addr/*.json')
for address_file in addr_glob:
with open(address_file, 'r') as f:
addr = json.load(f)
res = {
'address': addr['address']
}
if currency_id == '0':
btc_balance = [x['value'] for x in addr['balance'] if x['symbol'] == 'BTC'][0]
res['balance'] = float(btc_balance)
response.append(res)
else:
if currency_id == '1' or currency_id == '2':
msc_currency_id = str(int(currency_id) - 1) # Mastercoin-tools is off by one on currency id from the spec
if msc_currency_id in addr:
res['balance'] = float(addr[msc_currency_id]['balance'])
response.append(res)
json_response = json.dumps(response)
return json_response
@app.route('/transactions/<address>')
def transactions(address=None):
currency_id = request.args.get('currency_id')
print address, currency_id
if address == None:
abort(400)
if not exists(address):
abort(404)
addr = read(address)
transactions = []
tx_lists = ['accept_transactions', 'bought_transactions', 'exodus_transactions', 'offer_transactions', 'received_transactions', 'sent_transactions', 'sold_transactions']
if currency_id == '0':
return jsonify({ 'address': address, 'transactions': transactions }) # Punt on bitcoin transactions since we don't store them
if currency_id == '1' or currency_id == '2':
currency_id = str(int(currency_id) - 1) # Mastercoin-tools is off by one on currency id from the spec
if currency_id in addr:
for tx_i in tx_lists:
for tx in addr[currency_id][tx_i]:
transactions.append(tx_clean(tx))
return jsonify({ 'address': address, 'transactions': transactions })
# Utilities
def tx_clean(tx):
clean = {
'tx_hash': tx['tx_hash'],
'valid': True,
'accepted_amount': tx['formatted_amount']
}
if 'bitcoin_required' in tx:
clean['bought_amount'] = tx['bitcoin_required']
return clean
def read(address):
filename = data_dir_root + '/addr/' + address + '.json'
with open(filename, 'r') as f:
return json.load(f)
def exists(address):
filename = data_dir_root + '/addr/' + address + '.json'
return os.path.exists(filename)
| import os
import glob
from flask import Flask, request, jsonify, abort, json
data_dir_root = os.environ.get('DATADIR')
app = Flask(__name__)
app.debug = True
@app.route('/addresses')
def addresses():
currency_id = request.args.get('currency_id')
response = []
addr_glob = glob.glob(data_dir_root + '/addr/*.json')
for address_file in addr_glob:
with open(address_file, 'r') as f:
addr = json.load(f)
res = {
'address': addr['address']
}
if currency_id == '0':
btc_balance = [x['value'] for x in addr['balance'] if x['symbol'] == 'BTC'][0]
res['balance'] = float(btc_balance)
response.append(res)
else:
if currency_id == '1' or currency_id == '2':
msc_currency_id = str(int(currency_id) - 1) # Mastercoin-tools is off by one on currency id from the spec
if msc_currency_id in addr:
res['balance'] = float(addr[msc_currency_id]['balance'])
response.append(res)
json_response = json.dumps(response)
return json_response
@app.route('/transactions/<address>')
def transactions(address=None):
return ""
| agpl-3.0 | Python |
72222dd7dcf08ad13937edd1b01a75df0a71f67f | Bump app version to 2018.12 | kernelci/kernelci-backend,kernelci/kernelci-backend | app/handlers/__init__.py | app/handlers/__init__.py | __version__ = "2018.12"
__versionfull__ = __version__
| __version__ = "2018.11"
__versionfull__ = __version__
| lgpl-2.1 | Python |
073859067bcdd684d78eb0ddc2c4ecd1ed1b92f6 | Increase chunk size | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/userreports/const.py | corehq/apps/userreports/const.py | from __future__ import absolute_import
from datetime import timedelta
from django.utils.translation import ugettext_lazy as _
from corehq.apps.change_feed import topics
REPORT_BUILDER_EVENTS_KEY = 'REPORT_BUILDER_EVENTS_KEY'
DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE = _(
'Sorry! There was a problem viewing your report. '
'This likely occurred because the application associated with the report was deleted. '
'In order to view this data using the Report Builder you will have to delete this report '
'and then build it again. Click below to delete it.'
)
UCR_SQL_BACKEND = "SQL"
UCR_ES_BACKEND = "ES"
UCR_LABORATORY_BACKEND = "LABORATORY"
UCR_ES_PRIMARY = "LAB_ES_PRIMARY"
UCR_BACKENDS = [UCR_SQL_BACKEND, UCR_ES_BACKEND]
UCR_SUPPORT_BOTH_BACKENDS = (UCR_LABORATORY_BACKEND, UCR_ES_PRIMARY)
DEFAULT_MAXIMUM_EXPANSION = 10
UCR_CELERY_QUEUE = 'ucr_queue'
UCR_INDICATOR_CELERY_QUEUE = 'ucr_indicator_queue'
KAFKA_TOPICS = (
topics.CASE,
topics.CASE_SQL,
topics.FORM,
topics.FORM_SQL,
topics.LOCATION,
topics.COMMCARE_USER,
)
VALID_REFERENCED_DOC_TYPES = [
'CommCareCase',
'CommCareUser',
'Location',
'XFormInstance',
]
ASYNC_INDICATOR_QUEUE_TIME = timedelta(minutes=5)
ASYNC_INDICATOR_CHUNK_SIZE = 100
XFORM_CACHE_KEY_PREFIX = 'xform_to_json_cache'
| from __future__ import absolute_import
from datetime import timedelta
from django.utils.translation import ugettext_lazy as _
from corehq.apps.change_feed import topics
REPORT_BUILDER_EVENTS_KEY = 'REPORT_BUILDER_EVENTS_KEY'
DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE = _(
'Sorry! There was a problem viewing your report. '
'This likely occurred because the application associated with the report was deleted. '
'In order to view this data using the Report Builder you will have to delete this report '
'and then build it again. Click below to delete it.'
)
UCR_SQL_BACKEND = "SQL"
UCR_ES_BACKEND = "ES"
UCR_LABORATORY_BACKEND = "LABORATORY"
UCR_ES_PRIMARY = "LAB_ES_PRIMARY"
UCR_BACKENDS = [UCR_SQL_BACKEND, UCR_ES_BACKEND]
UCR_SUPPORT_BOTH_BACKENDS = (UCR_LABORATORY_BACKEND, UCR_ES_PRIMARY)
DEFAULT_MAXIMUM_EXPANSION = 10
UCR_CELERY_QUEUE = 'ucr_queue'
UCR_INDICATOR_CELERY_QUEUE = 'ucr_indicator_queue'
KAFKA_TOPICS = (
topics.CASE,
topics.CASE_SQL,
topics.FORM,
topics.FORM_SQL,
topics.LOCATION,
topics.COMMCARE_USER,
)
VALID_REFERENCED_DOC_TYPES = [
'CommCareCase',
'CommCareUser',
'Location',
'XFormInstance',
]
ASYNC_INDICATOR_QUEUE_TIME = timedelta(minutes=5)
ASYNC_INDICATOR_CHUNK_SIZE = 20
XFORM_CACHE_KEY_PREFIX = 'xform_to_json_cache'
| bsd-3-clause | Python |
087a0e23dc6387fbb622779e2ce174d6b3c8281f | raise version | xiezhen/brilws,xiezhen/brilws | brilws/_version.py | brilws/_version.py | __version__ = "0.10.6"
| __version__ = "0.10.5"
| mit | Python |
c9363986cb36fd73e5b9dcb1718292f1eed8af82 | Fix choice options for status and permission in ChallengeHost model. (#119) | taranjeet/EvalAI,taranjeet/EvalAI,taranjeet/EvalAI,taranjeet/EvalAI | apps/hosts/models.py | apps/hosts/models.py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from accounts.models import (TimeStampedModel, )
# from challenges.models import (Challenge, )
class ChallengeHostTeam(TimeStampedModel):
"""
Model representing the Host Team for a partiuclar challenge
"""
team_name = models.CharField(max_length=100,)
created_by = models.ForeignKey(User, related_name='challenge_host_team_creator')
class Meta:
app_label = 'hosts'
db_table = 'challenge_host_teams'
class ChallengeHost(TimeStampedModel):
# permission options
ADMIN = 'Admin'
READ = 'Read'
RESTRICTED = 'Restricted'
WRITE = 'Write'
# status options
ACCEPTED = 'Accepted'
DENIED = 'Denied'
PENDING = 'Pending'
SELF = 'Self'
UNKNOWN = 'Unknown'
PERMISSION_OPTIONS = (
(ADMIN, ADMIN),
(READ, READ),
(RESTRICTED, RESTRICTED),
(WRITE, WRITE),
)
STATUS_OPTIONS = (
(ACCEPTED, ACCEPTED),
(DENIED, DENIED),
(PENDING, PENDING),
(SELF, SELF),
(UNKNOWN, UNKNOWN),
)
user = models.ForeignKey(User)
team_name = models.ForeignKey('ChallengeHostTeam')
status = models.CharField(max_length=30, choices=STATUS_OPTIONS)
permissions = models.CharField(max_length=30, choices=PERMISSION_OPTIONS)
class Meta:
app_label = 'hosts'
db_table = 'challenge_host'
| from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from accounts.models import (TimeStampedModel, )
# from challenges.models import (Challenge, )
class ChallengeHostTeam(TimeStampedModel):
"""
Model representing the Host Team for a partiuclar challenge
"""
team_name = models.CharField(max_length=100,)
created_by = models.ForeignKey(User, related_name='challenge_host_team_creator')
class Meta:
app_label = 'hosts'
db_table = 'challenge_host_teams'
class ChallengeHost(TimeStampedModel):
# permission options
ADMIN = 'Admin'
READ = 'Read'
RESTRICTED = 'Restricted'
WRITE = 'Write'
# status options
ACCEPTED = 'Accepted'
DENIED = 'Denied'
PENDING = 'Pending'
SELF = 'Self'
UNKNOWN = 'Unknown'
PERMISSION_OPTIONS = (
(ADMIN, ADMIN),
(READ, READ),
(RESTRICTED, RESTRICTED),
(WRITE, WRITE),
)
STATUS_OPTIONS = (
(ACCEPTED, ACCEPTED),
(DENIED, DENIED),
(PENDING, PENDING),
(SELF, SELF),
(UNKNOWN, UNKNOWN),
)
user = models.ForeignKey(User)
team_name = models.ForeignKey('ChallengeHostTeam')
status = models.CharField(max_length=30, choices=PERMISSION_OPTIONS)
permissions = models.CharField(max_length=30, choices=STATUS_OPTIONS)
class Meta:
app_label = 'hosts'
db_table = 'challenge_host'
| bsd-3-clause | Python |
87116a8f14f948d9c1fb00cc1281bf71e6746158 | Update version to 9.2.4.dev0 [ci skip] | angr/archinfo | archinfo/__init__.py | archinfo/__init__.py | """
archinfo is a collection of classes that contain architecture-specific information.
It is useful for cross-architecture tools (such as pyvex).
"""
__version__ = "9.2.4.dev0"
if bytes is str:
raise Exception("This module is designed for python 3 only. Please install an older version to use python 2.")
# NewType Declaration, see https://docs.python.org/3/library/typing.html#newtype
from typing import NewType
RegisterOffset = NewType('RegisterOffset', int)
TmpVar = NewType('TmpVar', int)
# This causes too much issues as a NewType, sot is a simple alias instead
# This means that is still legal to pass any str where a RegisterName is expected.
# The downside is that PyCharm will show the type as `str` when displaying the signature
RegisterName = str
# pylint: disable=wildcard-import
from .arch import *
from .defines import defines
from .arch_amd64 import ArchAMD64
from .arch_x86 import ArchX86
from .arch_arm import ArchARM, ArchARMEL, ArchARMHF, ArchARMCortexM
from .arch_aarch64 import ArchAArch64
from .arch_avr import ArchAVR8
from .arch_ppc32 import ArchPPC32
from .arch_ppc64 import ArchPPC64
from .arch_mips32 import ArchMIPS32
from .arch_mips64 import ArchMIPS64
from .arch_soot import ArchSoot
from .archerror import ArchError
from .arch_s390x import ArchS390X
| """
archinfo is a collection of classes that contain architecture-specific information.
It is useful for cross-architecture tools (such as pyvex).
"""
__version__ = "9.2.3.dev0"
if bytes is str:
raise Exception("This module is designed for python 3 only. Please install an older version to use python 2.")
# NewType Declaration, see https://docs.python.org/3/library/typing.html#newtype
from typing import NewType
RegisterOffset = NewType('RegisterOffset', int)
TmpVar = NewType('TmpVar', int)
# This causes too much issues as a NewType, sot is a simple alias instead
# This means that is still legal to pass any str where a RegisterName is expected.
# The downside is that PyCharm will show the type as `str` when displaying the signature
RegisterName = str
# pylint: disable=wildcard-import
from .arch import *
from .defines import defines
from .arch_amd64 import ArchAMD64
from .arch_x86 import ArchX86
from .arch_arm import ArchARM, ArchARMEL, ArchARMHF, ArchARMCortexM
from .arch_aarch64 import ArchAArch64
from .arch_avr import ArchAVR8
from .arch_ppc32 import ArchPPC32
from .arch_ppc64 import ArchPPC64
from .arch_mips32 import ArchMIPS32
from .arch_mips64 import ArchMIPS64
from .arch_soot import ArchSoot
from .archerror import ArchError
from .arch_s390x import ArchS390X
| bsd-2-clause | Python |
0e73156e1868cf9800b71fcd425cd29aa17134d9 | Update __init__.py | franziz/artagger | artagger/__init__.py | artagger/__init__.py | # -*- coding: utf-8 -*-
from .Utility.Utils import getWordTag, readDictionary
from .InitialTagger.InitialTagger import initializeSentence
from .SCRDRlearner.SCRDRTree import SCRDRTree
from .SCRDRlearner.Object import FWObject
import os
import copy
import pickle
import codecs
class Word:
def __init__(self, **kwargs):
self.word = kwargs.get("word", None)
self.tag = kwargs.get("tag", None)
class RDRPOSTagger(SCRDRTree):
"""
RDRPOSTagger for a particular language
"""
def __init__(self):
self.root = None
def tagRawSentence(self, dictionary, rawLine):
line = initializeSentence(dictionary, rawLine)
sen = []
wordTags = line.split()
for i in range(len(wordTags)):
fwObject = FWObject.getFWObject(wordTags, i)
word, tag = getWordTag(wordTags[i])
node = self.findFiredNode(fwObject)
if node.depth > 0:
sen.append(Word(word=word, tag=node.conclusion))
# sen.append(word + "/" + node.conclusion)
else:# Fired at root, return initialized tag
sen.append(Word(word=word, tag=tag))
sen.append(word + "/" + tag)
return sen
# return " ".join(sen)
class Tagger:
def __init__(self, **kwargs):
self.language = kwargs.get("language", "th")
self.text = kwargs.get("text", None)
self.model = {}
self.load_model()
def load_model(self):
self.model.update({"th":{
"rdr": codecs.open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.RDR"), "r", encoding="utf8"),
"dict": codecs.open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.DICT"), "r", encoding="utf8")
}})
def tag(self, text):
self.text = copy.copy(text)
tagger = RDRPOSTagger()
rdr_file = self.model[self.language]["rdr"]
dict_file = self.model[self.language]["dict"]
tagger.constructSCRDRtreeFromRDRfile(rdr_file.readlines())
dictionary = readDictionary(dict_file.readlines())
return tagger.tagRawSentence(dictionary, self.text)
| # -*- coding: utf-8 -*-
from .Utility.Utils import getWordTag, readDictionary
from .InitialTagger.InitialTagger import initializeSentence
from .SCRDRlearner.SCRDRTree import SCRDRTree
from .SCRDRlearner.Object import FWObject
import os
import copy
import pickle
class Word:
def __init__(self, **kwargs):
self.word = kwargs.get("word", None)
self.tag = kwargs.get("tag", None)
class RDRPOSTagger(SCRDRTree):
"""
RDRPOSTagger for a particular language
"""
def __init__(self):
self.root = None
def tagRawSentence(self, dictionary, rawLine):
line = initializeSentence(dictionary, rawLine)
sen = []
wordTags = line.split()
for i in range(len(wordTags)):
fwObject = FWObject.getFWObject(wordTags, i)
word, tag = getWordTag(wordTags[i])
node = self.findFiredNode(fwObject)
if node.depth > 0:
sen.append(Word(word=word, tag=node.conclusion))
# sen.append(word + "/" + node.conclusion)
else:# Fired at root, return initialized tag
sen.append(Word(word=word, tag=tag))
sen.append(word + "/" + tag)
return sen
# return " ".join(sen)
class Tagger:
def __init__(self, **kwargs):
self.language = kwargs.get("language", "th")
self.text = kwargs.get("text", None)
self.model = {}
self.load_model()
def load_model(self):
self.model.update({"th":{
"rdr": open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.RDR"), "r", encoding="utf8"),
"dict": open(os.path.join(os.path.dirname(__file__), "Models", "POS", "Thai.DICT"), "r", encoding="utf8")
}})
def tag(self, text):
self.text = copy.copy(text)
tagger = RDRPOSTagger()
rdr_file = self.model[self.language]["rdr"]
dict_file = self.model[self.language]["dict"]
tagger.constructSCRDRtreeFromRDRfile(rdr_file.readlines())
dictionary = readDictionary(dict_file.readlines())
return tagger.tagRawSentence(dictionary, self.text)
| apache-2.0 | Python |
93612081ecaf5e8f99744e49762d88a95b3ec707 | Add Context to __init__.py | craigahobbs/chisel | chisel/__init__.py | chisel/__init__.py | #
# Copyright (C) 2012-2015 Craig Hobbs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
__version__ = '0.9.2'
from .action import \
action, \
Action, \
ActionError
from .app import \
Application, \
Context
from .doc import \
DocAction, \
DocPage, \
Element
from .model import \
JsonDate, \
JsonDatetime, \
JsonFloat, \
JsonUUID, \
AttributeValidationError, \
ValidationError, \
VALIDATE_DEFAULT, \
VALIDATE_QUERY_STRING, \
VALIDATE_JSON_INPUT, \
VALIDATE_JSON_OUTPUT
from .request import \
request, \
Request
from .spec import \
SpecParser, \
SpecParserError
from .url import \
decode_query_string, \
encode_query_string
from .util import \
TZLOCAL, \
TZUTC
| #
# Copyright (C) 2012-2015 Craig Hobbs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
__version__ = '0.9.1'
from .action import \
action, \
Action, \
ActionError
from .app import \
Application
from .doc import \
DocAction, \
DocPage, \
Element
from .model import \
JsonDate, \
JsonDatetime, \
JsonFloat, \
JsonUUID, \
AttributeValidationError, \
ValidationError, \
VALIDATE_DEFAULT, \
VALIDATE_QUERY_STRING, \
VALIDATE_JSON_INPUT, \
VALIDATE_JSON_OUTPUT
from .request import \
request, \
Request
from .spec import \
SpecParser, \
SpecParserError
from .url import \
decode_query_string, \
encode_query_string
from .util import \
TZLOCAL, \
TZUTC
| mit | Python |
7d8bab4d1af012e98b0bab8d72bb180814b13c83 | Set version to 0.1.27a1 | d9pouces/django-powerdns-manager,gnotaras/django-powerdns-manager,d9pouces/django-powerdns-manager,gnotaras/django-powerdns-manager | src/powerdns_manager/__init__.py | src/powerdns_manager/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of django-powerdns-manager.
#
# django-powerdns-manager is a web based PowerDNS administration panel.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-powerdns-manager
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-powerdns-manager
#
# Copyright 2012 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Scheme: <major>.<minor>.<maintenance>.<maturity>.<revision>
# maturity: final/beta/alpha
VERSION = (0, 1, 27, 'alpha', 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2] is not None:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3] != 'final':
if VERSION[4] > 0:
version = '%s%s%s' % (version, VERSION[3][0], VERSION[4])
else:
version = '%s%s' % (version, VERSION[3][0])
return version
__version__ = get_version()
def get_status_classifier():
if VERSION[3] == 'final':
return 'Development Status :: 5 - Production/Stable'
elif VERSION[3] == 'beta':
return 'Development Status :: 4 - Beta'
elif VERSION[3] == 'alpha':
return 'Development Status :: 3 - Alpha'
raise NotImplementedError
| # -*- coding: utf-8 -*-
#
# This file is part of django-powerdns-manager.
#
# django-powerdns-manager is a web based PowerDNS administration panel.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-powerdns-manager
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-powerdns-manager
#
# Copyright 2012 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Scheme: <major>.<minor>.<maintenance>.<maturity>.<revision>
# maturity: final/beta/alpha
VERSION = (0, 1, 26, 'alpha', 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2] is not None:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3] != 'final':
if VERSION[4] > 0:
version = '%s%s%s' % (version, VERSION[3][0], VERSION[4])
else:
version = '%s%s' % (version, VERSION[3][0])
return version
__version__ = get_version()
def get_status_classifier():
if VERSION[3] == 'final':
return 'Development Status :: 5 - Production/Stable'
elif VERSION[3] == 'beta':
return 'Development Status :: 4 - Beta'
elif VERSION[3] == 'alpha':
return 'Development Status :: 3 - Alpha'
raise NotImplementedError
| apache-2.0 | Python |
0031c83a571341f3031a4acb1b723658f64d4e9e | Update to v1.3.19 | jackzhao-mj/ok-client,jathak/ok-client,Cal-CS-61A-Staff/ok-client | client/__init__.py | client/__init__.py | __version__ = 'v1.3.19'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| __version__ = 'v1.3.18'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| apache-2.0 | Python |
f481a03a8985f6731bcf9314d72467abab4b1ed2 | change default freeglut library name to glut | tuttleofx/sconsProject | autoconf/freeglut.py | autoconf/freeglut.py | from _external import *
from gl import *
from glu import *
freeglut = LibWithHeaderChecker(
'glut',
['GL/freeglut.h'],
'c',
dependencies=[gl,glu]
)
| from _external import *
from gl import *
from glu import *
freeglut = LibWithHeaderChecker(
'freeglut',
['GL/freeglut.h'],
'c',
dependencies=[gl,glu]
)
| mit | Python |
78d2e811051e1b52c6e5915e7c4d80134d4a7cbb | Optimize prime-number check for Python (#33) | mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview,mre/the-coding-interview | problems/prime-number/prime-number.py | problems/prime-number/prime-number.py | from math import sqrt
def is_prime(n):
if n <= 1:
return False
elif n in [2, 3]:
return True
# To understand the statement below, please visit https://github.com/mre/the-coding-interview/pull/33
elif n % 6 not in [1, 5]:
return False
for i in range(3, int(sqrt(n))+1, 2): # change range to xrange for python2
if n % i == 0:
return False
return True
print(is_prime(13))
| from math import sqrt
def is_prime(n):
if n <= 1:
return False
elif n == 2:
return True
elif n % 2 == 0:
return False
for i in xrange(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
| mit | Python |
d577c02a706bac1ed94aa91fbe2494f8d0c0f581 | add in import | thomasyu888/Genie,thomasyu888/Genie,thomasyu888/Genie,thomasyu888/Genie | processing/example_filetype_format.py | processing/example_filetype_format.py | import logging
import multiprocessing
import pandas as pd
import os
#import packages
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class FileTypeFormat(object):
_process_kwargs = ["newPath", "databaseSynId"]
_fileType = "fileType"
_validation_kwargs = []
def __init__(self, syn, center, poolSize=1):
self.syn = syn
self.center = center
#self.pool = multiprocessing.Pool(poolSize)
def readFile(self, filePathList):
filePath = filePathList[0]
df = pd.read_csv(filePath,sep="\t",comment="#")
return(df)
def _validateFilename(self, filePath):
pass
def validateFilename(self, filePath):
self._validateFilename(filePath)
return(self._fileType)
def process_steps(self, filePath, *args, **kwargs):
pass
def preprocess(self, filePath, *args, **kwargs):
# - clinical
# - maf
# - vcf
return(dict())
def process(self, filePath, *args, **kwargs):
preprocess_args = self.preprocess(filePath, **kwargs)
kwargs.update(preprocess_args)
mykwargs = {}
for required_parameter in self._process_kwargs:
assert required_parameter in kwargs.keys(), "%s not in parameter list" % required_parameter
mykwargs[required_parameter] = kwargs[required_parameter]
path = self.process_steps(filePath, **mykwargs)
return(path)
def _validate(self, df):
total_error =""
warning = ""
logger.info("NO VALIDATION for %s files" % self._fileType)
return(total_error, warning)
def _call_validate(self, df, **kwargs):
return(self._validate(df))
# def validate_steps(self, filePathList, **kwargs):
# total_error = ""
# warning = ""
# logger.info("VALIDATING %s" % os.path.basename(",".join(filePathList)))
# df = readFile(filePathList)
# return(self._validate(df))
def validate(self, filePathList, **kwargs):
mykwargs = {}
for required_parameter in self._validation_kwargs:
assert required_parameter in kwargs.keys(), "%s not in parameter list" % required_parameter
mykwargs[required_parameter] = kwargs[required_parameter]
logger.info("VALIDATING %s" % os.path.basename(",".join(filePathList)))
#total_error, warning = self.validate_steps(filePathList, **mykwargs)
df = self.readFile(filePathList)
total_error, warning = self._call_validate(df, **mykwargs)
return(total_error, warning) | import logging
import multiprocessing
import pandas as pd
#import packages
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class FileTypeFormat(object):
_process_kwargs = ["newPath", "databaseSynId"]
_fileType = "fileType"
_validation_kwargs = []
def __init__(self, syn, center, poolSize=1):
self.syn = syn
self.center = center
#self.pool = multiprocessing.Pool(poolSize)
def readFile(self, filePathList):
filePath = filePathList[0]
df = pd.read_csv(filePath,sep="\t",comment="#")
return(df)
def _validateFilename(self, filePath):
pass
def validateFilename(self, filePath):
self._validateFilename(filePath)
return(self._fileType)
def process_steps(self, filePath, *args, **kwargs):
pass
def preprocess(self, filePath, *args, **kwargs):
# - clinical
# - maf
# - vcf
return(dict())
def process(self, filePath, *args, **kwargs):
preprocess_args = self.preprocess(filePath, **kwargs)
kwargs.update(preprocess_args)
mykwargs = {}
for required_parameter in self._process_kwargs:
assert required_parameter in kwargs.keys(), "%s not in parameter list" % required_parameter
mykwargs[required_parameter] = kwargs[required_parameter]
path = self.process_steps(filePath, **mykwargs)
return(path)
def _validate(self, df):
total_error =""
warning = ""
logger.info("NO VALIDATION for %s files" % self._fileType)
return(total_error, warning)
def _call_validate(self, df, **kwargs):
return(self._validate(df))
# def validate_steps(self, filePathList, **kwargs):
# total_error = ""
# warning = ""
# logger.info("VALIDATING %s" % os.path.basename(",".join(filePathList)))
# df = readFile(filePathList)
# return(self._validate(df))
def validate(self, filePathList, **kwargs):
mykwargs = {}
for required_parameter in self._validation_kwargs:
assert required_parameter in kwargs.keys(), "%s not in parameter list" % required_parameter
mykwargs[required_parameter] = kwargs[required_parameter]
logger.info("VALIDATING %s" % os.path.basename(",".join(filePathList)))
#total_error, warning = self.validate_steps(filePathList, **mykwargs)
df = self.readFile(filePathList)
total_error, warning = self._call_validate(df, **mykwargs)
return(total_error, warning) | mit | Python |
b85e16177b08fcf57ede8f670472e9540c661d13 | FIX prod ref required | ingadhoc/account-analytic,ingadhoc/sale,adhoc-dev/account-financial-tools,ingadhoc/odoo-addons,ingadhoc/product,sysadminmatmoz/ingadhoc,adhoc-dev/odoo-addons,ingadhoc/product,ClearCorp/account-financial-tools,ingadhoc/sale,ingadhoc/sale,adhoc-dev/odoo-addons,ingadhoc/sale,ingadhoc/partner,dvitme/odoo-addons,sysadminmatmoz/ingadhoc,bmya/odoo-addons,ingadhoc/stock,sysadminmatmoz/ingadhoc,ingadhoc/odoo-addons,adhoc-dev/account-financial-tools,dvitme/odoo-addons,dvitme/odoo-addons,ingadhoc/account-invoicing,bmya/odoo-addons,ingadhoc/account-payment,ingadhoc/odoo-addons,ingadhoc/account-financial-tools,adhoc-dev/odoo-addons,ClearCorp/account-financial-tools,bmya/odoo-addons | product_reference_required/product.py | product_reference_required/product.py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
class product_template(models.Model):
_inherit = "product.template"
default_code = fields.Char(
required=True,
)
@api.model
def create(self, vals):
"""
If we create from template we send default code by context
"""
default_code = vals.get('default_code', False)
if default_code:
return super(product_template, self.with_context(
default_default_code=default_code)).create(vals)
return super(product_template, self).create(vals)
class product_product(models.Model):
_inherit = "product.product"
default_code = fields.Char(
required=True,
)
| # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields
class product_template(models.Model):
_inherit = "product.template"
default_code = fields.Char(
required=True,
)
class product_product(models.Model):
_inherit = "product.product"
default_code = fields.Char(
required=True,
)
| agpl-3.0 | Python |
6017a7e5de6fc0d0ef9797d980e634e8fe88cd89 | Create stock data dictionary | jrn223/Freestyle | Stock_market_data.py | Stock_market_data.py | # for email functionality, credit @s2t2
import os
import sendgrid
from sendgrid.helpers.mail import * # source of Email, Content, Mail, etc.
# for day of week
import datetime
# to query Google stock data
from pandas_datareader import data
from datetime import date, timedelta
stock_data = []
#Stock data for Apple, Amazon, Activision Blizzard, Hologic Inc, Ligand Pharmaceuticals Inc, Microsoft, Ferrari, T. Rowe Price, Tesla, Vivint Solar Inc
symbols = ['AAPL', 'AMZN', 'ATVI', 'HOLX', 'LGND', 'MSFT', 'RACE', 'TROW', 'TSLA', 'VSLR']
data_source = 'google'
day_of_week = datetime.datetime.today().weekday()
# print(day_of_week) tested day of week functionality working
monday = [0]
other_weekdays = [1,2,3,4]
if day_of_week in monday:
start = str(date.today() - timedelta(days=3))
end = str(date.today())
elif day_of_week in other_weekdays:
start = str(date.today() - timedelta(days=1))
end = str(date.today())
response = data.DataReader(symbols, data_source, start, end)
daily_closing_prices = response.ix["Close"]
print(daily_closing_prices)
def stock_data_builder (ticker_symbol):
stock = {}
stock["ticker"] = ticker_symbol
stock["today_close"] = daily_closing_prices.iloc[1][ticker_symbol]
stock["previous_day_close"] = daily_closing_prices.iloc[0][ticker_symbol]
stock["difference"] = stock["today_close"] - stock["previous_day_close"]
stock_data.append(stock)
for ticker in symbols:
stock_data_builder(ticker)
print(stock_data)
# AUTHENTICATE
# SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
#
# sg = sendgrid.SendGridAPIClient(apikey = SENDGRID_API_KEY)
#
# # COMPILE REQUEST PARAMETERS
#
# subject = "Hello World from the SendGrid Python Library!"
# my_email = Email("jrn223@stern.nyu.edu")
# from_email = my_email
# to_email = my_email
# content = Content("text/plain", "Hello, Email!")
# mail = Mail(from_email, subject, to_email, content)
#
# # ISSUE REQUEST
#
# response = sg.client.mail.send.post(request_body=mail.get())
#
# # PARSE RESPONSE
#
# print(response.status_code)
# print(response.body)
# print(response.headers)
| # for email functionality, credit @s2t2
import os
import sendgrid
from sendgrid.helpers.mail import * # source of Email, Content, Mail, etc.
# for day of week
import datetime
# to query Google stock data
from pandas_datareader import data
from datetime import date, timedelta
#Stock data for Apple, Amazon, Activision Blizzard, Hologic Inc, Ligand Pharmaceuticals Inc, Microsoft, Ferrari, T. Rowe Price, Tesla, Vivint Solar Inc
symbols = ['AAPL', 'AMZN', 'ATVI', 'HOLX', 'LGND', 'MSFT', 'RACE', 'TROW', 'TSLA', 'VSLR']
data_source = 'google'
day_of_week = datetime.datetime.today().weekday()
# print(day_of_week) tested day of week functionality working
monday = [0]
other_weekdays = [1,2,3,4]
if day_of_week in monday:
start = str(date.today() - timedelta(days=3))
end = str(date.today())
elif day_of_week in other_weekdays:
start = str(date.today() - timedelta(days=1))
end = str(date.today())
response = data.DataReader(symbols, data_source, start, end)
daily_closing_prices = response.ix["Close"]
print(daily_closing_prices)
def differnceclosingprice (ticker_symbols):
yesterday_price = daily_closing_prices.iloc[0][ticker_symbols]
today_price = daily_closing_prices.iloc[1][ticker_symbols]
return today_price - yesterday_price
for ticker in symbols:
print(differnceclosingprice(ticker))
# aapl_yesterday = daily_closing_prices.iloc[0]['AAPL']
# aapl_today = daily_closing_prices.iloc[1]['AAPL']
# print(aapl_today - aapl_yesterday)
# AUTHENTICATE
# SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
#
# sg = sendgrid.SendGridAPIClient(apikey = SENDGRID_API_KEY)
#
# # COMPILE REQUEST PARAMETERS
#
# subject = "Hello World from the SendGrid Python Library!"
# my_email = Email("jrn223@stern.nyu.edu")
# from_email = my_email
# to_email = my_email
# content = Content("text/plain", "Hello, Email!")
# mail = Mail(from_email, subject, to_email, content)
#
# # ISSUE REQUEST
#
# response = sg.client.mail.send.post(request_body=mail.get())
#
# # PARSE RESPONSE
#
# print(response.status_code)
# print(response.body)
# print(response.headers)
| mit | Python |
988ce79b3de76d46c482995bb4b8dc724e89c718 | Add configuration parameter whether or not to use astropy's coordinate transformations where relevant | jobovy/galpy,jobovy/galpy,jobovy/galpy,jobovy/galpy | galpy/util/config.py | galpy/util/config.py | import os, os.path
try:
import configparser
except:
from six.moves import configparser
_APY_LOADED= True
try:
from astropy import units
except ImportError:
_APY_LOADED= False
# The default configuration
default_configuration= {'astropy-units':'False',
'astropy-coords':'True',
'ro':'8.',
'vo':'220.'}
default_filename= os.path.join(os.path.expanduser('~'),'.galpyrc')
def write_default(filename):
writeconfig= configparser.ConfigParser()
# Write different sections
writeconfig.add_section('normalization')
writeconfig.set('normalization','ro',
default_configuration['ro'])
writeconfig.set('normalization','vo',
default_configuration['vo'])
writeconfig.add_section('astropy')
writeconfig.set('astropy','astropy-units',
default_configuration['astropy-units'])
writeconfig.set('astropy','astropy-coords',
default_configuration['astropy-coords'])
with open(filename,'w') as configfile:
writeconfig.write(configfile)
return None
# Read the configuration file
__config__= configparser.ConfigParser(default_configuration)
if __config__.read([default_filename,'.galpyrc']) == []:
write_default(default_filename)
__config__.read(default_filename)
# Set configuration variables on the fly
def set_ro(ro):
"""
NAME:
set_ro
PURPOSE:
set the global configuration value of ro (distance scale)
INPUT:
ro - scale in kpc or astropy Quantity
OUTPUT:
(none)
HISTORY:
2016-01-05 - Written - Bovy (UofT)
"""
if _APY_LOADED and isinstance(ro,units.Quantity):
ro= ro.to(units.kpc).value
__config__.set('normalization','ro',str(ro))
def set_vo(vo):
"""
NAME:
set_vo
PURPOSE:
set the global configuration value of vo (velocity scale)
INPUT:
vo - scale in km/s or astropy Quantity
OUTPUT:
(none)
HISTORY:
2016-01-05 - Written - Bovy (UofT)
"""
if _APY_LOADED and isinstance(vo,units.Quantity):
vo= vo.to(units.km/units.s).value
__config__.set('normalization','vo',str(vo))
| import os, os.path
try:
import configparser
except:
from six.moves import configparser
_APY_LOADED= True
try:
from astropy import units
except ImportError:
_APY_LOADED= False
# The default configuration
default_configuration= {'astropy-units':'False',
'ro':'8.',
'vo':'220.'}
default_filename= os.path.join(os.path.expanduser('~'),'.galpyrc')
def write_default(filename):
writeconfig= configparser.ConfigParser()
# Write different sections
writeconfig.add_section('normalization')
writeconfig.set('normalization','ro',
default_configuration['ro'])
writeconfig.set('normalization','vo',
default_configuration['vo'])
writeconfig.add_section('astropy')
writeconfig.set('astropy','astropy-units',
default_configuration['astropy-units'])
with open(filename,'w') as configfile:
writeconfig.write(configfile)
return None
# Read the configuration file
__config__= configparser.ConfigParser(default_configuration)
if __config__.read([default_filename,'.galpyrc']) == []:
write_default(default_filename)
__config__.read(default_filename)
# Set configuration variables on the fly
def set_ro(ro):
"""
NAME:
set_ro
PURPOSE:
set the global configuration value of ro (distance scale)
INPUT:
ro - scale in kpc or astropy Quantity
OUTPUT:
(none)
HISTORY:
2016-01-05 - Written - Bovy (UofT)
"""
if _APY_LOADED and isinstance(ro,units.Quantity):
ro= ro.to(units.kpc).value
__config__.set('normalization','ro',str(ro))
def set_vo(vo):
"""
NAME:
set_vo
PURPOSE:
set the global configuration value of vo (velocity scale)
INPUT:
vo - scale in km/s or astropy Quantity
OUTPUT:
(none)
HISTORY:
2016-01-05 - Written - Bovy (UofT)
"""
if _APY_LOADED and isinstance(vo,units.Quantity):
vo= vo.to(units.km/units.s).value
__config__.set('normalization','vo',str(vo))
| bsd-3-clause | Python |
dae9d7d67aaf2ab8d39b232d243d860d9597bbd2 | Add error when serializer setup has error | NorakGithub/django-excel-tools | django_excel_tools/exceptions.py | django_excel_tools/exceptions.py | class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
class SerializerConfigError(BaseExcelError):
pass
| class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
| mit | Python |
b0ce49d2ceb93b9a31472582e513b4a7a3c33a3e | Fix warn | klen/fquest,klen/tweetchi,klen/Flask-Foundation | base/auth/manager.py | base/auth/manager.py | from flask import Blueprint
from flask_login import LoginManager, login_required, logout_user, login_user, current_user
from flask_principal import Principal, identity_changed, Identity, AnonymousIdentity, identity_loaded, UserNeed, RoleNeed
from ..ext import db
from .models import User
class UserManager(Blueprint):
def __init__(self, *args, **kwargs):
self._login_manager = None
self._principal = None
self.app = None
super(UserManager, self).__init__(*args, **kwargs)
def register(self, app, *args, **kwargs):
" Activate loginmanager and principal. "
if not self._login_manager or self.app != app:
self._login_manager = LoginManager()
self._login_manager.user_callback = self.user_loader
self._login_manager.init_app(app)
self._login_manager.login_view = app.config.get('AUTH_LOGIN_VIEW', 'code.index')
self._login_manager.login_message = u'You need to be signed in for this page.'
self.app = app
if not self._principal:
self._principal = Principal(app)
identity_loaded.connect(self.identity_loaded)
super(UserManager, self).register(app, *args, **kwargs)
@staticmethod
def user_loader(pk):
return User.query.options(db.joinedload(User.roles)).get(pk)
@staticmethod
def login_required(fn):
return login_required(fn)
def logout(self):
identity_changed.send(self.app, identity=AnonymousIdentity())
return logout_user()
def login(self, user):
identity_changed.send(self.app, identity=Identity(user.id))
return login_user(user)
@staticmethod
def identity_loaded(sender, identity):
identity.user = current_user
# Add the UserNeed to the identity
if current_user.is_authenticated():
identity.provides.add(UserNeed(current_user.id))
# Assuming the User model has a list of roles, update the
# identity with the roles that the user provides
for role in current_user.roles:
identity.provides.add(RoleNeed(role.name))
| from flask import Blueprint
from flask_login import LoginManager, login_required, logout_user, login_user, current_user
from flask_principal import Principal, identity_changed, Identity, AnonymousIdentity, identity_loaded, UserNeed, RoleNeed
from ..ext import db
from .models import User
class UserManager(Blueprint):
def __init__(self, *args, **kwargs):
self._login_manager = None
self._principal = None
self.app = None
super(UserManager, self).__init__(*args, **kwargs)
def register(self, app, *args, **kwargs):
" Activate loginmanager and principal. "
if not self._login_manager or self.app != app:
self._login_manager = LoginManager()
self._login_manager.user_callback = self.user_loader
self._login_manager.setup_app(app)
self._login_manager.login_view = app.config.get('AUTH_LOGIN_VIEW', 'code.index')
self._login_manager.login_message = u'You need to be signed in for this page.'
self.app = app
if not self._principal:
self._principal = Principal(app)
identity_loaded.connect(self.identity_loaded)
super(UserManager, self).register(app, *args, **kwargs)
@staticmethod
def user_loader(pk):
return User.query.options(db.joinedload(User.roles)).get(pk)
@staticmethod
def login_required(fn):
return login_required(fn)
def logout(self):
identity_changed.send(self.app, identity=AnonymousIdentity())
return logout_user()
def login(self, user):
identity_changed.send(self.app, identity=Identity(user.id))
return login_user(user)
@staticmethod
def identity_loaded(sender, identity):
identity.user = current_user
# Add the UserNeed to the identity
if current_user.is_authenticated():
identity.provides.add(UserNeed(current_user.id))
# Assuming the User model has a list of roles, update the
# identity with the roles that the user provides
for role in current_user.roles:
identity.provides.add(RoleNeed(role.name))
| bsd-3-clause | Python |
edde0e7ba52d3f2e7b1c5d15f0c92a0545df33fd | fix rosbag_helper script executer | startcode/apollo,startcode/apollo,startcode/apollo,startcode/apollo,startcode/apollo,startcode/apollo | docs/demo_guide/rosbag_helper.py | docs/demo_guide/rosbag_helper.py | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import urllib
import os
URL_LIST = [
"https://github.com/ApolloAuto/apollo/releases/download/v1.5.0/demo_1.5.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v2.0.0/demo_2.0.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v2.0.0/apollo_2.0_camera_sample.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v2.5.0/demo_2.5.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v1.0.0/demo_1.0.bag",
]
URL_DICT = {}
for link in URL_LIST:
name = link.split("/")[-1]
URL_DICT[name] = link
def retrieve_rosbag(bagname):
if bagname not in URL_DICT:
print(
"bag[%s] is unknown, use one of the following rosbag names:\n%s" %
(bagname, ", ".join([name for name in URL_DICT.keys()])))
return False
url = URL_DICT[bagname]
print("Downloading from %s" % url)
ret = os.system('wget %s -O %s' % (url, bagname))
return ret == 0
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='retrieve demo rosbag file from remote')
parser.add_argument(
'name',
type=str,
help='rosbag names. You can choose one of [%s]' % ", ".join(
[name for name in URL_DICT.keys()]))
args = parser.parse_args()
if retrieve_rosbag(args.name):
print("Download %s success" % args.name)
else:
print("Download %s failed" % args.name)
| #!/usr/bin/env bash
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import urllib
import os
URL_LIST = [
"https://github.com/ApolloAuto/apollo/releases/download/v1.5.0/demo_1.5.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v2.0.0/demo_2.0.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v2.0.0/apollo_2.0_camera_sample.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v2.5.0/demo_2.5.bag",
"https://github.com/ApolloAuto/apollo/releases/download/v1.0.0/demo_1.0.bag",
]
URL_DICT = {}
for link in URL_LIST:
name = link.split("/")[-1]
URL_DICT[name] = link
def retrieve_rosbag(bagname):
if bagname not in URL_DICT:
print(
"bag[%s] is unknown, use one of the following rosbag names:\n%s" %
(bagname, ", ".join([name for name in URL_DICT.keys()])))
return False
url = URL_DICT[bagname]
print("Downloading from %s" % url)
ret = os.system('wget %s -O %s' % (url, bagname))
return ret == 0
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='retrieve demo rosbag file from remote')
parser.add_argument(
'name',
type=str,
help='rosbag names. You can choose one of [%s]' % ", ".join(
[name for name in URL_DICT.keys()]))
args = parser.parse_args()
if retrieve_rosbag(args.name):
print("Download %s success" % args.name)
else:
print("Download %s failed" % args.name)
| apache-2.0 | Python |
3c33e0379b278f21e7d8d149312bd4f2eef48f1f | add fix in function node.clone() | elastic-event-components/e2c,elastic-event-components/e2c,elastic-event-components/e2c,elastic-event-components/e2c | e2c/python/e2c/node.py | e2c/python/e2c/node.py | from inspect import getfullargspec
from typing import Callable, Any, Dict, List
class Node(object):
def __init__(self, comp, name: str, callable: Callable) -> None:
self.name = name
self.comp = comp
self.callable = callable
self.nodes: Dict[str, List['Node']] = {}
self._specs = []
def on(self, channel: str, node: 'Node'):
if channel not in self.nodes:
self.nodes[channel] = []
self.nodes[channel].append(node)
def run(self, value=None):
from .resolve import resolve
params = resolve(self, [value])
if self.comp.on_trace and self.comp.activate_trace:
self.comp.on_trace(self.name)
self.callable(*params)
def run_with_params(self, *params):
if self.comp.on_trace and self.comp.activate_trace:
self.comp.on_trace(self.name)
return self.callable(*params)
def clone(self) -> 'Node':
node = Node(self.comp, self.name, self.callable)
for name, nodes in self.nodes.items():
for n in nodes:
node.on(name, n)
return node
@property
def specs(self):
if not self._specs and self.callable:
result = getfullargspec(self.callable)
self._specs = dict([(a, result.annotations.get(a, Any)) for a in result.args])
return self._specs
| from inspect import getfullargspec
from typing import Callable, Any, Dict, List
class Node(object):
def __init__(self, comp, name: str, callable: Callable) -> None:
self.name = name
self.comp = comp
self.callable = callable
self.nodes: Dict[str, List['Node']] = {}
self._specs = []
def on(self, channel: str, node: 'Node'):
if channel not in self.nodes:
self.nodes[channel] = []
self.nodes[channel].append(node)
def run(self, value=None):
from .resolve import resolve
params = resolve(self, [value])
if self.comp.on_trace and self.comp.activate_trace:
self.comp.on_trace(self.name)
self.callable(*params)
def run_with_params(self, *params):
if self.comp.on_trace and self.comp.activate_trace:
self.comp.on_trace(self.name)
return self.callable(*params)
def clone(self) -> 'Node':
node = Node(self.comp, self.name, self.callable)
for name, nodes in self.nodes.items():
for n in nodes:
node.on(name, n.clone())
return node
@property
def specs(self):
if not self._specs and self.callable:
result = getfullargspec(self.callable)
self._specs = dict([(a, result.annotations.get(a, Any)) for a in result.args])
return self._specs
| apache-2.0 | Python |
617a8d208c0f6e9af36f9e90ea15ca36a46384d2 | Fix line too long | Alignak-monitoring-contrib/alignak-webui,Alignak-monitoring-contrib/alignak-webui,Alignak-monitoring-contrib/alignak-webui | bin/alignak_webui.py | bin/alignak_webui.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is used to run the application in production environment with WSGI server.
With uWSGI:
uwsgi --plugin python --wsgi-file bin/alignak_webui.py --callable app \
--socket 0.0.0.0:5001 --protocol=http --enable-threads -p 1
"""
import alignak_webui.app
from alignak_webui import webapp as app
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is used to run the application in production environment with WSGI server.
With uWSGI:
uwsgi --wsgi-file alignak_webui.py --callable app --socket 0.0.0.0:8868 --protocol=http --enable-threads
"""
import alignak_webui.app
from alignak_webui import webapp as app
| agpl-3.0 | Python |
369b7412f237d7d8e7280b7c4ba03b86655d45aa | fix some issues with nonstandard module imports | CI-Connect/connect-client,CI-Connect/connect-client,CI-Connect/connect-client | connect/lib/connect/extensions/debug.py | connect/lib/connect/extensions/debug.py |
def run(opts, args, **kwargs):
from IPython.Shell import IPShellEmbed as embed
if htcondor:
import classad
schedd = htcondor.Schedd()
r = schedd.query()
params = {}
for result in r:
for k in result.keys():
if k in params:
params[k] += 1
else:
params[k] = 1
common = []
for k, v in params.items():
if v == len(r):
common.append(k)
common.sort()
embed()()
| import htcondor
import classad
def run(opts, args, **kwargs):
from IPython.Shell import IPShellEmbed as embed
schedd = htcondor.Schedd()
r = schedd.query()
params = {}
for result in r:
for k in result.keys():
if k in params:
params[k] += 1
else:
params[k] = 1
common = []
for k, v in params.items():
if v == len(r):
common.append(k)
common.sort()
embed()()
| apache-2.0 | Python |
80d6c3de821a77985d434fcbe50b379f255b1b2e | set version to 1.1.0 | femueller/python-n26 | n26/__init__.py | n26/__init__.py | __version__ = '1.1.0'
| __version__ = '1.0.0'
| mit | Python |
3feeddcf34928e9c1bca4c9de0f5028686085c25 | Update messages API | Grum-Hackdee/grum-web,Grum-Hackdee/grum-web,Grum-Hackdee/grum-web,Grum-Hackdee/grum-web | grum/api/messages.py | grum/api/messages.py | from flask import jsonify
from flask.ext.restful import Resource
from .models import Message
class Messages(Resource):
def get(self, message_id):
msg = Message.query.filter_by(id=message_id).first_or_404()
return jsonify(message={
'id': msg.id,
'from': msg.sender,
'from_nice': msg.sender_nice,
'timestamp': msg.sent_at,
'subject': msg.subject,
'html': msg.html,
'html_stripped': msg.html_stripped,
'plaintext': msg.plaintext,
'plaintext_stripped': msg.plaintext_stripped,
'plaintext_stripped_signature': msg.plaintext_stripped_signature
}) | from flask.ext.restful import Resource
class Messages(Resource):
def get(self):
return "hello friend" | mit | Python |
33f050149cbb4d89f45505322511b65797456e74 | Remove filter_list from ndb | PinaeOS/ndb-py,node-db/ndb-py | ndb/__init__.py | ndb/__init__.py | #coding=utf-8
import statement
import common
import operate
__version__ = "1.0"
def read(filename):
return common.read(filename)
def read_string(data):
return common.read_string(data)
def write_node(filename, name, node, indent_flag = '\t'):
common.write_node(filename, name, node, indent_flag)
def print_node(name, node, indent_flag = '\t'):
return common.print_node(name, node, indent_flag)
def print_xml(name, node, indent_flag = '\t'):
return common.print_xml(name, node, indent_flag)
def execute(node, query, action = None):
return statement.Statement().execute(node, query, action)
| #coding=utf-8
import statement
import common
import operate
__version__ = "1.0"
def read(filename):
return common.read(filename)
def read_string(data):
return common.read_string(data)
def write_node(filename, name, node, indent_flag = '\t'):
common.write_node(filename, name, node, indent_flag)
def print_node(name, node, indent_flag = '\t'):
return common.print_node(name, node, indent_flag)
def print_xml(name, node, indent_flag = '\t'):
return common.print_xml(name, node, indent_flag)
def execute(node, query, action = None):
return statement.Statement().execute(node, query, action)
def filter_list(table, query=None, union=False, sort_key=None):
return operate.filte(table, query, union, sort_key) | apache-2.0 | Python |
385f6593fa71f2de120e431fbed12f88565b2f46 | remove basename from AtomicBlobs.put as well | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/blobs/atomic.py | corehq/blobs/atomic.py | from corehq.blobs import DEFAULT_BUCKET
from corehq.blobs.exceptions import InvalidContext
class AtomicBlobs(object):
"""A blob db wrapper that can put and delete blobs atomically
Usage:
with AtomicBlobs(get_blob_db()) as db:
# do stuff here that puts or deletes blobs
db.delete(old_blob_id)
info = db.put(content)
save(info, deleted=old_blob_id)
If an exception occurs inside the `AtomicBlobs` context then all
blob write operations (puts and deletes) will be rolled back.
"""
def __init__(self, db):
self.db = db
self.puts = None
self.deletes = None
def put(self, content, bucket=DEFAULT_BUCKET):
if self.puts is None:
raise InvalidContext("AtomicBlobs context is not active")
info = self.db.put(content, bucket=bucket)
self.puts.append((info, bucket))
return info
def get(self, *args, **kw):
return self.db.get(*args, **kw)
def delete(self, *args, **kw):
"""Delete a blob or bucket of blobs
NOTE blobs will not actually be deleted until the context exits,
so subsequent gets inside the context will return an object even
though the blob or bucket has been queued for deletion.
"""
if self.puts is None:
raise InvalidContext("AtomicBlobs context is not active")
self.db.get_args_for_delete(*args, **kw) # validate args
self.deletes.append((args, kw))
return None # result is unknown
def copy_blob(self, *args, **kw):
raise NotImplementedError
def get_identifier(self, *args, **kw):
return self.db.get_identifier(*args, **kw)
def __enter__(self):
self.puts = []
self.deletes = []
return self
def __exit__(self, exc_type, exc_value, tb):
puts, deletes = self.puts, self.deletes
self.puts = None
self.deletes = None
if exc_type is None:
for args, kw in deletes:
self.db.delete(*args, **kw)
else:
for info, bucket in puts:
self.db.delete(info.identifier, bucket)
| from corehq.blobs import DEFAULT_BUCKET
from corehq.blobs.exceptions import InvalidContext
class AtomicBlobs(object):
"""A blob db wrapper that can put and delete blobs atomically
Usage:
with AtomicBlobs(get_blob_db()) as db:
# do stuff here that puts or deletes blobs
db.delete(old_blob_id)
info = db.put(content)
save(info, deleted=old_blob_id)
If an exception occurs inside the `AtomicBlobs` context then all
blob write operations (puts and deletes) will be rolled back.
"""
def __init__(self, db):
self.db = db
self.puts = None
self.deletes = None
def put(self, content, basename="", bucket=DEFAULT_BUCKET):
if self.puts is None:
raise InvalidContext("AtomicBlobs context is not active")
info = self.db.put(content, bucket=bucket)
self.puts.append((info, bucket))
return info
def get(self, *args, **kw):
return self.db.get(*args, **kw)
def delete(self, *args, **kw):
"""Delete a blob or bucket of blobs
NOTE blobs will not actually be deleted until the context exits,
so subsequent gets inside the context will return an object even
though the blob or bucket has been queued for deletion.
"""
if self.puts is None:
raise InvalidContext("AtomicBlobs context is not active")
self.db.get_args_for_delete(*args, **kw) # validate args
self.deletes.append((args, kw))
return None # result is unknown
def copy_blob(self, *args, **kw):
raise NotImplementedError
def get_identifier(self, *args, **kw):
return self.db.get_identifier(*args, **kw)
def __enter__(self):
self.puts = []
self.deletes = []
return self
def __exit__(self, exc_type, exc_value, tb):
puts, deletes = self.puts, self.deletes
self.puts = None
self.deletes = None
if exc_type is None:
for args, kw in deletes:
self.db.delete(*args, **kw)
else:
for info, bucket in puts:
self.db.delete(info.identifier, bucket)
| bsd-3-clause | Python |
769c7dffb5938b02b681cc4718589ef79ff68b7b | Update Mopsa.py to handle additional options passed through benchexec's xml files | ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec | benchexec/tools/mopsa.py | benchexec/tools/mopsa.py | # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2022 Raphaël Monat
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Mopsa.
URL: https://gitlab.com/mopsa/mopsa-analyzer/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("mopsa-sv-comp", subdir="bin/")
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "Mopsa"
def cmdline(self, executable, options, task, rlimits):
cmd = [executable, "--program", *task.input_files]
if task.options is not None and "data_model" in task.options:
cmd += ["--data_model", task.options.get("data_model")]
if task.property_file:
cmd += ["--property", task.property_file]
return cmd + list(options)
def determine_result(self, run):
if run.was_timeout:
return "TIMEOUT"
r = run.output[-1] or run.output[-2] # last non-empty line
r = r.lower()
if r.startswith("true"):
return result.RESULT_TRUE_PROP
elif r.startswith("unknown"):
return result.RESULT_UNKNOWN
elif r.startswith("error"):
return result.RESULT_ERROR + r[len("ERROR") :]
else:
return result.RESULT_ERROR + f"(unknown: {r})"
| # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2022 Raphaël Monat
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Mopsa.
URL: https://gitlab.com/mopsa/mopsa-analyzer/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("mopsa-sv-comp", subdir="bin/")
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "Mopsa"
def cmdline(self, executable, options, task, rlimits):
cmd = [executable, "--program", *task.input_files]
if task.options is not None and "data_model" in task.options:
cmd += ["--data_model", task.options.get("data_model")]
if task.property_file:
cmd += ["--property", task.property_file]
return cmd
def determine_result(self, run):
if run.was_timeout:
return "TIMEOUT"
r = run.output[-1] or run.output[-2] # last non-empty line
if r.startswith("true"):
return result.RESULT_TRUE_PROP
elif r.startswith("unknown"):
return result.RESULT_UNKNOWN
elif r.startswith("ERROR"):
return result.RESULT_ERROR + r[len("ERROR") :]
else:
return result.RESULT_ERROR + f"(unknown: {r})"
| apache-2.0 | Python |
959f766b1fced4f27c30251f1b78b694a2415326 | Bump version back to 2.0.0b5.dev0 | rigetticomputing/pyquil | pyquil/__init__.py | pyquil/__init__.py | __version__ = "2.0.0b5.dev0"
from pyquil.quil import Program
from pyquil.api import list_quantum_computers, get_qc
| __version__ = "2.0.0b4"
from pyquil.quil import Program
from pyquil.api import list_quantum_computers, get_qc
| apache-2.0 | Python |
2e6f0934c67baf27cdf3930d48d6b733995e413f | Make the query docstring a bit clearer | ClusterHQ/benchmark-server,ClusterHQ/benchmark-server | benchmark/_interfaces.py | benchmark/_interfaces.py | # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Interfaces for the benchmarking results server.
"""
from zope.interface import Interface
class IBackend(Interface):
    """
    A backend for storing and querying the results.

    All methods are asynchronous and return Deferreds.
    """

    def store(result):
        """
        Store a single benchmarking result.

        :param dict result: The result in the JSON compatible format.
        :return: A Deferred that produces an identifier for the stored
            result.
        """

    def retrieve(id):
        """
        Retrieve a previously stored result by its identifier.

        :param id: The identifier of the result.
        :return: A Deferred that fires with the result in the JSON format.
        """

    def query(filter, limit):
        """
        Retrieve previously stored results that match the given filter.

        The returned results will have the same values as specified in the
        filter for the fields that are specified in the filter.

        :param dict filter: The filter in the JSON compatible format.
        :param int limit: The number of the results to return. The
            results are sorted by their timestamp in descending order.
        :return: A Deferred that fires with a list of the results
            in the JSON compatible format.
        """

    def delete(id):
        """
        Delete a previously stored result by its identifier.

        :param id: The identifier of the result.
        :return: A Deferred that fires when the result is removed.
        """
| # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Interfaces for the benchmarking results server.
"""
from zope.interface import Interface
class IBackend(Interface):
"""
A backend for storing and querying the results.
"""
def store(result):
"""
Store a single benchmarking result.
:param dict result: The result in the JSON compatible format.
:return: A Deferred that produces an identifier for the stored
result.
"""
def retrieve(id):
"""
Retrieve a previously stored result by its identifier.
:param id: The identifier of the result.
:return: A Deferred that fires with the result in the JSON format.
"""
def query(filter, limit):
"""
Retrieve previously stored results that match the given filter.
The returned results will have the same values as specified in the
filter for the fields that are specified in the filter.
:param dict filter: The filter in the JSON compatible format.
:param int limit: The number of the *latest* results to return.
:return: A Deferred that fires with a list of the results
in the JSON compatible format.
"""
def delete(id):
"""
Delete a previously stored result by its identifier.
:param id: The identifier of the result.
:return: A Deferred that fires when the result is removed.
"""
| apache-2.0 | Python |
33c33b792dc1ed9acdd3f5331afd5a42385d20ce | Use Write Event in echoserver.py | eriol/circuits,treemo/circuits,nizox/circuits,treemo/circuits,eriol/circuits,eriol/circuits,treemo/circuits | examples/echoserver.py | examples/echoserver.py | #!/usr/bin/env python
from circuits.net.sockets import TCPServer, Write
class EchoServer(TCPServer):
    """TCP echo server: every chunk read from a socket is written back to it."""

    def read(self, sock, data):
        # Fired for each chunk received on `sock`; echo it back by pushing
        # a Write event rather than writing to the socket directly.
        self.push(Write(sock, data))
EchoServer(8000).run()
| #!/usr/bin/env python
from circuits.net.sockets import TCPServer
class EchoServer(TCPServer):
def read(self, sock, data):
self.write(sock, data)
EchoServer(8000).run()
| mit | Python |
3122965316a6d8f99d737fa46450ed9aeb5c4811 | make item_number unique | byteweaver/django-eca-catalogue | eca_catalogue/abstract_models.py | eca_catalogue/abstract_models.py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from treebeard.mp_tree import MP_Node
class NSDMixin(models.Model):
    """Abstract base adding name/slug/description fields to catalogue models."""
    name = models.CharField(_("Name"), max_length=128)
    # Unique URL identifier, typically derived from the name.
    slug = models.SlugField(_("Slug"), max_length=128, unique=True)
    description = models.TextField(_("Description"), blank=True, null=True)

    class Meta:
        abstract = True
class AbstractProductCategory(NSDMixin):
class Meta:
abstract = True
verbose_name = _("Product category")
verbose_name_plural = _("Product categories")
ordering = ['name']
def __unicode__(self):
return self.name
class AbstractNestedProductCategory(MP_Node, NSDMixin):
    """Product category stored as a materialized-path tree (django-treebeard)."""

    class Meta:
        abstract = True
        verbose_name = _("Nested product category")
        verbose_name_plural = _("Nested product categories")
        ordering = ['name']

    def __unicode__(self):
        # Prefix the parent's representation so non-root categories render
        # their full ancestry, e.g. "Parent -> Child".
        if not self.is_root():
            return unicode(self.get_parent()) + " -> " + self.name
        return self.name
class AbstractProduct(NSDMixin):
    """Abstract catalogue product, identified by a unique item number."""
    item_number = models.CharField(_("Item number"), max_length=255, unique=True)

    class Meta:
        abstract = True
        verbose_name = _("Product")
        verbose_name_plural = _("Products")
        ordering = ['name']

    def __unicode__(self):
        # e.g. "Widget (SKU-123)"
        return "%s (%s)" % (self.name, self.item_number)
class AbstractSellingPoint(models.Model):
text = models.CharField(_("Text"), max_length=255)
class Meta:
abstract = True
verbose_name = _("Selling point")
verbose_name_plural = _("Selling points")
def __unicode__(self):
return self.text
| from django.db import models
from django.utils.translation import ugettext_lazy as _
from treebeard.mp_tree import MP_Node
class NSDMixin(models.Model):
name = models.CharField(_("Name"), max_length=128)
slug = models.SlugField(_("Slug"), max_length=128, unique=True)
description = models.TextField(_("Description"), blank=True, null=True)
class Meta:
abstract = True
class AbstractProductCategory(NSDMixin):
class Meta:
abstract = True
verbose_name = _("Product category")
verbose_name_plural = _("Product categories")
ordering = ['name']
def __unicode__(self):
return self.name
class AbstractNestedProductCategory(MP_Node, NSDMixin):
class Meta:
abstract = True
verbose_name = _("Nested product category")
verbose_name_plural = _("Nested product categories")
ordering = ['name']
def __unicode__(self):
if not self.is_root():
return unicode(self.get_parent()) + " -> " + self.name
return self.name
class AbstractProduct(NSDMixin):
item_number = models.CharField(_("Item number"), max_length=255)
class Meta:
abstract = True
verbose_name = _("Product")
verbose_name_plural = _("Products")
ordering = ['name']
def __unicode__(self):
return "%s (%s)" % (self.name, self.item_number)
class AbstractSellingPoint(models.Model):
text = models.CharField(_("Text"), max_length=255)
class Meta:
abstract = True
verbose_name = _("Selling point")
verbose_name_plural = _("Selling points")
def __unicode__(self):
return self.text
| bsd-3-clause | Python |
857897a88811153f7460472219fd78d4e68bdc12 | bump pkg version | sckott/habanero | habanero/__init__.py | habanero/__init__.py | # -*- coding: utf-8 -*-
# habanero
"""
habanero library
~~~~~~~~~~~~~~~~~~~~~
habanero is a low level client for the Crossref search API.
Usage::
from habanero import Crossref
cr = Crossref()
# setup a different base URL
Crossref(base_url = "http://some.other.url")
# setup an api key
Crossref(api_key = "123456")
# Make request against works route
cr.works(ids = '10.1371/journal.pone.0033693')
# curl options
## verbose curl output
### setup first
import requests
import logging
import http.client
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
### then make request
cr.works(query = "ecology")
"""
__title__ = "habanero"
__version__ = "1.1.0"
__author__ = "Scott Chamberlain"
__license__ = "MIT"
from .crossref import Crossref, WorksContainer
from .cn import content_negotiation, csl_styles
from .counts import citation_count
from .exceptions import *
| # -*- coding: utf-8 -*-
# habanero
"""
habanero library
~~~~~~~~~~~~~~~~~~~~~
habanero is a low level client for the Crossref search API.
Usage::
from habanero import Crossref
cr = Crossref()
# setup a different base URL
Crossref(base_url = "http://some.other.url")
# setup an api key
Crossref(api_key = "123456")
# Make request against works route
cr.works(ids = '10.1371/journal.pone.0033693')
# curl options
## verbose curl output
### setup first
import requests
import logging
import http.client
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
### then make request
cr.works(query = "ecology")
"""
__title__ = "habanero"
__version__ = "1.0.1"
__author__ = "Scott Chamberlain"
__license__ = "MIT"
from .crossref import Crossref, WorksContainer
from .cn import content_negotiation, csl_styles
from .counts import citation_count
from .exceptions import *
| mit | Python |
85fa2d64a697cb4049f20414426183738ee7ebc5 | Add key-word arguments to layout.Layout | cortesi/countershape,mhils/countershape,samtaufa/countershape,cortesi/countershape,mhils/countershape,samtaufa/countershape | countershape/layout.py | countershape/layout.py | import html, template
_dtd = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
class Layout:
    """
    A basic framework for layout objects.

    Builds a reusable XHTML 1.0 Strict page skeleton (``self.frame``) with
    placeholder Values for "header", "pageTitle" and "body"; calling the
    layout with a page fills those placeholders from the page's layout
    components.
    """
    # CSS class applied to <body>; subclasses may override.
    bodyClass = ""
    # Names of the page components substituted into the frame on render.
    components = ("pageTitle", "body", "header")

    def __init__(self, path = None, **kwargs):
        # path: optional template file used as the page body; when absent
        # the body is a plain "body" placeholder Value.
        # kwargs: extra attributes forwarded to the BODY element.
        if path:
            body = template.File(False, path)
        else:
            body = html.Value("body")
        meta = html.META(
            content = "text/html; charset=utf-8"
        )
        # "http-equiv" contains a hyphen, so it cannot be passed as a
        # keyword argument; set it via item assignment instead.
        meta["http-equiv"] = "Content-Type"
        htmlBody = html.BODY(body, **kwargs)
        if self.bodyClass:
            htmlBody["class"] = self.bodyClass
        self.frame = html.Group(
            html.RawStr(_dtd),
            html.HTML(
                html.HEAD(
                    meta,
                    html.Value("header"),
                    html.TITLE(html.Value("pageTitle"))
                ),
                htmlBody,
                xmlns = "http://www.w3.org/1999/xhtml",
            )
        )

    def __call__(self, page):
        """Render *page* by filling the frame's placeholder components."""
        data = {}
        for i in self.components:
            c = page._getLayoutComponent(i)
            # Python 2 codebase: coerce each component to unicode before
            # substitution into the frame.
            data[i] = unicode(c)
        return self.frame(**data)
| import html, template
_dtd = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
class Layout:
"""
A basic framework for layout objects.
"""
bodyClass = ""
components = ("pageTitle", "body", "header")
def __init__(self, path = None):
if path:
body = template.File(False, path)
else:
body = html.Value("body")
meta = html.META(
content = "text/html; charset=utf-8"
)
meta["http-equiv"] = "Content-Type"
htmlBody = html.BODY(body)
if self.bodyClass:
htmlBody["class"] = self.bodyClass
self.frame = html.Group(
html.RawStr(_dtd),
html.HTML(
html.HEAD(
meta,
html.Value("header"),
html.TITLE(html.Value("pageTitle"))
),
htmlBody,
xmlns = "http://www.w3.org/1999/xhtml",
)
)
def __call__(self, page):
data = {}
for i in self.components:
c = page._getLayoutComponent(i)
data[i] = unicode(c)
return self.frame(**data)
| mit | Python |
da548f7d2e69b76901f4f68ae2112946bb9632f6 | Bump version to 3.0.1 | pystorm/pystorm | pystorm/version.py | pystorm/version.py | # -*- coding: utf-8 -*-
# Copyright 2014-2015 Parsely, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module exists solely for version information so we only have to change it
in one place. Based on the suggestion `here. <http://bit.ly/16LbuJF>`_
:organization: Parsely
'''
def _safe_int(string):
""" Simple function to convert strings into ints without dying. """
try:
return int(string)
except ValueError:
return string
__version__ = '3.0.1'
VERSION = tuple(_safe_int(x) for x in __version__.split('.'))
| # -*- coding: utf-8 -*-
# Copyright 2014-2015 Parsely, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module exists solely for version information so we only have to change it
in one place. Based on the suggestion `here. <http://bit.ly/16LbuJF>`_
:organization: Parsely
'''
def _safe_int(string):
""" Simple function to convert strings into ints without dying. """
try:
return int(string)
except ValueError:
return string
__version__ = '3.0.0'
VERSION = tuple(_safe_int(x) for x in __version__.split('.'))
| apache-2.0 | Python |
b2befc496741a904f8988b2ec8fa5b57aba96a91 | Fix the path to read the sample file from | e-mission/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server | bin/deploy/giles_conf.py | bin/deploy/giles_conf.py | import json
# Generate the deploy-specific Giles config from the checked-in sample,
# overriding the service endpoint for this deployment.
sample_path = "conf/net/int_service/giles_conf.json.sample"
# `with` guarantees the handle is closed even if json.loads raises,
# unlike the previous manual open()/close() pair.
with open(sample_path, "r") as f:
    data = json.loads(f.read())

real_path = "conf/net/int_service/giles_conf.json"
data['giles_base_url'] = 'http://50.17.111.19:8079'
with open(real_path, "w") as f:
    f.write(json.dumps(data))
| import json
# Generate the deploy-specific Giles config from the checked-in sample.
sample_path = "conf/net/int_service/giles_conf.json.sample"
# Fix: original code called open(path, "r") but `path` was never defined
# (NameError at runtime); the sample file declared above is what should
# be read. Also use `with` so handles are closed even on parse errors.
with open(sample_path, "r") as f:
    data = json.loads(f.read())

real_path = "conf/net/int_service/giles_conf.json"
data['giles_base_url'] = 'http://50.17.111.19:8079'
with open(real_path, "w") as f:
    f.write(json.dumps(data))
| bsd-3-clause | Python |
86636212c38592769abb421c9338174673fdbcaa | remove make_purchase_invoice from demo script | gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext | erpnext/demo/user/fixed_asset.py | erpnext/demo/user/fixed_asset.py |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.make_random import get_random
from erpnext.assets.doctype.asset.asset import make_sales_invoice
from erpnext.assets.doctype.asset.depreciation import post_depreciation_entries, scrap_asset
def work():
    """Demo routine: post depreciation, then scrap one asset and sell another."""
    frappe.set_user(frappe.db.get_global('demo_accounts_user'))

    # Enable booking asset depreciation entry automatically
    frappe.db.set_value("Accounts Settings", None, "book_asset_depreciation_entry_automatically", 1)

    # post depreciation entries as on today
    post_depreciation_entries()

    # scrap a random asset (disposal account must be set on the company first)
    frappe.db.set_value("Company", "Wind Power LLC", "disposal_account", "Gain/Loss on Asset Disposal - WPL")
    asset = get_random_asset()
    scrap_asset(asset.name)

    # Sell a random asset
    sell_an_asset()
def sell_an_asset():
    """Create and submit a Sales Invoice for a randomly picked asset."""
    asset = get_random_asset()
    si = make_sales_invoice(asset.name, asset.item_code, "Wind Power LLC")
    si.customer = get_random("Customer")
    # Demo pricing: sell at 80% of the depreciated value, or at 90% of the
    # gross purchase amount when no depreciation has been booked yet.
    si.get("items")[0].rate = asset.value_after_depreciation * 0.8 \
        if asset.value_after_depreciation else asset.gross_purchase_amount * 0.9
    si.save()
    si.submit()
def get_random_asset():
    """Pick one submitted Asset that is not yet scrapped or sold, at random.

    Randomness comes from SQL ``order by rand() limit 1``; raises
    IndexError if no eligible asset exists.
    """
    return frappe.db.sql(""" select name, item_code, value_after_depreciation, gross_purchase_amount
        from `tabAsset`
        where docstatus=1 and status not in ("Scrapped", "Sold") order by rand() limit 1""", as_dict=1)[0]
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.make_random import get_random
from erpnext.assets.doctype.asset.asset import make_purchase_invoice, make_sales_invoice
from erpnext.assets.doctype.asset.depreciation import post_depreciation_entries, scrap_asset
def work():
frappe.set_user(frappe.db.get_global('demo_accounts_user'))
asset_list = make_asset_purchase_entry()
if not asset_list:
# fixed_asset.work() already run
return
# Enable booking asset depreciation entry automatically
frappe.db.set_value("Accounts Settings", None, "book_asset_depreciation_entry_automatically", 1)
# post depreciation entries as on today
post_depreciation_entries()
# scrap a random asset
frappe.db.set_value("Company", "Wind Power LLC", "disposal_account", "Gain/Loss on Asset Disposal - WPL")
asset = get_random_asset()
scrap_asset(asset.name)
# Sell a random asset
sell_an_asset()
def make_asset_purchase_entry():
asset_list = frappe.get_all("Asset", filters={"purchase_invoice": ["in", ("", None)]},
fields=["name", "item_code", "gross_purchase_amount", "company", "purchase_date"])
# make purchase invoice
for asset in asset_list:
pi = make_purchase_invoice(asset.name, asset.item_code, asset.gross_purchase_amount,
asset.company, asset.purchase_date)
pi.supplier = get_random("Supplier")
pi.save()
pi.submit()
return asset_list
def sell_an_asset():
asset = get_random_asset()
si = make_sales_invoice(asset.name, asset.item_code, "Wind Power LLC")
si.customer = get_random("Customer")
si.get("items")[0].rate = asset.value_after_depreciation * 0.8 \
if asset.value_after_depreciation else asset.gross_purchase_amount * 0.9
si.save()
si.submit()
def get_random_asset():
return frappe.db.sql(""" select name, item_code, value_after_depreciation, gross_purchase_amount
from `tabAsset`
where docstatus=1 and status not in ("Scrapped", "Sold") order by rand() limit 1""", as_dict=1)[0]
| agpl-3.0 | Python |
472d626fcc87b7495967ca41bbed500d6d63f593 | Add NoLogger, a Logger that does not logs | alvarogzp/telegram-bot,alvarogzp/telegram-bot | bot/logger/logger.py | bot/logger/logger.py | import time
from bot.action.util.textformat import FormattedText
from bot.logger.message_sender import MessageSender
LOG_ENTRY_FORMAT = "{time} [{tag}] {text}"
TEXT_SEPARATOR = " | "
class Logger:
    """Base class for loggers that emit entries through a MessageSender.

    Subclasses implement ``_get_text_to_send`` to turn a (tag, *texts)
    entry into whatever representation their sender expects.
    """

    def __init__(self, sender: MessageSender):
        self.sender = sender

    def log(self, tag, *texts):
        # Template method: format via the subclass hook, then send.
        text = self._get_text_to_send(tag, *texts)
        self.sender.send(text)

    def _get_text_to_send(self, tag, *texts):
        raise NotImplementedError()
class PlainTextLogger(Logger):
    """Formats log entries as plain strings via LOG_ENTRY_FORMAT."""

    def _get_text_to_send(self, tag: str, *texts: str):
        # Join the entry's parts, then stamp with the current locale time.
        text = TEXT_SEPARATOR.join(texts)
        return LOG_ENTRY_FORMAT.format(time=time.strftime("%X"), tag=tag, text=text)
class FormattedTextLogger(Logger):
    """Formats log entries as FormattedText, preserving each part's styling."""

    def _get_text_to_send(self, tag: FormattedText, *texts: FormattedText):
        # Join the formatted parts with a plain separator, then fill the
        # LOG_ENTRY_FORMAT template while keeping tag/text formatting intact.
        text = FormattedText().normal(TEXT_SEPARATOR).join(texts)
        return FormattedText().normal(LOG_ENTRY_FORMAT).start_format()\
            .normal(time=time.strftime("%X")).concat(tag=tag).concat(text=text).end_format()
class NoLogger(Logger):
    """Null-object Logger: accepts log calls and silently discards them."""

    def __init__(self):
        # No sender is needed since nothing is ever sent.
        super().__init__(None)

    def log(self, tag, *texts):
        pass

    def _get_text_to_send(self, tag, *texts):
        pass
class LoggerFactory:
    """Creates Logger instances from a configuration string."""

    @classmethod
    def get(cls, logger_type: str, sender: MessageSender):
        """Return the Logger for *logger_type*: "formatted", "plain" or "none"."""
        builders = {
            "formatted": lambda: cls.get_formatted(sender),
            "plain": lambda: cls.get_plain(sender),
            "none": lambda: cls.get_no_logger(),
        }
        builder = builders.get(logger_type)
        if builder is None:
            raise Exception("Unknown Logger requested (" + logger_type + ")")
        return builder()

    @staticmethod
    def get_formatted(sender: MessageSender):
        return FormattedTextLogger(sender)

    @staticmethod
    def get_plain(sender: MessageSender):
        return PlainTextLogger(sender)

    @staticmethod
    def get_no_logger():
        return NoLogger()
| import time
from bot.action.util.textformat import FormattedText
from bot.logger.message_sender import MessageSender
LOG_ENTRY_FORMAT = "{time} [{tag}] {text}"
TEXT_SEPARATOR = " | "
class Logger:
def __init__(self, sender: MessageSender):
self.sender = sender
def log(self, tag, *texts):
text = self._get_text_to_send(tag, *texts)
self.sender.send(text)
def _get_text_to_send(self, tag, *texts):
raise NotImplementedError()
class PlainTextLogger(Logger):
def _get_text_to_send(self, tag: str, *texts: str):
text = TEXT_SEPARATOR.join(texts)
return LOG_ENTRY_FORMAT.format(time=time.strftime("%X"), tag=tag, text=text)
class FormattedTextLogger(Logger):
def _get_text_to_send(self, tag: FormattedText, *texts: FormattedText):
text = FormattedText().normal(TEXT_SEPARATOR).join(texts)
return FormattedText().normal(LOG_ENTRY_FORMAT).start_format()\
.normal(time=time.strftime("%X")).concat(tag=tag).concat(text=text).end_format()
class LoggerFactory:
@classmethod
def get(cls, logger_type: str, sender: MessageSender):
if logger_type == "formatted":
return cls.get_formatted(sender)
elif logger_type == "plain":
return cls.get_plain(sender)
else:
raise Exception("Unknown Logger requested (" + logger_type + ")")
@staticmethod
def get_formatted(sender: MessageSender):
return FormattedTextLogger(sender)
@staticmethod
def get_plain(sender: MessageSender):
return PlainTextLogger(sender)
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.