commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
c16fb96de6154dec7bf0fc934dd9f7e1ac4b69f4 | bump version | curtsies/__init__.py | curtsies/__init__.py | """Terminal-formatted strings"""
__version__='0.1.15'
from .window import FullscreenWindow, CursorAwareWindow
from .input import Input
from .termhelpers import Nonblocking, Cbreak, Termmode
from .formatstring import FmtStr, fmtstr
from .formatstringarray import FSArray, fsarray
| """Terminal-formatted strings"""
__version__='0.1.14'
from .window import FullscreenWindow, CursorAwareWindow
from .input import Input
from .termhelpers import Nonblocking, Cbreak, Termmode
from .formatstring import FmtStr, fmtstr
from .formatstringarray import FSArray, fsarray
| Python | 0 |
0601e5214a75921696f50691285166dcda06288b | switch VR separator to -- | tcpbridge/tcpbridge.py | tcpbridge/tcpbridge.py | #!/usr/bin/env python3
import select
import socket
import sys
class TcpBridge:
def __init__(self):
self.sockets = []
self.socket2remote = {}
def routerintf2addr(self, hostintf):
hostname, interface = hostintf.split("/")
try:
res = socket.getaddrinfo(hostname, "100%02d" % int(interface))
except socket.gaierror:
raise NoVR("Unable to resolve %s" % hostname)
sockaddr = res[0][4]
return sockaddr
def add_p2p(self, p2p):
source, destination = p2p.split("--")
src_router, src_interface = source.split("/")
dst_router, dst_interface = destination.split("/")
src = self.routerintf2addr(source)
dst = self.routerintf2addr(destination)
self.add_bridge(src, dst)
def add_bridge(self, left_addr, right_addr):
left = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
right = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
left.connect(left_addr)
right.connect(right_addr)
# add to list of sockets
self.sockets.append(left)
self.sockets.append(right)
# dict for looking up remote in pair
self.socket2remote[left] = right
self.socket2remote[right] = left
def work(self):
while True:
try:
ir,_,_ = select.select(self.sockets, [], [])
except select.error as exc:
break
for i in ir:
remote = self.socket2remote[i]
buf = i.recv(2048)
if len(buf) == 0:
return
remote.send(buf)
class NoVR(Exception):
""" No virtual router
"""
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--p2p', nargs='+', help='point-to-point link')
args = parser.parse_args()
tt = TcpBridge()
for p2p in args.p2p:
try:
tt.add_p2p(p2p)
except NoVR as exc:
print(exc, " Is it started and did you link it?")
sys.exit(1)
tt.work()
| #!/usr/bin/env python3
import select
import socket
import sys
class TcpBridge:
def __init__(self):
self.sockets = []
self.socket2remote = {}
def routerintf2addr(self, hostintf):
hostname, interface = hostintf.split("/")
try:
res = socket.getaddrinfo(hostname, "100%02d" % int(interface))
except socket.gaierror:
raise NoVR("Unable to resolve %s" % hostname)
sockaddr = res[0][4]
return sockaddr
def add_p2p(self, p2p):
source, destination = p2p.split("-")
src_router, src_interface = source.split("/")
dst_router, dst_interface = destination.split("/")
src = self.routerintf2addr(source)
dst = self.routerintf2addr(destination)
self.add_bridge(src, dst)
def add_bridge(self, left_addr, right_addr):
left = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
right = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
left.connect(left_addr)
right.connect(right_addr)
# add to list of sockets
self.sockets.append(left)
self.sockets.append(right)
# dict for looking up remote in pair
self.socket2remote[left] = right
self.socket2remote[right] = left
def work(self):
while True:
try:
ir,_,_ = select.select(self.sockets, [], [])
except select.error as exc:
break
for i in ir:
remote = self.socket2remote[i]
buf = i.recv(2048)
if len(buf) == 0:
return
remote.send(buf)
class NoVR(Exception):
""" No virtual router
"""
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--p2p', nargs='+', help='point-to-point link')
args = parser.parse_args()
tt = TcpBridge()
for p2p in args.p2p:
try:
tt.add_p2p(p2p)
except NoVR as exc:
print(exc, " Is it started and did you link it?")
sys.exit(1)
tt.work()
| Python | 0 |
e468abbc033a48d0222f50cf85319802f05fc57a | Check doctest | custom/onse/tests.py | custom/onse/tests.py | import doctest
from datetime import date
from nose.tools import assert_equal
from custom.onse import tasks
def test_get_last_quarter():
test_dates = [
(date(2020, 1, 1), '2019Q4'),
(date(2020, 3, 31), '2019Q4'),
(date(2020, 4, 1), '2020Q1'),
(date(2020, 6, 30), '2020Q1'),
(date(2020, 7, 1), '2020Q2'),
(date(2020, 9, 30), '2020Q2'),
(date(2020, 10, 1), '2020Q3'),
(date(2020, 12, 31), '2020Q3'),
]
for test_date, expected_value in test_dates:
last_quarter = tasks.get_last_quarter(test_date)
assert_equal(last_quarter, expected_value)
def test_doctests():
results = doctest.testmod(tasks)
assert results.failed == 0
| from datetime import date
from nose.tools import assert_equal
from custom.onse.tasks import get_last_quarter
def test_get_last_quarter():
test_dates = [
(date(2020, 1, 1), '2019Q4'),
(date(2020, 3, 31), '2019Q4'),
(date(2020, 4, 1), '2020Q1'),
(date(2020, 6, 30), '2020Q1'),
(date(2020, 7, 1), '2020Q2'),
(date(2020, 9, 30), '2020Q2'),
(date(2020, 10, 1), '2020Q3'),
(date(2020, 12, 31), '2020Q3'),
]
for test_date, expected_value in test_dates:
last_quarter = get_last_quarter(test_date)
assert_equal(last_quarter, expected_value)
| Python | 0 |
e6357827a670c71e2489b5468b89a65153719ba4 | Fix syntax (backward compatible) | social/strategies/tornado_strategy.py | social/strategies/tornado_strategy.py | import json
from tornado.template import Loader, Template
from social.utils import build_absolute_uri
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class TornadoTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
path, tpl = tpl.rsplit('/', 1)
return Loader(path).load(tpl).generate(**context)
def render_string(self, html, context):
return Template(html).generate(**context)
class TornadoStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = TornadoTemplateStrategy
def __init__(self, storage, request_handler, tpl=None):
self.request_handler = request_handler
self.request = self.request_handler.request
super(TornadoStrategy, self).__init__(storage, tpl)
def get_setting(self, name):
return self.request_handler.settings[name]
def request_data(self, merge=True):
# Multiple valued arguments not supported yet
return dict((key, val[0])
for key, val in self.request.arguments.iteritems())
def request_host(self):
return self.request.host
def redirect(self, url):
return self.request_handler.redirect(url)
def html(self, content):
self.request_handler.write(content)
def session_get(self, name, default=None):
return self.request_handler.get_secure_cookie(name) or default
def session_set(self, name, value):
self.request_handler.set_secure_cookie(name, str(value))
def session_pop(self, name):
value = self.request_handler.get_secure_cookie(name)
self.request_handler.set_secure_cookie(name, '')
return value
def session_setdefault(self, name, value):
pass
def build_absolute_uri(self, path=None):
return build_absolute_uri('{0}://{1}'.format(self.request.protocol,
self.request.host),
path)
def partial_to_session(self, next, backend, request=None, *args, **kwargs):
return json.dumps(super(TornadoStrategy, self).partial_to_session(
next, backend, request=request, *args, **kwargs
))
def partial_from_session(self, session):
if session:
return super(TornadoStrategy, self).partial_to_session(
json.loads(session)
)
| import json
from tornado.template import Loader, Template
from social.utils import build_absolute_uri
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class TornadoTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
path, tpl = tpl.rsplit('/', 1)
return Loader(path).load(tpl).generate(**context)
def render_string(self, html, context):
return Template(html).generate(**context)
class TornadoStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = TornadoTemplateStrategy
def __init__(self, storage, request_handler, tpl=None):
self.request_handler = request_handler
self.request = self.request_handler.request
super(TornadoStrategy, self).__init__(storage, tpl)
def get_setting(self, name):
return self.request_handler.settings[name]
def request_data(self, merge=True):
# Multiple valued arguments not supported yet
return {key: val[0] for key, val in self.request.arguments.iteritems()}
def request_host(self):
return self.request.host
def redirect(self, url):
return self.request_handler.redirect(url)
def html(self, content):
self.request_handler.write(content)
def session_get(self, name, default=None):
return self.request_handler.get_secure_cookie(name) or default
def session_set(self, name, value):
self.request_handler.set_secure_cookie(name, str(value))
def session_pop(self, name):
value = self.request_handler.get_secure_cookie(name)
self.request_handler.set_secure_cookie(name, '')
return value
def session_setdefault(self, name, value):
pass
def build_absolute_uri(self, path=None):
return build_absolute_uri('{0}://{1}'.format(self.request.protocol,
self.request.host),
path)
def partial_to_session(self, next, backend, request=None, *args, **kwargs):
return json.dumps(super(TornadoStrategy, self).partial_to_session(
next, backend, request=request, *args, **kwargs
))
def partial_from_session(self, session):
if session:
return super(TornadoStrategy, self).partial_to_session(
json.loads(session)
)
| Python | 0.000001 |
e6c994b87fed7c12fae4b52c6311d105fe45ddbf | Make logging even better | giftwrap_plugins/builders/package_meta.py | giftwrap_plugins/builders/package_meta.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2015, IBM
# Copyright 2015, Craig Tracey <craigtracey@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import csv
import logging
import requests
from collections import OrderedDict
from giftwrap.builders.package_builder import PackageBuilder
from six import StringIO
BASE_PYPI_URL = 'http://pypi.python.org/pypi/%(package)s/%(version)s/json'
ordered_fieldnames = OrderedDict([
('project_name', None),
('package', None),
('version', None),
('homepage', None),
('license_info', None),
])
LOG = logging.getLogger(__name__)
class PackageMetaBuilder(PackageBuilder):
def __init__(self, build_spec):
super(PackageMetaBuilder, self).__init__(build_spec)
self._project_deps = {}
logging.getLogger("requests").setLevel(logging.WARNING)
def _finalize_project_build(self, project):
super(PackageMetaBuilder, self)._finalize_project_build(project)
self._log_metadata(project)
def _finalize_build(self):
super(PackageMetaBuilder, self)._finalize_build()
logged_deps = ""
for (project_name, deps_info) in self._project_deps.iteritems():
logged_deps += deps_info
LOG.info("Python Dependency metadata:\n\n%s", logged_deps)
def _log_metadata(self, project):
dependencies = self._extract_dependencies(project)
output = StringIO()
writer = csv.DictWriter(output, delimiter=',',
quoting=csv.QUOTE_MINIMAL,
fieldnames=ordered_fieldnames)
for dep in dependencies:
license, homepage = self._get_package_license_homepage(**dep)
info = dep
info['license_info'] = license
info['homepage'] = homepage
info['project_name'] = project.name
writer.writerow(info)
self._project_deps[project.name] = output.getvalue()
output.close()
def _get_package_license_homepage(self, package, version):
url = BASE_PYPI_URL % locals()
resp = requests.get(url)
license = None
homepage = None
if resp.status_code == 200:
data = resp.json()
license = data['info'].get('license', None)
homepage = data['info'].get('home_page', None)
return license, homepage
def _extract_dependencies(self, project):
pip_path = self._get_venv_pip_path(project.install_path)
cmd = "%s freeze" % pip_path
freeze = self._execute(cmd)
dependencies = []
for dep in freeze.split('\n'):
parts = dep.split('==')
if len(parts) == 2:
data = {'package': parts[0],
'version': parts[1]}
dependencies.append(data)
return dependencies
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2015, IBM
# Copyright 2015, Craig Tracey <craigtracey@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import csv
import logging
import requests
from collections import OrderedDict
from giftwrap.builders.package_builder import PackageBuilder
from six import StringIO
BASE_PYPI_URL = 'http://pypi.python.org/pypi/%(package)s/%(version)s/json'
ordered_fieldnames = OrderedDict([
('project_name', None),
('package', None),
('version', None),
('homepage', None),
('license_info', None),
])
LOG = logging.getLogger(__name__)
class PackageMetaBuilder(PackageBuilder):
def __init__(self, build_spec):
super(PackageMetaBuilder, self).__init__(build_spec)
self._project_deps = {}
logging.getLogger("requests").setLevel(logging.WARNING)
def _finalize_project_build(self, project):
super(PackageMetaBuilder, self)._finalize_project_build(project)
self._log_metadata(project)
def _finalize_build(self):
super(PackageMetaBuilder, self)._finalize_build()
LOG.info("Python Dependency metadata:\n\n")
for (project_name, deps_info) in self._project_deps.iteritems():
LOG.info(deps_info)
def _log_metadata(self, project):
dependencies = self._extract_dependencies(project)
output = StringIO()
writer = csv.DictWriter(output, delimiter=',',
quoting=csv.QUOTE_MINIMAL,
fieldnames=ordered_fieldnames)
for dep in dependencies:
license, homepage = self._get_package_license_homepage(**dep)
info = dep
info['license_info'] = license
info['homepage'] = homepage
info['project_name'] = project.name
writer.writerow(info)
self._project_deps[project.name] = output.getvalue()
output.close()
def _get_package_license_homepage(self, package, version):
url = BASE_PYPI_URL % locals()
resp = requests.get(url)
license = None
homepage = None
if resp.status_code == 200:
data = resp.json()
license = data['info'].get('license', None)
homepage = data['info'].get('home_page', None)
return license, homepage
def _extract_dependencies(self, project):
pip_path = self._get_venv_pip_path(project.install_path)
cmd = "%s freeze" % pip_path
freeze = self._execute(cmd)
dependencies = []
for dep in freeze.split('\n'):
parts = dep.split('==')
if len(parts) == 2:
data = {'package': parts[0],
'version': parts[1]}
dependencies.append(data)
return dependencies
| Python | 0 |
6bfab23170c108c50c9b2dc4988e8670ed677d65 | Allow including html files. Build script made executable. | build.py | build.py | #!/usr/bin/python
import distutils.core
from os import path
import re
include_folder = 'slides'
include_templates = ['{}.html', '{}.md']
include_regex = re.compile('@@([a-zA-Z0-9-_]+)')
in_file = 'index.html'
out_folder = '../dist'
out_file_name = 'index.html'
dirs_to_copy = ['css', 'js', 'lib', 'plugin']
def main():
print('Copying static directories...')
for directory in dirs_to_copy:
target = path.join(out_folder, directory)
if path.exists(target):
distutils.dir_util.remove_tree(target) #WARNING: THIS ACTUALLY REPLACES THE OLD ONE, SO BE CAREFUL
distutils.dir_util.copy_tree(directory, target)
print('{} copied'.format(directory))
print('All copied.')
print('Processing {} file...'.format(in_file))
with open(path.join(out_folder, out_file_name), 'w+') as fout:
with open(in_file, 'r') as fin:
text = fin.read()
text = include_regex.sub(processIncludeMatch, text)
fout.write(text)
print('{} file processed.'.format(in_file))
print('All done!')
def processIncludeMatch(match):
return includeFile(match.group(1))
def includeFile(name):
filename = ''
exists = False
for template in include_templates:
filename = path.join(include_folder, template.format(name))
if path.isfile(filename):
exists = True
break
if exists:
print('>> File {} included'.format(filename))
with open(filename, 'r') as f:
return f.read()
main()
| import distutils.core
from os import path
import re
include_folder = 'slides'
include_template = '{}.md'
include_regex = re.compile('@@([a-zA-Z0-9-_]+)')
in_file = 'index.html'
out_folder = '../dist'
out_file_name = 'index.html'
dirs_to_copy = ['css', 'js', 'lib', 'plugin']
def main():
print('Copying static directories...')
for directory in dirs_to_copy:
target = path.join(out_folder, directory)
if path.exists(target):
distutils.dir_util.remove_tree(target) #WARNING: THIS ACTUALLY REPLACES THE OLD ONE, SO BE CAREFUL
distutils.dir_util.copy_tree(directory, target)
print('{} copied'.format(directory))
print('All copied.')
print('Processing {} file...'.format(in_file))
with open(path.join(out_folder, out_file_name), 'w+') as fout:
with open(in_file, 'r') as fin:
text = fin.read()
matches = include_regex.findall(text) #save matches to print them
text = include_regex.sub(processIncludeMatch, text)
fout.write(text)
if matches is not None:
for match in matches:
print('>> File {} included'.format(include_template.format(match)))
print('{} file processed.'.format(in_file))
print('All done!')
def processIncludeMatch(match):
return includeFile(match.group(1))
def includeFile(name):
filename = path.join(include_folder, include_template.format(name))
with open(filename, 'r') as f:
return f.read()
main()
| Python | 0 |
9179907357c6e8aad33a8a5e5cd39b164b2f9cc0 | Update BUILD_OSS to 4680. | src/data/version/mozc_version_template.bzl | src/data/version/mozc_version_template.bzl | # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4680
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
| # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4666
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
| Python | 0 |
db2bb0356cfdf486a9e628726cd4e5879311fe8b | update version | src/BJRobot/version.py | src/BJRobot/version.py | VERSION = '0.5.0'
| VERSION = '0.4.0'
| Python | 0 |
137271045313a12bbe9388ab1ac6c8cb786b32b7 | Reset mock befor running test. | guardian/testapp/tests/test_management.py | guardian/testapp/tests/test_management.py | from __future__ import absolute_import
from __future__ import unicode_literals
from guardian.compat import get_user_model
from guardian.compat import mock
from guardian.compat import unittest
from guardian.management import create_anonymous_user
import django
mocked_get_init_anon = mock.Mock()
class TestGetAnonymousUser(unittest.TestCase):
@unittest.skipUnless(django.VERSION >= (1, 5), "Django >= 1.5 only")
@mock.patch('guardian.management.guardian_settings')
def test_uses_custom_function(self, guardian_settings):
mocked_get_init_anon.reset_mock()
path = 'guardian.testapp.tests.test_management.mocked_get_init_anon'
guardian_settings.GET_INIT_ANONYMOUS_USER = path
guardian_settings.ANONYMOUS_USER_ID = 219
User = get_user_model()
anon = mocked_get_init_anon.return_value = mock.Mock()
create_anonymous_user('sender')
mocked_get_init_anon.assert_called_once_with(User)
self.assertEqual(anon.pk, 219)
anon.save.assert_called_once_with()
| from __future__ import absolute_import
from __future__ import unicode_literals
from guardian.compat import get_user_model
from guardian.compat import mock
from guardian.compat import unittest
from guardian.management import create_anonymous_user
import django
mocked_get_init_anon = mock.Mock()
class TestGetAnonymousUser(unittest.TestCase):
@unittest.skipUnless(django.VERSION >= (1, 5), "Django >= 1.5 only")
@mock.patch('guardian.management.guardian_settings')
def test_uses_custom_function(self, guardian_settings):
path = 'guardian.testapp.tests.test_management.mocked_get_init_anon'
guardian_settings.GET_INIT_ANONYMOUS_USER = path
guardian_settings.ANONYMOUS_USER_ID = 219
User = get_user_model()
anon = mocked_get_init_anon.return_value = mock.Mock()
create_anonymous_user('sender')
mocked_get_init_anon.assert_called_once_with(User)
self.assertEqual(anon.pk, 219)
anon.save.assert_called_once_with()
| Python | 0 |
30020d3826a2460288b6a57963753787020a945a | Implement support for the 'D' type in packb() | temporenc/temporenc.py | temporenc/temporenc.py |
import struct
SUPPORTED_TYPES = set([
'D',
'T',
'DT',
'DTZ',
'DTS',
'DTSZ',
])
STRUCT_32 = struct.Struct('>L')
def packb(type=None, year=None, month=None, day=None):
"""
Pack date and time information into a byte string.
:return: encoded temporenc value
:rtype: bytes
"""
# Input validation
if type not in SUPPORTED_TYPES:
raise ValueError("invalid temporenc type: {0!r}".format(type))
if year is None:
year = 4095
elif not 0 <= year <= 4094:
raise ValueError("'year' not in supported range")
if month is None:
month = 15
elif not 1 <= month <= 12:
raise ValueError("'month' not in supported range")
if day is None:
day = 31
elif not 1 <= day <= 31:
raise ValueError("'day' not in supported range")
# Component packing
if 'D' in type:
d = (year << 9) | (month - 1 << 5) | (day - 1)
# Byte packing
if type == 'D':
# Format: 100DDDDD DDDDDDDD DDDDDDDD
return STRUCT_32.pack(0b100 << 21 | d)[1:]
raise NotImplementedError()
|
def packb(type=None, year=None, month=None, day=None):
raise NotImplementedError()
| Python | 0.00022 |
7c63030bd70b32ec4c13ff4273d103ddbb0ffa0f | include tumblrprofile in djangoadmin | hackathon_starter/hackathon/admin.py | hackathon_starter/hackathon/admin.py | from django.contrib import admin
from hackathon.models import UserProfile, Profile, InstagramProfile, TwitterProfile, MeetupToken, GithubProfile, LinkedinProfile, TumblrProfile
# Register your models here.
class TwitterProfileAdmin(admin.ModelAdmin):
list_display = ('user','twitter_user')
admin.site.register(UserProfile)
admin.site.register(Profile)
admin.site.register(InstagramProfile)
admin.site.register(TwitterProfile, TwitterProfileAdmin)
admin.site.register(GithubProfile)
admin.site.register(MeetupToken)
admin.site.register(LinkedinProfile)
admin.site.register(TumblrProfile)
| from django.contrib import admin
from hackathon.models import UserProfile, Profile, InstagramProfile, TwitterProfile, MeetupToken, GithubProfile, LinkedinProfile
# Register your models here.
class TwitterProfileAdmin(admin.ModelAdmin):
list_display = ('user','twitter_user')
admin.site.register(UserProfile)
admin.site.register(Profile)
admin.site.register(InstagramProfile)
admin.site.register(TwitterProfile, TwitterProfileAdmin)
admin.site.register(GithubProfile)
admin.site.register(MeetupToken)
admin.site.register(LinkedinProfile)
| Python | 0.000002 |
c3df7d5adf551213c94f2d0e0598552ce6ee9aaf | move collection list filtering logic to db query | hs_collection_resource/page_processors.py | hs_collection_resource/page_processors.py | from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.db.models import Q
from mezzanine.pages.page_processors import processor_for
from hs_core import page_processors
from hs_core.models import BaseResource
from hs_core.views import add_generic_context
from hs_core.views.utils import get_my_resources_list
from .models import CollectionResource
@processor_for(CollectionResource)
def landing_page(request, page):
content_model = page.get_content_model()
edit_resource = page_processors.check_resource_mode(request)
# current contained resources list
collection_items_list = list(content_model.resources.all())
# get the context from hs_core
context = page_processors.get_page_context(page, request.user,
resource_edit=edit_resource,
extended_metadata_layout=None,
request=request)
if edit_resource:
user = request.user
if not user.is_authenticated():
return HttpResponseForbidden();
user_all_accessible_resource_list = get_my_resources_list(user)
# resource is collectable if
# 1) Shareable=True
# 2) OR, current user is a owner of it
# 3) exclude this resource as well as resources already in the collection
user_all_accessible_resource_list.exclude(short_id=content_model.short_id)\
.exclude(id__in=content_model.resources.all())\
.exclude(Q(raccess__shareable=False) | Q(raccess__owners__contains=user.pk))
context['collection_candidate'] = user_all_accessible_resource_list.all()
context['collection_res_id'] = content_model.short_id
elif isinstance(context, HttpResponseRedirect):
# resource view mode
# sending user to login page
return context
context['deleted_resources'] = content_model.deleted_resources.all()
context['collection'] = collection_items_list
context['edit_mode'] = edit_resource
hs_core_dublin_context = add_generic_context(request, page)
context.update(hs_core_dublin_context)
return context
| from django.http import HttpResponseRedirect
from mezzanine.pages.page_processors import processor_for
from hs_core import page_processors
from hs_core.models import BaseResource
from hs_core.views import add_generic_context
from hs_core.views.utils import get_my_resources_list
from .models import CollectionResource
@processor_for(CollectionResource)
def landing_page(request, page):
content_model = page.get_content_model()
edit_resource = page_processors.check_resource_mode(request)
user = request.user
if user.is_authenticated():
user_all_accessible_resource_list = get_my_resources_list(user)
else: # anonymous user
user_all_accessible_resource_list = list(BaseResource.discoverable_resources.all())
# resource is collectable if
# 1) Shareable=True
# 2) OR, current user is a owner of it
user_all_collectable_resource_list = []
for res in user_all_accessible_resource_list:
if res.raccess.shareable or res.raccess.owners.filter(pk=user.pk).exists():
user_all_collectable_resource_list.append(res)
# current contained resources list
collection_items_list = list(content_model.resources.all())
# get the context from hs_core
context = page_processors.get_page_context(page, request.user,
resource_edit=edit_resource,
extended_metadata_layout=None,
request=request)
if edit_resource:
candidate_resources_list = []
for res in user_all_collectable_resource_list:
if content_model.short_id == res.short_id:
continue # skip current collection resource object
elif res in content_model.resources.all():
continue # skip resources that are already in current collection
candidate_resources_list.append(res)
context['collection_candidate'] = candidate_resources_list
context['collection_res_id'] = content_model.short_id
elif isinstance(context, HttpResponseRedirect):
# resource view mode
# sending user to login page
return context
context['deleted_resources'] = content_model.deleted_resources.all()
context['collection'] = collection_items_list
context['edit_mode'] = edit_resource
hs_core_dublin_context = add_generic_context(request, page)
context.update(hs_core_dublin_context)
return context
| Python | 0 |
75a0dec32210432374b45dbed2845dfe171b9b36 | Set version number to 0.4.1 | climlab/__init__.py | climlab/__init__.py | __version__ = '0.4.1'
# This list defines all the modules that will be loaded if a user invokes
# from climLab import *
# totally out of date!
#__all__ = ["constants", "thermo", "orbital_table",
# "long_orbital_table", "insolation", "ebm",
# "column", "convadj"]
#from climlab import radiation
# this should ensure that we can still import constants.py as climlab.constants
from climlab.utils import constants
from climlab.utils import thermo, legendre
# some more useful shorcuts
#from climlab.model import ebm, column
from climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel
from climlab.model.ebm import EBM, EBM_annual, EBM_seasonal
from climlab.domain import domain
from climlab.domain.field import Field, global_mean
from climlab.domain.axis import Axis
from climlab.domain.initial import column_state, surface_state
from climlab.process.process import Process, process_like, get_axes
from climlab.process.time_dependent_process import TimeDependentProcess
from climlab.process.implicit import ImplicitProcess
from climlab.process.diagnostic import DiagnosticProcess
from climlab.process.energy_budget import EnergyBudget
| __version__ = '0.4.1dev'
# This list defines all the modules that will be loaded if a user invokes
# from climLab import *
# totally out of date!
#__all__ = ["constants", "thermo", "orbital_table",
# "long_orbital_table", "insolation", "ebm",
# "column", "convadj"]
#from climlab import radiation
# this should ensure that we can still import constants.py as climlab.constants
from climlab.utils import constants
from climlab.utils import thermo, legendre
# some more useful shorcuts
#from climlab.model import ebm, column
from climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel
from climlab.model.ebm import EBM, EBM_annual, EBM_seasonal
from climlab.domain import domain
from climlab.domain.field import Field, global_mean
from climlab.domain.axis import Axis
from climlab.domain.initial import column_state, surface_state
from climlab.process.process import Process, process_like, get_axes
from climlab.process.time_dependent_process import TimeDependentProcess
from climlab.process.implicit import ImplicitProcess
from climlab.process.diagnostic import DiagnosticProcess
from climlab.process.energy_budget import EnergyBudget
| Python | 0.999999 |
85af2e031479c78aaef433e2294648125916251a | Improve color palette for cycling Curves | src/rnaseq_lib/plot/opts.py | src/rnaseq_lib/plot/opts.py | import holoviews as hv
color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
gene_curves_opts = {
'Curve': {'plot': dict(height=120, width=600, tools=['hover'], invert_xaxis=True, yrotation=45, yaxis='left'),
'style': dict(line_width=1.5)},
'Curve.Percentage_of_Normal_Samples': {'plot': dict(xaxis=None, invert_yaxis=True),
'style': dict(color='Blue')},
'Curve.Gene_Expression': {'plot': dict(xaxis=None),
'style': dict(color='Green')},
'Curve.Log2_Fold_Change': {'plot': dict(height=150),
'style': dict(color='Purple')},
'Scatter': {'style': dict(color='red', size=3)}}
gene_kde_opts = {'Overlay': {'plot': dict(width=500, legend_position='left')}}
gene_distribution_opts = {'BoxWhisker': {'plot': dict(width=875, xrotation=70)}}
gene_de_opts = {
'Scatter': {'plot': dict(color_index='Tissue', legend_position='left', width=700, height=500, tools=['hover']),
'style': dict(cmap='tab20', size=10, alpha=0.5)}}
sample_count_opts = {
'Bars': {'plot': dict(width=875, xrotation=70, tools=['hover'], show_legend=False)}
}
l2fc_by_perc_samples_opts = {
'Curve': {'plot': dict(tools=['hover']),
'style': dict(color=hv.Cycle(values=color_sequence))},
'Overlay': {'plot': dict(legend_position='left', width=500)},
'Spikes': {'plot': dict(spike_length=100),
'style': dict(line_alpha=0.4, line_width=5)}
}
| gene_curves_opts = {
'Curve': {'plot': dict(height=120, width=600, tools=['hover'], invert_xaxis=True, yrotation=45, yaxis='left'),
'style': dict(line_width=1.5)},
'Curve.Percentage_of_Normal_Samples': {'plot': dict(xaxis=None, invert_yaxis=True),
'style': dict(color='Blue')},
'Curve.Gene_Expression': {'plot': dict(xaxis=None),
'style': dict(color='Green')},
'Curve.Log2_Fold_Change': {'plot': dict(height=150),
'style': dict(color='Purple')},
'Scatter': {'style': dict(color='red', size=3)}}
gene_kde_opts = {'Overlay': {'plot': dict(width=500, legend_position='left')}}
gene_distribution_opts = {'BoxWhisker': {'plot': dict(width=875, xrotation=70)}}
gene_de_opts = {
'Scatter': {'plot': dict(color_index='Tissue', legend_position='left', width=700, height=500, tools=['hover']),
'style': dict(cmap='tab20', size=10, alpha=0.5)}}
sample_count_opts = {
'Bars': {'plot': dict(width=875, xrotation=70, tools=['hover'], show_legend=False)}
}
l2fc_by_perc_samples_opts = {
'Curve': {'plot': dict(tools=['hover'])},
'Overlay': {'plot': dict(legend_position='left', width=500)},
'Spikes': {'plot': dict(spike_length=100),
'style': dict(line_alpha=0.4, line_width=5)}
}
| Python | 0.000001 |
72c5168ff71223db32ef37a12fd8781f28bfc433 | change CTCP VERSION reply | circa.py | circa.py | #!/usr/bin/env python3
import sdirc
import yaml
import threading
import importlib
import modules
VERSION = "1.0"
class Circa(sdirc.Client):
def __init__(self, **conf):
conf["autoconn"] = False
conf["prefix"] = conf["prefix"] if "prefix" in conf else "!"
sdirc.Client.__init__(self, **conf)
self.modules = {}
self.add_listener("registered",
lambda m: (self.send("UMODE2", "+B"), self.say("groupserv", "join !bots")))
for module in "cmd module leave".split() + self.conf["modules"]:
self.load_module(module)
self.add_listener("invite", lambda to, by, m: self.join(to))
self.add_listener("ctcp-version", self.version)
self.connect()
def version(self, fr, to, msg):
self.notice(fr, "\x01VERSION circa {0}\x01".format(VERSION))
@staticmethod
def wrap(line):
words = []
width = 80
for word in line.split():
if len(word) + 1 > width:
words.append("\xFF")
width = 80 - len(word)
else:
width = width - len(word) - 1
words.append(word)
line2 = " ".join(words)
sublines = line2.split(" \xFF ")
return sublines
def say(self, to, msg):
msg = [line.rstrip() for line in msg.split("\n")]
for line in msg:
for subline in Circa.wrap(line):
sdirc.Client.say(self, to, subline)
def load_module(self, name):
if name in self.modules:
return 2
try:
m = importlib.import_module("modules." + name).module
if hasattr(m, "require"):
for mod in m.require.split():
self.load_module(mod)
self.modules[name] = module = m(self)
for event, listeners in module.listeners.items():
for listener in listeners:
self.add_listener(event, listener)
return 0
except ImportError:
return 1
except AttributeError:
return 1
except TypeError:
return 1
def unload_module(self, name):
if name not in self.modules:
return 1
module = self.modules[name]
for event, listeners in module.listeners.items():
for listener in listeners:
self.remove_listener(event, listener)
del self.modules[name]
return 0
if __name__ == "__main__":
try:
file = open("config.yaml")
config = yaml.load(file)
file.close()
for c in config:
threading.Thread(target=lambda: Circa(**c)).start()
except KeyboardInterrupt:
print("Bye")
| #!/usr/bin/env python3
import sdirc
import yaml
import threading
import importlib
import modules
VERSION = "1.0"
class Circa(sdirc.Client):
def __init__(self, **conf):
conf["autoconn"] = False
conf["prefix"] = conf["prefix"] if "prefix" in conf else "!"
sdirc.Client.__init__(self, **conf)
self.modules = {}
self.add_listener("registered",
lambda m: (self.send("UMODE2", "+B"), self.say("groupserv", "join !bots")))
for module in "cmd module leave".split() + self.conf["modules"]:
self.load_module(module)
self.add_listener("invite", lambda to, by, m: self.join(to))
self.add_listener("ctcp-version", self.version)
self.connect()
def version(self, fr, to, msg):
self.say(fr, "\x01VERSION circa {0}\x01".format(VERSION))
@staticmethod
def wrap(line):
words = []
width = 80
for word in line.split():
if len(word) + 1 > width:
words.append("\xFF")
width = 80 - len(word)
else:
width = width - len(word) - 1
words.append(word)
line2 = " ".join(words)
sublines = line2.split(" \xFF ")
return sublines
def say(self, to, msg):
msg = [line.rstrip() for line in msg.split("\n")]
for line in msg:
for subline in Circa.wrap(line):
sdirc.Client.say(self, to, subline)
def load_module(self, name):
if name in self.modules:
return 2
try:
m = importlib.import_module("modules." + name).module
if hasattr(m, "require"):
for mod in m.require.split():
self.load_module(mod)
self.modules[name] = module = m(self)
for event, listeners in module.listeners.items():
for listener in listeners:
self.add_listener(event, listener)
return 0
except ImportError:
return 1
except AttributeError:
return 1
except TypeError:
return 1
def unload_module(self, name):
if name not in self.modules:
return 1
module = self.modules[name]
for event, listeners in module.listeners.items():
for listener in listeners:
self.remove_listener(event, listener)
del self.modules[name]
return 0
if __name__ == "__main__":
try:
file = open("config.yaml")
config = yaml.load(file)
file.close()
for c in config:
threading.Thread(target=lambda: Circa(**c)).start()
except KeyboardInterrupt:
print("Bye")
| Python | 0 |
dc6100fea3097d97e7065bd653093798eac84909 | Allow passing in of timezone | kairios/templatetags/kairios_tags.py | kairios/templatetags/kairios_tags.py | import calendar as cal
import datetime
from django import template
from django.util import timezone
import pytz
register = template.Library()
def delta(year, month, d):
mm = month + d
yy = year
if mm > 12:
mm, yy = mm % 12, year + mm / 12
elif mm < 1:
mm, yy = 12 + mm, year - 1
return yy, mm
@register.inclusion_tag("kairios/calendar.html", takes_context=True)
def calendar(context, events, date=None, tz=None, **kwargs):
cal.setfirstweekday(cal.SUNDAY)
if tz:
today = timezone.localtime(timezone.now(), pytz.timezone(tz)).date()
else:
today = datetime.date.today()
if date is None:
date = today
plus_year, plus_month = delta(date.year, date.month, 1)
minus_year, minus_month = delta(date.year, date.month, -1)
next = events.month_url(plus_year, plus_month)
prev = events.month_url(minus_year, minus_month)
events_by_day = events.events_by_day(date.year, date.month)
title = "%s %s" % (cal.month_name[date.month], date.year)
matrix = cal.monthcalendar(date.year, date.month)
grid = []
for week in matrix:
row = []
for day in week:
is_today = date.year == today.year and date.month == today.month and today.day == day
if day:
day_events = events_by_day.get(day, [])
link = events.day_url(date.year, date.month, day, bool(day_events))
row.append((day, day_events, link, is_today))
else:
row.append(None)
grid.append(row)
context.update({
"title": title,
"calendar_date": date,
"prev": prev,
"next": next,
"grid": grid,
})
return context
| import calendar as cal
import datetime
from django import template
register = template.Library()
def delta(year, month, d):
mm = month + d
yy = year
if mm > 12:
mm, yy = mm % 12, year + mm / 12
elif mm < 1:
mm, yy = 12 + mm, year - 1
return yy, mm
@register.inclusion_tag("kairios/calendar.html", takes_context=True)
def calendar(context, events, date=None, **kwargs):
cal.setfirstweekday(cal.SUNDAY)
today = datetime.date.today()
if date is None:
date = today
plus_year, plus_month = delta(date.year, date.month, 1)
minus_year, minus_month = delta(date.year, date.month, -1)
next = events.month_url(plus_year, plus_month)
prev = events.month_url(minus_year, minus_month)
events_by_day = events.events_by_day(date.year, date.month)
title = "%s %s" % (cal.month_name[date.month], date.year)
matrix = cal.monthcalendar(date.year, date.month)
grid = []
for week in matrix:
row = []
for day in week:
is_today = date.year == today.year and date.month == today.month and today.day == day
if day:
day_events = events_by_day.get(day, [])
link = events.day_url(date.year, date.month, day, bool(day_events))
row.append((day, day_events, link, is_today))
else:
row.append(None)
grid.append(row)
context.update({
"title": title,
"calendar_date": date,
"prev": prev,
"next": next,
"grid": grid,
})
return context
| Python | 0.000001 |
fd7577d34ef206869517f3717070880d098d4d8b | change URL dispach rules | cms_content/urls.py | cms_content/urls.py | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from cms_content.views import *
from cms_content.models import *
from cms_content.utils.queryset import queryset_iterator
urlpatterns = patterns ('',
url(r'^$', section_list, {'sections': CMSSection.objects.all()}, name='section'),
url(r'^(?P<slug>\w*)/$', category_list, name='category_list'),
url(r'^(?P<slug>\w*)/(?P<path>\w*)/$', article_list),
url(r'^(?P<slug>[-\w]+)/(?P<path>[-\w]+)/(?P<name>[-\w]+)/$', article_view),
)
| # -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from cms_content.views import *
from cms_content.models import *
from cms_content.utils.queryset import queryset_iterator
urlpatterns = patterns ('',
url(r'^$', section_list, {'sections': CMSSection.objects.all()}, name='section'),
url(r'^(?P<slug>\w*)/$', category_list, name='category_list'),
url(r'^(?P<slug>\w*)/(?P<path>\w*)/$', article_list),
url(r'^(?P<slug>\w*)/(?P<path>\w*)/(?P<name>[\w-]*)/$', article_view),
)
| Python | 0 |
56b98c3f8a091132cd2dc9c1a717df9cdd96439c | Improve titles | src/sentry/constants.py | src/sentry/constants.py | """
sentry.constants
~~~~~~~~~~~~~~~~
These settings act as the default (base) settings for the Sentry-provided web-server
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
SORT_OPTIONS = SortedDict((
('priority', _('Priority')),
('date', _('Last Seen')),
('new', _('First Seen')),
('freq', _('Frequency')),
('tottime', _('Total Time Spent')),
('avgtime', _('Average Time Spent')),
('accel_15', _('Trending: %(minutes)d minutes' % {'minutes': 15})),
('accel_60', _('Trending: %(minutes)d minutes' % {'minutes': 60})),
))
SORT_CLAUSES = {
'priority': 'sentry_groupedmessage.score',
'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)',
'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)',
'freq': 'sentry_groupedmessage.times_seen',
'tottime': 'sentry_groupedmessage.time_spent_total',
'avgtime': '(sentry_groupedmessage.time_spent_total / sentry_groupedmessage.time_spent_count)',
}
SCORE_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES.update({
'date': 'sentry_groupedmessage.last_seen',
'new': 'sentry_groupedmessage.first_seen',
})
SQLITE_SCORE_CLAUSES = SQLITE_SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES.update({
'date': 'sentry_groupedmessage.last_seen',
'new': 'sentry_groupedmessage.first_seen',
})
MYSQL_SCORE_CLAUSES = SCORE_CLAUSES.copy()
MYSQL_SCORE_CLAUSES.update({
'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)',
'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)',
})
SEARCH_SORT_OPTIONS = SortedDict((
('score', _('Score')),
('date', _('Last Seen')),
('new', _('First Seen')),
))
STATUS_VISIBLE = 0
STATUS_HIDDEN = 1
STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_MUTED = 2
STATUS_LEVELS = (
(STATUS_UNRESOLVED, _('Unresolved')),
(STATUS_RESOLVED, _('Resolved')),
(STATUS_MUTED, _('Muted')),
)
MEMBER_OWNER = 0
MEMBER_USER = 50
MEMBER_SYSTEM = 100
MEMBER_TYPES = (
(MEMBER_OWNER, _('Admin')),
(MEMBER_USER, _('User')),
(MEMBER_SYSTEM, _('System Agent')),
)
# A list of values which represent an unset or empty password on
# a User instance.
EMPTY_PASSWORD_VALUES = ('!', '', '$')
PLATFORM_LIST = (
'python',
'django',
'flask',
'php',
'java',
'node.js',
'ios',
'express',
'connect',
'r',
'ruby',
'rails3',
'javascript',
)
PLATFORM_ROOTS = {
'rails3': 'ruby',
'django': 'python',
'flask': 'python',
'express': 'node.js',
'connect': 'node.js',
}
PLATFORM_TITLES = {
'rails3': 'Rails 3 (Ruby)',
'php': 'PHP',
'ios': 'iOS',
'express': 'Express (Node.js)',
'connect': 'Express (Node.js)',
'django': 'Django (Python)',
'flask': 'Flask (Python)',
}
| """
sentry.constants
~~~~~~~~~~~~~~~~
These settings act as the default (base) settings for the Sentry-provided web-server
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
SORT_OPTIONS = SortedDict((
('priority', _('Priority')),
('date', _('Last Seen')),
('new', _('First Seen')),
('freq', _('Frequency')),
('tottime', _('Total Time Spent')),
('avgtime', _('Average Time Spent')),
('accel_15', _('Trending: %(minutes)d minutes' % {'minutes': 15})),
('accel_60', _('Trending: %(minutes)d minutes' % {'minutes': 60})),
))
SORT_CLAUSES = {
'priority': 'sentry_groupedmessage.score',
'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)',
'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)',
'freq': 'sentry_groupedmessage.times_seen',
'tottime': 'sentry_groupedmessage.time_spent_total',
'avgtime': '(sentry_groupedmessage.time_spent_total / sentry_groupedmessage.time_spent_count)',
}
SCORE_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES.update({
'date': 'sentry_groupedmessage.last_seen',
'new': 'sentry_groupedmessage.first_seen',
})
SQLITE_SCORE_CLAUSES = SQLITE_SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES.update({
'date': 'sentry_groupedmessage.last_seen',
'new': 'sentry_groupedmessage.first_seen',
})
MYSQL_SCORE_CLAUSES = SCORE_CLAUSES.copy()
MYSQL_SCORE_CLAUSES.update({
'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)',
'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)',
})
SEARCH_SORT_OPTIONS = SortedDict((
('score', _('Score')),
('date', _('Last Seen')),
('new', _('First Seen')),
))
STATUS_VISIBLE = 0
STATUS_HIDDEN = 1
STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_MUTED = 2
STATUS_LEVELS = (
(STATUS_UNRESOLVED, _('Unresolved')),
(STATUS_RESOLVED, _('Resolved')),
(STATUS_MUTED, _('Muted')),
)
MEMBER_OWNER = 0
MEMBER_USER = 50
MEMBER_SYSTEM = 100
MEMBER_TYPES = (
(MEMBER_OWNER, _('Admin')),
(MEMBER_USER, _('User')),
(MEMBER_SYSTEM, _('System Agent')),
)
# A list of values which represent an unset or empty password on
# a User instance.
EMPTY_PASSWORD_VALUES = ('!', '', '$')
PLATFORM_LIST = (
'python',
'django',
'flask',
'php',
'java',
'node.js',
'ios',
'express',
'connect',
'r',
'ruby',
'rails3',
'javascript',
)
PLATFORM_ROOTS = {
'rails3': 'ruby',
'django': 'python',
'flask': 'python',
'express': 'node.js',
'connect': 'node.js',
}
PLATFORM_TITLES = {
'rails3': 'Rails 3',
'php': 'PHP',
'ios': 'iOS',
}
| Python | 0.001441 |
2f24f483dbd8ed860556dd934c8923c89e378fce | whoops - null text, return 0 length | library/pyjamas/ui/platform/TextBoxBasemshtml.py | library/pyjamas/ui/platform/TextBoxBasemshtml.py | class TextBoxBase:
def getCursorPos(self):
try :
elem = self.getElement()
tr = elem.document.selection.createRange()
if tr.parentElement().uniqueID != elem.uniqueID:
return -1
return -tr.move("character", -65535)
except:
print traceback.print_exc()
return 0
def getSelectionLength(self):
try :
elem = self.getElement()
tr = elem.document.selection.createRange()
if tr.parentElement().uniqueID != elem.uniqueID:
return 0
return tr.text and len(tr.text) or 0
except:
print traceback.print_exc()
return 0
def setSelectionRange(self, pos, length):
try :
elem = self.getElement()
tr = elem.createTextRange()
tr.collapse(True)
tr.moveStart('character', pos)
tr.moveEnd('character', length)
tr.select()
except :
print traceback.print_exc()
pass
| class TextBoxBase:
def getCursorPos(self):
try :
elem = self.getElement()
tr = elem.document.selection.createRange()
if tr.parentElement().uniqueID != elem.uniqueID:
return -1
return -tr.move("character", -65535)
except:
print traceback.print_exc()
return 0
def getSelectionLength(self):
try :
elem = self.getElement()
tr = elem.document.selection.createRange()
if tr.parentElement().uniqueID != elem.uniqueID:
return 0
return len(tr.text)
except:
print traceback.print_exc()
return 0
def setSelectionRange(self, pos, length):
try :
elem = self.getElement()
tr = elem.createTextRange()
tr.collapse(True)
tr.moveStart('character', pos)
tr.moveEnd('character', length)
tr.select()
except :
print traceback.print_exc()
pass
| Python | 0.999352 |
ce83a4fb2f650380b7683ea688791e078b6fe7ec | Fix wrong redirect on logout | src/sleepy/web/views.py | src/sleepy/web/views.py | from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, logout
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView, TemplateView
from django.utils.http import is_safe_url
from django.utils.translation import ugettext
class IndexView(TemplateView):
"""View for the index page"""
template_name = 'sleepy/web/index.html'
class LogoutView(RedirectView):
url = reverse_lazy('sleepy-home')
permanent = False
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated():
logout(self.request)
messages.success(request, ugettext('You have successfully logged out.'))
return super(LogoutView, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
url = super(LogoutView, self).get_redirect_url(*args, **kwargs)
next_url = self.request.REQUEST.get(REDIRECT_FIELD_NAME, None)
if next_url and is_safe_url(url=next_url, host=self.request.get_host()):
url = next_url
return url
| from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, logout
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView, TemplateView
from django.utils.http import is_safe_url
from django.utils.translation import ugettext
class IndexView(TemplateView):
"""View for the index page"""
template_name = 'sleepy/web/index.html'
class LogoutView(RedirectView):
url = reverse_lazy('home')
permanent = False
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated():
logout(self.request)
messages.success(request, ugettext('You have successfully logged out.'))
return super(LogoutView, self).get(request, *args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
url = super(LogoutView, self).get_redirect_url(*args, **kwargs)
next_url = self.request.REQUEST.get(REDIRECT_FIELD_NAME, None)
if next_url and is_safe_url(url=next_url, host=self.request.get_host()):
url = next_url
return url
| Python | 0.000003 |
800bd152baa9eef06f647e98994b0a5b9f4b2012 | Update manual_matches for new format | membership/management/commands/manual_matches.py | membership/management/commands/manual_matches.py | # encoding: UTF-8
from __future__ import with_statement
from django.db.models import Q, Sum
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
import logging
logger = logging.getLogger("manual_matches")
from datetime import datetime, timedelta
from membership.models import Bill, BillingCycle, Payment, Membership
from membership.utils import log_change
import codecs
import csv
import os
import sys
from datetime import datetime
from decimal import Decimal
def process_csv(filename):
"""Actual CSV file processing logic
"""
num_attached = num_notattached = 0
sum_attached = sum_notattached = 0
num_nomember = num_nopayment = num_nocycle = num_old = 0
log_user = User.objects.get(id=1)
with open(filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
(mid, year, date, reference, transaction) = row
try:
membership = Membership.objects.get(id=int(mid))
cycle = find_cycle(membership, year)
payment = Payment.objects.get(transaction_id=transaction)
if payment.billingcycle: # Already assigned, mark cycle as paid
if payment.billingcycle != cycle:
mark_cycle_paid(cycle, log_user, "One payment for several billing cycles")
num_old += 1
continue
payment.attach_to_cycle(cycle)
log_change(payment, log_user, change_message="Attached by manual_matches")
num_attached += 1
sum_attached += payment.amount
continue
except Membership.DoesNotExist:
logger.warning("Membership %s not found. transaction id: %s" % (mid, transaction))
# Payment references a billing cycle for a removed member - ignore
ignore_payment(transaction, log_user, "no member")
num_nomember += 1
except BillingCycle.DoesNotExist:
# Payment references a legacy billing cycle - ignore payment
ignore_payment(transaction, log_user, "legacy payment")
num_nocycle += 1
except Payment.DoesNotExist:
logger.warning("No transaction found for id: %s, member: %s year: %s. Marking as paid anyway" % (transaction, mid, year))
mark_cycle_paid(cycle, log_user, "Paid by a legacy payment (before 2010)")
num_nopayment += 1
num_notattached = num_notattached + 1
logger.info("Processed %s payments, attached %s payments, total %.2f EUR. Unidentified payments: %s" %
(num_attached + num_notattached, num_attached, sum_attached, num_notattached))
logger.info("No members: %s, no cycle: %s, no payment in db: %s, already attached to a cycle: %s" %
(num_nomember, num_nocycle, num_nopayment, num_old))
def find_cycle(membership, year):
return membership.billingcycle_set.filter(start__year=year).latest('start')
def ignore_payment(transaction, log_user, reason):
try:
payment = Payment.objects.get(transaction_id=transaction)
if not payment.ignore:
payment.ignore = True
payment.save()
log_change(payment, log_user, change_message="Ignored by manual_matches: %s" % reason)
except Payment.DoesNotExist:
pass
def mark_cycle_paid(cycle, log_user, reason):
if not cycle.is_paid:
cycle.is_paid = True
cycle.save()
log_change(cycle, log_user, change_message=reason)
class Command(BaseCommand):
args = '<csvfile>'
help = 'Import manual matches of payments'
def handle(self, csvfile, **options):
logger.info("Starting the processing of file %s." %
os.path.abspath(csvfile))
process_csv(csvfile)
try:
pass
#process_csv(csvfile)
except Exception, e:
print "Fatal error: %s" % unicode(e)
logger.error("process_csv failed: %s" % unicode(e))
sys.exit(1)
logger.info("Done processing file %s." % os.path.abspath(csvfile))
| # encoding: UTF-8
from __future__ import with_statement
from django.db.models import Q, Sum
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
import logging
logger = logging.getLogger("sikteeri.membership.management.commands.manual_matches")
from datetime import datetime, timedelta
from membership.models import Bill, BillingCycle, Payment, Membership
from membership.utils import log_change
import codecs
import csv
import os
import sys
from datetime import datetime
from decimal import Decimal
import logging
logger = logging.getLogger("csvbills")
def process_csv(filename):
"""Actual CSV file processing logic
"""
num_attached = num_notattached = 0
sum_attached = sum_notattached = 0
log_user = User.objects.get(id=1)
with open(filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
(mid, date, transaction) = row
try:
payment = Payment.objects.get(transaction_id=transaction)
if payment.billingcycle: # Already assigned, do nothing
continue
membership = Membership.objects.get(id=int(mid))
cycle = find_cycle(membership, payment)
payment.attach_to_cycle(cycle)
log_change(payment, log_user, change_message="Attached by manual_matches")
num_attached = num_attached + 1
sum_attached = sum_attached + payment.amount
continue
except Payment.DoesNotExist:
logger.warning("No transactinon found for id: %s, member :%s" % (transaction, mid))
except Membership.DoesNotExist:
logger.warning("Membership %s not found. transaction id: %s" % (mid, transaction))
# Payment references a billing cycle for a removed member - ignore
if not payment.ignore:
payment.ignore = True
payment.save()
log_change(payment, log_user, change_message="Ignored by manual_matches: no member")
except BillingCycle.DoesNotExist:
# Payment references a legacy billing cycle - ignore
if not payment.ignore:
payment.ignore = True
payment.save()
log_change(payment, log_user, change_message="Ignored by manual_matches: no matching billing cycle")
num_notattached = num_notattached + 1
sum_notattached = sum_notattached + payment.amount
logger.info("Processed %s payments total %.2f EUR. Unidentified payments: %s (%.2f EUR)" %
(num_attached + num_notattached, sum_attached + sum_notattached, num_notattached,
sum_notattached))
def find_cycle(membership, payment):
cycles = membership.billingcycle_set.filter(is_paid=False).order_by('-start')
for cycle in cycles:
if cycle.first_bill_sent_on() < payment.payment_day:
return cycle
raise BillingCycle.DoesNotExist("No billing cycles found older than payment date")
class Command(BaseCommand):
args = '<csvfile>'
help = 'Import manual matches of payments'
def handle(self, csvfile, **options):
logger.info("Starting the processing of file %s." %
os.path.abspath(csvfile))
try:
process_csv(csvfile)
except Exception, e:
print "Fatal error: %s" % unicode(e)
logger.error("process_csv failed: %s" % unicode(e))
sys.exit(1)
logger.info("Done processing file %s." % os.path.abspath(csvfile))
| Python | 0 |
e8389c211ef56869cd9c6c1177aa6a610a915aa2 | Fix manifest and add format to properties | combine/manifest.py | combine/manifest.py | # Copyright (c) 2010 John Reese
# Licensed under the MIT license
import yaml
from combine import CombineError
MANIFEST_FORMAT = 1
class Manifest:
def __init__(self):
self.properties = {"manifest-format": MANIFEST_FORMAT}
self.actions = []
def add_property(self, name, value):
self.properties[name] = value
def add_action(self, action):
self.actions.append(action)
def to_dict(self):
"""
Generate a dictionary representation of the Manifest object.
"""
return dict(self.properties, actions=self.actions)
@classmethod
def from_dict(cls, data):
"""
Given a dictionary object, generate a new Manifest object.
"""
format = data["manifest-format"]
if (format > MANIFEST_FORMAT or format < 0):
raise CombineError("Unsupported manifest format")
mft = Manifest()
for key, value in data.items():
if key == "actions":
for action in value:
mft.add_action(action)
else:
mft.add_property(key, value)
for action in data["actions"]:
mft.add_action(action)
return mft
def to_yaml(self):
"""
Generate a YAML data string representing the Manifest object.
"""
str = yaml.safe_dump(self.to_dict(), default_flow_style=False)
return str
@classmethod
def from_yaml(cls, str):
"""
Given a string of YAML data, generate a new Manifest object.
"""
data = yaml.safe_load(str)
return cls.from_dict(data)
| # Copyright (c) 2010 John Reese
# Licensed under the MIT license
import yaml
from combine import Change, CombineError
MANIFEST_FORMAT = 1
class Manifest:
def __init__(self):
self.properties = {}
self.actions = []
def add_property(self, name, value):
self.properties[name] = value
def add_action(self, action):
self.actions.append(action)
def to_dict(self):
"""
Generate a dictionary representation of the Manifest object.
"""
return dict(self.properties, actions=self.actions)
@classmethod
def from_dict(cls, data):
"""
Given a dictionary object, generate a new Manifest object.
"""
format = data["manifest-format"]
if (format > MANIFEST_FORMAT or format < 0):
raise CombineError("Unsupported manifest format")
mft = Manifest()
for key, value in data.items():
if key == "actions":
for action in value:
mft.add_action(action)
else:
mft.add_property(key, value)
for action in data["actions"]:
mft.add_action(action)
return mft
def to_yaml(self):
"""
Generate a YAML data string representing the Manifest object.
"""
str = yaml.safe_dump(self.to_dict(), default_flow_style=False)
return str
@classmethod
def from_yaml(cls, str):
"""
Given a string of YAML data, generate a new Manifest object.
"""
data = yaml.safe_load(str)
return cls.from_dict(data)
| Python | 0 |
291f11c6325a1ae082845be81692bc64521eab7e | refactor create-kdtree script | py/legacypipe/create-kdtrees.py | py/legacypipe/create-kdtrees.py | import os
from astrometry.libkd.spherematch import *
from astrometry.util.fits import fits_table
import numpy as np
# This script creates the survey-ccd-*.kd.fits kd-trees from
# survey-ccds-*.fits.gz (zeropoints) files
#
def create_kdtree(infn, outfn):
    """Build a kd-tree-augmented survey-ccds file.

    Decompresses *infn* if it is gzipped, runs the astrometry.net
    ``startree`` tool to attach a spatial kd-tree, builds a second kd-tree
    over the ``expnum`` column, and concatenates its extensions onto the
    final output *outfn*.

    NOTE(review): scratch files use fixed /tmp paths, so concurrent
    invocations would clobber each other -- verify before parallelizing.
    """
    readfn = infn
    # gunzip
    if infn.endswith('.gz'):
        tfn = '/tmp/ccds.fits'
        cmd = 'gunzip -cd %s > %s' % (infn, tfn)
        print(cmd)
        rtn = os.system(cmd)
        assert(rtn == 0)
        readfn = tfn
    # startree
    sfn = '/tmp/startree.fits'
    cmd = 'startree -i %s -o %s -P -T -k -n ccds' % (readfn, sfn)
    print(cmd)
    rtn = os.system(cmd)
    assert(rtn == 0)
    # add expnum-tree
    T = fits_table(sfn, columns=['expnum'])
    # A kd-tree over exposure number enables fast expnum lookups later.
    ekd = tree_build(np.atleast_2d(T.expnum.copy()).T.astype(float),
                     nleaf=60, bbox=False, split=True)
    ekd.set_name('expnum')
    efn = '/tmp/ekd.fits'
    ekd.write(efn)
    # merge
    cmd = 'fitsgetext -i %s -o /tmp/ekd-%%02i -a -M' % (efn)
    print(cmd)
    rtn = os.system(cmd)
    assert(rtn == 0)
    cmd = 'cat %s /tmp/ekd-0[123456] > %s' % (sfn, outfn)
    rtn = os.system(cmd)
    assert(rtn == 0)
def pre_depthcut():
    """Build a kd-tree CCDs file for each DECam band (pre-depth-cut paths)."""
    src_dir = '/global/projecta/projectdirs/cosmo/work/legacysurvey/dr8/DECaLS/'
    dst_dir = '/global/cscratch1/sd/dstn/dr8new'
    for band in 'grizY':
        src = src_dir + 'survey-ccds-decam-%s.fits.gz' % band
        print('Input:', src)
        dst = dst_dir + '/survey-ccds-decam-%s.kd.fits' % band
        create_kdtree(src, dst)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('infn', help='Input filename (CCDs file)')
    parser.add_argument('outfn', help='Output filename (survey-ccds-X.kd.fits file')
    opt = parser.parse_args()
    # Build the kd-tree-augmented copy of the given CCDs table.
    create_kdtree(opt.infn, opt.outfn)
| import os
from astrometry.libkd.spherematch import *
from astrometry.util.fits import fits_table
import numpy as np
# This script creates the survey-ccd-*.kd.fits kd-trees from
# survey-ccds-*.fits.gz (zeropoints) files
#
# Build one kd-tree-augmented CCDs file per DECam band.
# NOTE(review): the os.system() return codes below are never checked, so a
# failed step would silently corrupt the output.
indir = '/global/projecta/projectdirs/cosmo/work/legacysurvey/dr8/DECaLS/'
outdir = '/global/cscratch1/sd/dstn/dr8new'
bands = 'grizY'
for band in bands:
    infn = indir + 'survey-ccds-decam-%s.fits.gz' % band
    print('Input:', infn)
    # gunzip
    tfn = '/tmp/survey-ccd-%s.fits' % band
    cmd = 'gunzip -cd %s > %s' % (infn, tfn)
    print(cmd)
    os.system(cmd)
    # startree
    sfn = '/tmp/startree-%s.fits' % band
    cmd = 'startree -i %s -o %s -P -T -k -n ccds' % (tfn, sfn)
    print(cmd)
    os.system(cmd)
    # add expnum-tree
    T = fits_table(sfn, columns=['expnum'])
    ekd = tree_build(np.atleast_2d(T.expnum.copy()).T.astype(float),
                     nleaf=60, bbox=False, split=True)
    ekd.set_name('expnum')
    efn = '/tmp/ekd-%s.fits' % band
    ekd.write(efn)
    # merge
    cmd = 'fitsgetext -i %s -o /tmp/ekd-%s-%%02i -a -M' % (efn, band)
    print(cmd)
    os.system(cmd)
    outfn = outdir + '/survey-ccds-decam-%s.kd.fits' % band
    cmd = 'cat %s /tmp/ekd-%s-0[123456] > %s' % (sfn, band, outfn)
    os.system(cmd)
| Python | 0.000004 |
7b746d2d4ae732ee1eae326254f3a6df676a7973 | Add __str__ function for SgTable | components/table.py | components/table.py | """A class to store tables."""
class SgTable:
    """A class to store tables.

    Holds a list of field names and a list of rows; supports len(),
    iteration, string rendering and bounds-checked integer indexing.
    """
    # Integer types accepted as a row index.  ``long`` only exists on
    # Python 2; fall back to ``int`` alone on Python 3 so the bounds checks
    # below raise ValueError (not NameError) for non-int keys.
    try:
        _INDEX_TYPES = (int, long)
    except NameError:
        _INDEX_TYPES = (int,)
    def __init__(self):
        self._fields = []
        self._table = []
    def __len__(self):
        return len(self._table)
    def __iter__(self):
        for row in self._table:
            yield row
    def _check_index(self, key):
        """Raise ValueError unless *key* is a valid row index."""
        # type() comparison (not isinstance) deliberately rejects bools.
        if type(key) not in self._INDEX_TYPES or not (0 <= key < len(self._table)):
            raise ValueError("Index illegal")
    def __getitem__(self, key):
        self._check_index(key)
        return self._table[key]
    def __setitem__(self, key, value):
        self._check_index(key)
        self._table[key] = value
    def __str__(self):
        """Return the field names followed by one row per line."""
        ret = str(self._fields)
        for row in self._table:
            ret += "\n" + str(row)
        return ret
    def Append(self, row):
        """Append *row* to the table."""
        self._table.append(row)
    def GetTable(self):
        return self._table
    def SetTable(self, table):
        self._table = table
    def GetFields(self):
        return self._fields
    def SetFields(self, fields):
        self._fields = fields
| """A class to store tables."""
class SgTable:
    """A class to store tables: a field-name list plus a list of rows."""
    def __init__(self):
        self._fields = []
        self._table = []
    def __len__(self):
        return len(self._table)
    def __iter__(self):
        # Delegate straight to the underlying row list.
        return iter(self._table)
    def __getitem__(self, key):
        # Valid keys are in-range (non-bool) integers; anything else is rejected.
        if (type(key) == int or type(key) == long) and 0 <= key < len(self._table):
            return self._table[key]
        raise ValueError("Index illegal")
    def __setitem__(self, key, value):
        if (type(key) == int or type(key) == long) and 0 <= key < len(self._table):
            self._table[key] = value
        else:
            raise ValueError("Index illegal")
    def Append(self, row):
        """Add *row* at the end of the table."""
        self._table.append(row)
    def GetTable(self):
        return self._table
    def SetTable(self, table):
        self._table = table
    def GetFields(self):
        return self._fields
    def SetFields(self, fields):
        self._fields = fields
| Python | 0.999052 |
8a45ca4dff9957a6fce07dfa067633fcd842bc51 | Update cpp.py | conda/libdev/cpp.py | conda/libdev/cpp.py | import os
from SCons.Defaults import Delete
def generate(env):
    """Add Builders and construction variables to the Environment."""
    if not 'cpp' in env['TOOLS'][:-1]:
        env.Tool('system')
        env.Tool('prefix')
        def BuildCpp(env, target, sources):
            # Code to build "target" from "source"
            SYSTEM = env['SYSTEM']
            # Install headers under <PREFIX>/include/<target parts>.
            targets = env.Install(os.path.join(env['PREFIX'], "include", *target.split('_')),
                [source for source in sources if source.suffix in ['.h', '.hpp', '.hxx', '.h++']])
            if SYSTEM == 'osx':
                # OS X: allow undefined symbols to be resolved at load time.
                kwargs = dict(FRAMEWORKSFLAGS = '-flat_namespace -undefined suppress')
            else:
                kwargs = dict()
            # Build the shared library from the C/C++ translation units.
            targets += env.SharedLibrary(os.path.join(env['PREFIX'], "lib", target),
                [source for source in sources if source.suffix in ['.c', '.cpp', '.cxx', '.c++']],
                **kwargs)
            if SYSTEM == 'win':
                # Windows: install the .dll into bin/ and delete the .exp file.
                dll = [target for target in targets if target.suffix == '.dll'].pop()
                exp = [target for target in targets if target.suffix == '.exp'].pop()
                lib = [target for target in targets if target.suffix == '.lib'].pop()
                targets = [target for target in targets if not target.suffix in ['.dll', '.exp', '.lib']]
                targets += env.Install(os.path.join(env['PREFIX'], "bin"), dll)
                targets += env.Command(lib, [exp, dll], [Delete("$SOURCE")])
            return targets
        env.AddMethod(BuildCpp)
def exists(env):
    """Report that this SCons tool is always available."""
    return 1
| import os
from SCons.Defaults import Move
def generate(env):
    """Add Builders and construction variables to the Environment."""
    if not 'cpp' in env['TOOLS'][:-1]:
        env.Tool('system')
        env.Tool('prefix')
        def BuildCpp(env, target, sources):
            # Code to build "target" from "source"
            SYSTEM = env['SYSTEM']
            # Install headers under <PREFIX>/include/<target parts>.
            targets = env.Install(os.path.join(env['PREFIX'], "include", *target.split('_')),
                [source for source in sources if source.suffix in ['.h', '.hpp', '.hxx', '.h++']])
            if SYSTEM == 'osx':
                # OS X: allow undefined symbols to be resolved at load time.
                kwargs = dict(FRAMEWORKSFLAGS = '-flat_namespace -undefined suppress')
            else:
                kwargs = dict()
            # Build the shared library from the C/C++ translation units.
            targets += env.SharedLibrary(os.path.join(env['PREFIX'], "lib", target),
                [source for source in sources if source.suffix in ['.c', '.cpp', '.cxx', '.c++']],
                **kwargs)
            if SYSTEM == 'win':
                dll = [target for target in targets if target.suffix == '.dll'].pop()
                exp = [target for target in targets if target.suffix == '.exp'].pop()
                lib = [target for target in targets if target.suffix == '.lib'].pop()
                targets = [target for target in targets if not target.suffix in ['.dll', '.exp', '.lib']]
                targets += env.Install(os.path.join(env['PREFIX'], "bin"), dll)
                # NOTE(review): Delete is referenced here but this module only
                # imports Move from SCons.Defaults, so this raises NameError --
                # verify (the import should be Delete).
                targets += env.Command(lib, [exp, dll], [Delete("$SOURCE")])
            return targets
        env.AddMethod(BuildCpp)
def exists(env):
    """This SCons tool can always be loaded."""
    return 1
| Python | 0.000001 |
e58b94f29888ac1c48bec77cb08fc90919c7720b | add filename attribute | src/twelve_tone/midi.py | src/twelve_tone/midi.py | from miditime.miditime import MIDITime
class MIDIFile(object):
    """Thin wrapper around miditime's MIDITime for writing simple melodies."""
    def __init__(self, BPM=120, filename='example.mid'):
        self.pattern = MIDITime(BPM, filename)
        self.step_counter = 0
        self.filename = filename
    def create(self, notes):
        """Write *notes* as one track, one note per beat, starting at C4."""
        offset = 60
        attack = 200
        beats = 1
        midinotes = []
        for note in notes:
            # Map note number onto a MIDI pitch relative to the offset.
            midinotes.append([self.step_counter, (note - 1) + offset, attack, beats])
            self.step_counter += 1
        # Add a track with those notes and write the .mid file to disk.
        self.pattern.add_track(midinotes)
        self.pattern.save_midi()
| from miditime.miditime import MIDITime
class MIDIFile(object):
    """Thin wrapper around miditime's MIDITime for writing simple melodies."""
    def __init__(self, BPM=120, filename='example.mid'):
        self.pattern = MIDITime(BPM, filename)
        # Next beat position; advances by one per note written.
        self.step_counter = 0
    def create(self, notes):
        """Write *notes* as a single track, one note per beat."""
        midinotes = []
        offset = 60
        attack = 200
        beats = 1
        for note in notes:
            # Map the note number onto a MIDI pitch relative to the offset.
            pitch = (note - 1) + offset
            midinote = [self.step_counter, pitch, attack, beats]
            midinotes.append(midinote)
            self.step_counter = self.step_counter + 1
        # Add a track with those notes
        self.pattern.add_track(midinotes)
        # Output the .mid file
        self.pattern.save_midi()
| Python | 0.000002 |
70f0d321325f3a7d9966c11c39dfb2ef6ecea97e | add testcase for SNMPv3 | scripts/cli/test_service_snmp.py | scripts/cli/test_service_snmp.py | #!/usr/bin/env python3
#
# Copyright (C) 2019-2020 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import unittest
from vyos.validate import is_ipv4
from psutil import process_iter
import vyos.config
import vyos.configsession
import vyos.util as util
SNMPD_CONF = '/etc/snmp/snmpd.conf'
base_path = ['service', 'snmp']
def get_config_value(key):
    """Return all values configured for *key* in the generated snmpd.conf.

    Matches lines of the form ``<key> <value>`` and returns the list of
    value strings (may be empty).
    """
    tmp = util.read_file(SNMPD_CONF)
    # re.escape() so keys containing regex metacharacters match literally.
    return re.findall(r'\n?{}\s+(.*)'.format(re.escape(key)), tmp)
class TestSystemNameServer(unittest.TestCase):
    """Smoke tests that configure the SNMP service through a VyOS config
    session and verify the generated snmpd.conf and the running daemon."""
    def setUp(self):
        # Each test gets its own config session keyed by this process id.
        self.session = vyos.configsession.ConfigSession(os.getpid())
        env = self.session.get_session_env()
        self.config = vyos.config.Config(session_env=env)
    def tearDown(self):
        # Delete SNMP configuration
        self.session.delete(base_path)
        self.session.commit()
    def test_snmp(self):
        """ Check if SNMP can be configured and service runs """
        clients = ['192.0.2.1', '2001:db8::1']
        networks = ['192.0.2.128/25', '2001:db8:babe::/48']
        listen = ['127.0.0.1', '::1']
        for auth in ['ro', 'rw']:
            community = 'VyOS' + auth
            self.session.set(base_path + ['community', community, 'authorization', auth])
            for client in clients:
                self.session.set(base_path + ['community', community, 'client', client])
            for network in networks:
                self.session.set(base_path + ['community', community, 'network', network])
        for addr in listen:
            self.session.set(base_path + ['listen-address', addr])
        self.session.set(base_path + ['contact', 'maintainers@vyos.io'])
        self.session.set(base_path + ['location', 'qemu'])
        self.session.commit()
        # verify listen address, it will be returned as
        # ['unix:/run/snmpd.socket,udp:127.0.0.1:161,udp6:[::1]:161']
        # thus we need to transform this into a proper list
        config = get_config_value('agentaddress')[0]
        expected = 'unix:/run/snmpd.socket'
        for addr in listen:
            if is_ipv4(addr):
                expected += ',udp:{}:161'.format(addr)
            else:
                expected += ',udp6:[{}]:161'.format(addr)
        self.assertTrue(expected in config)
        # Check for running process
        self.assertTrue("snmpd" in (p.name() for p in process_iter()))
    def test_snmpv3(self):
        """ Check if SNMPv3 can be configured and service runs"""
        self.session.set(base_path + ['v3', 'engineid', '0xaffedeadbeef'])
        self.session.set(base_path + ['v3', 'group', 'default', 'mode', 'ro'])
        # check validate() - a view must be created before this can be committed
        with self.assertRaises(vyos.configsession.ConfigSessionError):
            self.session.commit()
        self.session.set(base_path + ['v3', 'view', 'default', 'oid', '1'])
        self.session.set(base_path + ['v3', 'group', 'default', 'view', 'default'])
        self.session.commit()
        # create user
        for authpriv in ['auth', 'privacy']:
            self.session.set(base_path + ['v3', 'user', 'vyos', authpriv, 'plaintext-key', 'vyos1234'])
        self.session.set(base_path + ['v3', 'user', 'vyos', 'group', 'default'])
        # TODO: read in config file and check values
        # Check for running process
        self.assertTrue("snmpd" in (p.name() for p in process_iter()))
if __name__ == '__main__':
    # Run the test cases above when executed directly.
    unittest.main()
| #!/usr/bin/env python3
#
# Copyright (C) 2019-2020 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import unittest
from vyos.validate import is_ipv4
from psutil import process_iter
import vyos.config
import vyos.configsession
import vyos.util as util
SNMPD_CONF = '/etc/snmp/snmpd.conf'
base_path = ['service', 'snmp']
def get_config_value(key):
    """Return all values configured for *key* in the generated snmpd.conf."""
    tmp = util.read_file(SNMPD_CONF)
    # NOTE(review): *key* is interpolated into the regex unescaped -- fine for
    # the plain-word keys used below, but metacharacters would misfire.
    return re.findall(r'\n?{}\s+(.*)'.format(key), tmp)
class TestSystemNameServer(unittest.TestCase):
    """Smoke test that configures the SNMP service through a VyOS config
    session and verifies the generated snmpd.conf and the running daemon."""
    def setUp(self):
        # Each test gets its own config session keyed by this process id.
        self.session = vyos.configsession.ConfigSession(os.getpid())
        env = self.session.get_session_env()
        self.config = vyos.config.Config(session_env=env)
    def tearDown(self):
        # Delete SNMP configuration
        self.session.delete(base_path)
        self.session.commit()
    def test_snmp(self):
        """ Check if SNMP can be configured and service runs """
        clients = ['192.0.2.1', '2001:db8::1']
        networks = ['192.0.2.128/25', '2001:db8:babe::/48']
        listen = ['127.0.0.1', '::1']
        for auth in ['ro', 'rw']:
            community = 'VyOS' + auth
            self.session.set(base_path + ['community', community, 'authorization', auth])
            for client in clients:
                self.session.set(base_path + ['community', community, 'client', client])
            for network in networks:
                self.session.set(base_path + ['community', community, 'network', network])
        for addr in listen:
            self.session.set(base_path + ['listen-address', addr])
        self.session.set(base_path + ['contact', 'maintainers@vyos.io'])
        self.session.set(base_path + ['location', 'qemu'])
        self.session.commit()
        # verify listen address, it will be returned as
        # ['unix:/run/snmpd.socket,udp:127.0.0.1:161,udp6:[::1]:161']
        # thus we need to transform this into a proper list
        config = get_config_value('agentaddress')[0]
        expected = 'unix:/run/snmpd.socket'
        for addr in listen:
            if is_ipv4(addr):
                expected += ',udp:{}:161'.format(addr)
            else:
                expected += ',udp6:[{}]:161'.format(addr)
        self.assertTrue(expected in config)
        # Check for running process
        self.assertTrue("snmpd" in (p.name() for p in process_iter()))
if __name__ == '__main__':
    # Run the test cases above when executed directly.
    unittest.main()
| Python | 0.000004 |
aa46499c43bd7e4162dc657fa898b1df5e2dcee9 | Exclude windows from extended ascii mode because travis is unhappy | src/compas/__main__.py | src/compas/__main__.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import pkg_resources
import compas
if __name__ == '__main__':
c = 'DCDHDCACDHDCAEDEACDHDCAEDEACDHDCAEDCDEACDHDCADCACDEADHDCAEDADEACDHDADADADHDCACDCAEDEACDCACDHDCAEDEACDCAEDEACDCAEDBACDHDAEDEACDADADCAEDBADHDAGDEACDADEADCAEDEADHDBADEDCAEDEACDEDAGDHDADCAEDACDCADADADHDAGDADEACAEDADBADHDAGDCADEAEDEACDBADHDAGDCAEDADEACDBADHDBADADADADAGDHDAGDCADEDADBADHDBADADAGDHDEADEAEDEAEDADHDEADEDADEDADHDEACDADCAEDHDACDADCADHDEACDADCAEDHDEACDADCAEDHDEACDADCAEDHDEAFCDADCAEDHDEAEDHDEDH' # noqa: E501
r = 'fGfB]DSD]BYBHEIEHCXBUCFBYBFCUBSBEBOEOBEBSBQBEPBGBPBEQBOBDBRIRBDBOBNEUGUENBLBECRBCBCBCBRCEBLBKBDBBBDBNBCBEBCBNBDBBBDBKBKDBFCDBIDIDIBDCFBDKBJDBKCCCDDKBCDCCCKBDJBIBDPCBBCBMBCBBCPDBIBIERBCBBBCGCBCDREIBIDBQDEBDCDBEDQBDIBIDBOBDIBCBIBCBOBDIBIDBNBCBKCKBCBNBDIBIBDMDMCMDMDBIBJDBHBFNCNGHBDJBJBDGkGDBJBKBDFBGB[BGBFEKBLBDHCPCPCHELBMBDBWCWBDBMBOEBUCUBEOBPBEBSCSBEBPBRBEBQCQBEBRBUBECMCMCECTBXBFBDGCGDGCWB[DXC[BbObB' # noqa: E501
    maps = ' !-X_`|\n' if compas.IPY or compas.WINDOWS else ' ▌▀█▄`▐\n'
    # Decode the run-length-encoded banner above: each (n, o) pair prints
    # (ord(n) - 65) copies of the glyph selected by ord(o) - 65.  The plain
    # ASCII glyph set is used on IronPython and Windows.
    for n, o in zip(r, c):
        print((ord(n) - 65) * maps[ord(o) - 65], end='')
    print()
    print('Yay! COMPAS is installed correctly!')
    print()
    print('COMPAS: {}'.format(compas.__version__))
    print('Python: {}'.format(str(sys.version)))
    # List any installed compas-* extension packages (excluding COMPAS itself).
    working_set = pkg_resources.working_set
    packages = set([p.project_name for p in working_set]) - set(['COMPAS'])
    compas_pkgs = [p for p in packages if p.lower().startswith('compas')]
    if compas_pkgs:
        print('Installed COMPAS extensions: {}'.format([p for p in compas_pkgs]))
| # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import pkg_resources
import compas
if __name__ == '__main__':
c = 'DCDHDCACDHDCAEDEACDHDCAEDEACDHDCAEDCDEACDHDCADCACDEADHDCAEDADEACDHDADADADHDCACDCAEDEACDCACDHDCAEDEACDCAEDEACDCAEDBACDHDAEDEACDADADCAEDBADHDAGDEACDADEADCAEDEADHDBADEDCAEDEACDEDAGDHDADCAEDACDCADADADHDAGDADEACAEDADBADHDAGDCADEAEDEACDBADHDAGDCAEDADEACDBADHDBADADADADAGDHDAGDCADEDADBADHDBADADAGDHDEADEAEDEAEDADHDEADEDADEDADHDEACDADCAEDHDACDADCADHDEACDADCAEDHDEACDADCAEDHDEACDADCAEDHDEAFCDADCAEDHDEAEDHDEDH' # noqa: E501
r = 'fGfB]DSD]BYBHEIEHCXBUCFBYBFCUBSBEBOEOBEBSBQBEPBGBPBEQBOBDBRIRBDBOBNEUGUENBLBECRBCBCBCBRCEBLBKBDBBBDBNBCBEBCBNBDBBBDBKBKDBFCDBIDIDIBDCFBDKBJDBKCCCDDKBCDCCCKBDJBIBDPCBBCBMBCBBCPDBIBIERBCBBBCGCBCDREIBIDBQDEBDCDBEDQBDIBIDBOBDIBCBIBCBOBDIBIDBNBCBKCKBCBNBDIBIBDMDMCMDMDBIBJDBHBFNCNGHBDJBJBDGkGDBJBKBDFBGB[BGBFEKBLBDHCPCPCHELBMBDBWCWBDBMBOEBUCUBEOBPBEBSCSBEBPBRBEBQCQBEBRBUBECMCMCECTBXBFBDGCGDGCWB[DXC[BbObB' # noqa: E501
    maps = ' !-X_`|\n' if compas.IPY else ' ▌▀█▄`▐\n'
    # Decode the run-length-encoded banner above: each (n, o) pair prints
    # (ord(n) - 65) copies of the glyph selected by ord(o) - 65.  The plain
    # ASCII glyph set is used on IronPython.
    for n, o in zip(r, c):
        print((ord(n) - 65) * maps[ord(o) - 65], end='')
    print()
    print('Yay! COMPAS is installed correctly!')
    print()
    print('COMPAS: {}'.format(compas.__version__))
    print('Python: {}'.format(str(sys.version)))
    # List any installed compas-* extension packages (excluding COMPAS itself).
    working_set = pkg_resources.working_set
    packages = set([p.project_name for p in working_set]) - set(['COMPAS'])
    compas_pkgs = [p for p in packages if p.lower().startswith('compas')]
    if compas_pkgs:
        print('Installed COMPAS extensions: {}'.format([p for p in compas_pkgs]))
| Python | 0 |
ee9646c5e71dcbaf776d9f9f929dead5e5c1fa82 | Revert "cookie.value() didn't really need to be a string, since QSettings will take a QVariant anyways." | python/pyphantomjs/cookiejar.py | python/pyphantomjs/cookiejar.py | '''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PyQt4.QtCore import QSettings
from PyQt4.QtNetwork import QNetworkCookie, QNetworkCookieJar
class CookieJar(QNetworkCookieJar):
    """Cookie jar that persists cookies to an INI file via QSettings.

    Cookies are grouped by the URL's host; each cookie is stored as a
    name/value pair under that host's section.
    """
    def __init__(self, parent, cookiesFile):
        super(CookieJar, self).__init__(parent)
        self.m_cookiesFile = cookiesFile
    def setCookiesFromUrl(self, cookieList, url):
        # One QSettings group per host.
        # NOTE(review): beginGroup() has no matching endGroup(); the
        # QSettings object is short-lived, so it works -- verify intent.
        settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
        settings.beginGroup(url.host())
        for cookie in cookieList:
            settings.setValue(str(cookie.name()), str(cookie.value()))
        settings.sync()
        return True
    def cookiesForUrl(self, url):
        # Rebuild QNetworkCookie objects from the host's stored section.
        settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
        cookieList = []
        settings.beginGroup(url.host())
        for cname in settings.childKeys():
            cookieList.append(QNetworkCookie(cname, settings.value(cname)))
        return cookieList
| '''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PyQt4.QtCore import QSettings
from PyQt4.QtNetwork import QNetworkCookie, QNetworkCookieJar
class CookieJar(QNetworkCookieJar):
    """Cookie jar that persists cookies to an INI file via QSettings,
    grouped by the URL's host."""
    def __init__(self, parent, cookiesFile):
        super(CookieJar, self).__init__(parent)
        self.m_cookiesFile = cookiesFile
    def setCookiesFromUrl(self, cookieList, url):
        settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
        settings.beginGroup(url.host())
        for cookie in cookieList:
            # Value is stored as-is (a QVariant-compatible object).
            settings.setValue(str(cookie.name()), cookie.value())
        settings.sync()
        return True
    def cookiesForUrl(self, url):
        # Rebuild QNetworkCookie objects from the host's stored section.
        settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
        cookieList = []
        settings.beginGroup(url.host())
        for cname in settings.childKeys():
            cookieList.append(QNetworkCookie(cname, settings.value(cname)))
        return cookieList
| Python | 0 |
1c8bd21fe895260254684d3e2b2f9f5b70fdb91f | Fix error msg | python/smurff/smurff/prepare.py | python/smurff/smurff/prepare.py | import numpy as np
import scipy as sp
import pandas as pd
import scipy.sparse
import numbers
from .helper import SparseTensor
def make_train_test(Y, ntest):
    """Splits a sparse matrix Y into a train and a test matrix.

    Y      scipy sparse matrix (coo_matrix, csr_matrix or csc_matrix)
    ntest  either a float below 1.0 or integer.
           if float, then indicates the ratio of test cells
           if integer, then indicates the number of test cells

    returns Ytrain, Ytest (type coo_matrix), disjoint random splits of the
    non-zero cells of Y, both with the same shape as Y.
    """
    # isinstance on the public class names (instead of comparing type()
    # against scipy's private submodule paths) also accepts subclasses and
    # survives scipy reorganizing its internal module layout.
    if not isinstance(Y, (sp.sparse.coo_matrix, sp.sparse.csr_matrix, sp.sparse.csc_matrix)):
        raise TypeError("Unsupported Y type: " + str(type(Y)))
    if not isinstance(ntest, numbers.Real) or ntest < 0:
        raise TypeError("ntest has to be a non-negative number (number or ratio of test samples).")
    Y = Y.tocoo(copy=False)
    # A fractional ntest is interpreted as a ratio of the non-zero cells.
    if ntest < 1:
        ntest = Y.nnz * ntest
    ntest = int(round(ntest))
    # Random permutation of the cell indexes: first ntest go to the test set.
    rperm = np.random.permutation(Y.nnz)
    train = rperm[ntest:]
    test = rperm[0:ntest]
    Ytrain = sp.sparse.coo_matrix((Y.data[train], (Y.row[train], Y.col[train])), shape=Y.shape)
    Ytest = sp.sparse.coo_matrix((Y.data[test], (Y.row[test], Y.col[test])), shape=Y.shape)
    return Ytrain, Ytest
def make_train_test_df(Y, ntest, shape = None):
    """Splits rows of dataframe Y into a train and a test dataframe.

    Y      pandas dataframe
    ntest  either a float below 1.0 or integer.
           if float, then indicates the ratio of test cells
           if integer, then indicates the number of test cells
    shape  optional shape forwarded to the train SparseTensor

    returns Ytrain, Ytest (type SparseTensor)
    """
    # isinstance also accepts DataFrame subclasses; comparing type() against
    # the internal pd.core.frame path does not.
    if not isinstance(Y, pd.DataFrame):
        raise TypeError("Y should be DataFrame.")
    if not isinstance(ntest, numbers.Real) or ntest < 0:
        raise TypeError("ntest has to be a non-negative number (number or ratio of test samples).")
    ## randomly spliting train-test
    if ntest < 1:
        ntest = Y.shape[0] * ntest
    ntest = int(round(ntest))
    rperm = np.random.permutation(Y.shape[0])
    train = rperm[ntest:]
    test = rperm[0:ntest]
    Ytrain = SparseTensor(Y.iloc[train], shape)
    # The test tensor inherits the (possibly inferred) train shape.
    Ytest = SparseTensor(Y.iloc[test], Ytrain.shape)
    return Ytrain, Ytest
| import numpy as np
import scipy as sp
import pandas as pd
import scipy.sparse
import numbers
from .helper import SparseTensor
def make_train_test(Y, ntest):
    """Splits a sparse matrix Y into a train and a test matrix.

    Y      scipy sparse matrix (coo_matrix, csr_matrix or csc_matrix)
    ntest  either a float below 1.0 or integer.
           if float, then indicates the ratio of test cells
           if integer, then indicates the number of test cells

    returns Ytrain, Ytest (type coo_matrix)
    """
    if type(Y) not in [sp.sparse.coo_matrix, sp.sparse.csr_matrix, sp.sparse.csc_matrix]:
        # Bug fix: the message used to be built as '"...%s" + type(Y)', which
        # itself raised "can only concatenate str ..." instead of reporting
        # the unsupported type; use %-formatting (and the public class names).
        raise TypeError("Unsupported Y type: %s" % type(Y))
    if not isinstance(ntest, numbers.Real) or ntest < 0:
        raise TypeError("ntest has to be a non-negative number (number or ratio of test samples).")
    Y = Y.tocoo(copy = False)
    # A fractional ntest is interpreted as a ratio of the non-zero cells.
    if ntest < 1:
        ntest = Y.nnz * ntest
    ntest = int(round(ntest))
    # Random permutation of the cell indexes: first ntest go to the test set.
    rperm = np.random.permutation(Y.nnz)
    train = rperm[ntest:]
    test  = rperm[0:ntest]
    Ytrain = sp.sparse.coo_matrix( (Y.data[train], (Y.row[train], Y.col[train])), shape=Y.shape )
    Ytest  = sp.sparse.coo_matrix( (Y.data[test],  (Y.row[test],  Y.col[test])),  shape=Y.shape )
    return Ytrain, Ytest
def make_train_test_df(Y, ntest, shape = None):
    """Splits rows of dataframe Y into a train and a test dataframe.

    Y      pandas dataframe
    ntest  either a float below 1.0 or integer.
           if float, then indicates the ratio of test cells
           if integer, then indicates the number of test cells

    returns Ytrain, Ytest (type coo_matrix)
    """
    if type(Y) != pd.core.frame.DataFrame:
        raise TypeError("Y should be DataFrame.")
    if not isinstance(ntest, numbers.Real) or ntest < 0:
        raise TypeError("ntest has to be a non-negative number (number or ratio of test samples).")
    ## randomly spliting train-test
    if ntest < 1:
        ntest = Y.shape[0] * ntest
    ntest = int(round(ntest))
    rperm = np.random.permutation(Y.shape[0])
    train = rperm[ntest:]
    test = rperm[0:ntest]
    Ytrain = SparseTensor(Y.iloc[train], shape)
    # The test tensor inherits the (possibly inferred) train shape.
    Ytest = SparseTensor(Y.iloc[test], Ytrain.shape)
    return Ytrain, Ytest
| Python | 0.000023 |
980b3eded1e06c8f152b873531273c1b0154a755 | Update Visualization-commandCenter.py | dataCenter/Visualization-commandCenter.py | dataCenter/Visualization-commandCenter.py | import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import pickle
with open('firefox-bot/config/iframe.txt', 'r') as loginInfo:
newName = loginInfo.readline()
newName = newName.rstrip()
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
stats = load_obj('firefox-bot/statistics/' + newName)
print(stats)
d = stats['draws']
comItems = ('skill', 'super')
y_pos = np.arange(len(comItems))
width=(1/5)
for index, item in enumerate(comItems):
plt.bar(index, stats[item], width, label=item + ' ' + str(round((stats[item]/d)*100, 3)) + '%')
#' frequency: 1 / ' + str(round(spins/stats[item])))
if(stats[item]):
print(item, '1 out of ', round(d/stats[item]), ' draws')
plt.legend(loc='best')
plt.xticks(y_pos, comItems)
plt.ylabel('total collected')
plt.xlabel('items')
plt.title('totalDraws: ' + str(int(d)))
plt.show()
| import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import pickle
with open('firefox-bot/config/iframe.txt', 'r') as loginInfo:
newName = loginInfo.readline()
newName = newName.rstrip()
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
stats = load_obj('firefox-bot/statistics/' + newName')
print(stats)
d = stats['draws']
comItems = ('skill', 'super')
y_pos = np.arange(len(comItems))
width=(1/5)
for index, item in enumerate(comItems):
plt.bar(index, stats[item], width, label=item + ' ' + str(round((stats[item]/d)*100, 3)) + '%')
#' frequency: 1 / ' + str(round(spins/stats[item])))
if(stats[item]):
print(item, '1 out of ', round(d/stats[item]), ' draws')
plt.legend(loc='best')
plt.xticks(y_pos, comItems)
plt.ylabel('total collected')
plt.xlabel('items')
plt.title('totalDraws: ' + str(int(d)))
plt.show()
| Python | 0 |
18be6e0d3ee656f150e54bc0abe3959d92e2b35c | add message for script completion to dashboard | cea/api.py | cea/api.py | """
Provide access to the scripts exported by the City Energy Analyst.
"""
from __future__ import print_function
import datetime
def register_scripts():
    """Create a module-level wrapper function for every CEA script.

    Each wrapper accepts keyword arguments matching the script's
    configuration parameters, applies them to a shared Configuration
    object and then invokes the script's ``main``, reporting the elapsed
    execution time.
    """
    import cea.config
    import cea.scripts
    import importlib
    config = cea.config.Configuration()
    def script_wrapper(cea_script):
        # Import the script's module once, at registration time.
        module_path = cea_script.module
        script_module = importlib.import_module(module_path)
        def script_runner(config=config, **kwargs):
            # Only the parameters declared by this script may be overridden.
            option_list = cea_script.parameters
            config.restrict_to(option_list)
            for section, parameter in config.matching_parameters(option_list):
                parameter_py_name = parameter.name.replace('-', '_')
                if parameter_py_name in kwargs:
                    parameter.set(kwargs[parameter_py_name])
            # run the script
            cea_script.print_script_configuration(config)
            t0 = datetime.datetime.now()
            script_module.main(config)
            # print success message
            msg = "Script completed. Execution time: %.2fs" % (datetime.datetime.now() - t0).total_seconds()
            print("")
            print("-" * len(msg))
            print(msg)
        if script_module.__doc__:
            script_runner.__doc__ = script_module.__doc__.strip()
        else:
            script_runner.__doc__ = 'FIXME: Add API documentation to {}'.format(module_path)
        return script_runner
    for cea_script in sorted(cea.scripts.list_scripts()):
        # Expose e.g. the "demand-graphs" script as attribute demand_graphs.
        script_py_name = cea_script.name.replace('-', '_')
        globals()[script_py_name] = script_wrapper(cea_script)
register_scripts()
if __name__ == '__main__':
print(demand.__doc__) | """
Provide access to the scripts exported by the City Energy Analyst.
"""
from __future__ import print_function
def register_scripts():
    """Create a module-level wrapper function for every CEA script.

    Each wrapper accepts keyword arguments matching the script's
    configuration parameters, applies them to a shared Configuration
    object and then invokes the script's ``main``.
    """
    import cea.config
    import cea.scripts
    import importlib
    config = cea.config.Configuration()
    def script_wrapper(cea_script):
        # Import the script's module once, at registration time.
        module_path = cea_script.module
        script_module = importlib.import_module(module_path)
        def script_runner(config=config, **kwargs):
            # Only the parameters declared by this script may be overridden.
            option_list = cea_script.parameters
            config.restrict_to(option_list)
            for section, parameter in config.matching_parameters(option_list):
                parameter_py_name = parameter.name.replace('-', '_')
                if parameter_py_name in kwargs:
                    parameter.set(kwargs[parameter_py_name])
            # run the script
            cea_script.print_script_configuration(config)
            script_module.main(config)
        if script_module.__doc__:
            script_runner.__doc__ = script_module.__doc__.strip()
        else:
            script_runner.__doc__ = 'FIXME: Add API documentation to {}'.format(module_path)
        return script_runner
    for cea_script in sorted(cea.scripts.list_scripts()):
        # Expose e.g. the "demand-graphs" script as attribute demand_graphs.
        script_py_name = cea_script.name.replace('-', '_')
        globals()[script_py_name] = script_wrapper(cea_script)
register_scripts()
if __name__ == '__main__':
print(demand.__doc__) | Python | 0 |
ef5c049a4c32e69c9ce88c958ae8272bdfddeba4 | Add area info in check price result | check_price.py | check_price.py | # -*- coding:utf-8 -*-
import pymysql
import pymysql.cursors
from prettytable import PrettyTable
from colorama import init, Fore
import pdb
database_name = "house_price_04"
# Open the database connection.
# NOTE(review): credentials are hard-coded here -- consider loading them
# from configuration or the environment.
db=pymysql.connect("localhost","root","aB123456",database_name,charset='utf8mb4')
# Obtain a cursor for executing queries.
cursor=db.cursor()
# The community name to search for is read inside main() (see check_name);
# ``data`` accumulates the matching rows found across all tables.
data=[]
def main():
    """Prompt for a community name, search every table and print matches."""
    global check_name
    check_name= input("请输入小区名称:");
    # Collect the info of communities whose name contains the keyword.
    header = '地区 id 小区名称 价格 在售'.split()
    pt = PrettyTable()
    pt._set_field_names(header)
    # Fetch the names of all tables.
    tables=show_tables()
    for table in tables:
        select_info(table)
    for row in data:
        # row_list=list(row)
        # Colorize area/name in green and price in red for the terminal.
        new_row=[
            Fore.GREEN + row[0] + Fore.RESET,
            row[1],
            Fore.GREEN + row[2] + Fore.RESET,
            Fore.RED + str(row[3]) + Fore.RESET,
            row[4],
        ]
        pt.add_row(new_row)
    print(pt)
def show_tables():
    """Return the names of all tables in the connected database.

    Returns an empty tuple if the query fails.  (Previously ``tables`` was
    left unbound on failure, so the ``return`` itself raised.)
    """
    sql = "show tables;"
    tables = ()
    try:
        cursor.execute(sql)
        tables = cursor.fetchall()
    except Exception:
        print("Error: unable to fetch table data")
    return tables
def select_info(table):
    """Collect rows from *table* whose name contains the search keyword.

    Matching rows are prefixed with the area (the first character of the
    table name) and appended to the global ``data`` list.
    """
    sql = "SELECT * FROM %s;" % table
    try:
        # Execute the SQL statement and fetch every record.
        cursor.execute(sql)
        results = cursor.fetchall()
        for row in results:
            name = row[1]
            if check_name in name:
                area = table[0]
                row_list = list(row)
                row_list.insert(0, area)
                data.append(row_list)
    except Exception:
        # Fixed the garbled message (was "unable to 小区 data").
        print("Error: unable to fetch 小区 data")
if __name__ == '__main__':
main() | # -*- coding:utf-8 -*-
import pymysql
import pymysql.cursors
from prettytable import PrettyTable
from colorama import init, Fore
database_name = "house_price_04"
# Open the database connection.
db=pymysql.connect("localhost","root","aB123456",database_name,charset='utf8mb4')
# Obtain a cursor for executing queries.
cursor=db.cursor()
# Prompt for the community name to search for.
# NOTE(review): this runs at import time and blocks on stdin.
check_name= input("请输入小区名称:");
# Stores info of communities whose name contains the keyword.
data=[]
def main():
    """Query every table for the keyword and pretty-print matching rows."""
    header = 'id 小区名称 价格 在售'.split()
    pt = PrettyTable()
    pt._set_field_names(header)
    # Fetch the names of all tables.
    tables=show_tables()
    for table in tables:
        select_info(table)
    for row in data:
        # NOTE(review): row_list is assigned but never used.
        row_list=list(row)
        # Colorize name in green and price in red for the terminal.
        new_row=[
            row[0],
            Fore.GREEN + row[1] + Fore.RESET,
            Fore.RED + str(row[2]) + Fore.RESET,
            row[3],
        ]
        pt.add_row(new_row)
    print(pt)
def show_tables():
sql="show tables;"
try:
cursor.execute(sql)
tables=cursor.fetchall()
except:
print ("Error: unable to fetch data")
return tables
def select_info(table):
sql = "SELECT * FROM %s;" % table
try:
# 执行SQL语句
cursor.execute(sql)
# 获取所有记录列表
results = cursor.fetchall()
for row in results:
name=row[1]
if(check_name in name):
data.append(row)
except:
print ("Error: unable to fetch data")
if __name__ == '__main__':
main() | Python | 0 |
864669eb606f0831c6503894c87c62ea3841654e | fix for HUnion | hwt/hdl/types/utils.py | hwt/hdl/types/utils.py | from typing import Union, List
from hwt.hdl.types.array import HArray
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.stream import HStream
from hwt.hdl.types.struct import HStruct
from hwt.hdl.types.typeCast import toHVal
from hwt.hdl.types.union import HUnion
from hwt.hdl.value import Value
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
def walkFlattenFields(sigOrVal: Union[RtlSignalBase, Value], skipPadding=True):
"""
Walk all simple values in HStruct or HArray
"""
t = sigOrVal._dtype
if isinstance(t, Bits):
yield sigOrVal
elif isinstance(t, HUnion):
yield from walkFlattenFields(sigOrVal._val, skipPadding=skipPadding)
elif isinstance(t, HStruct):
for f in t.fields:
isPadding = f.name is None
if not isPadding or not skipPadding:
if isPadding:
v = f.dtype.from_py(None)
else:
v = getattr(sigOrVal, f.name)
yield from walkFlattenFields(v)
elif isinstance(t, HArray):
for item in sigOrVal:
yield from walkFlattenFields(item)
elif isinstance(t, HStream):
assert isinstance(sigOrVal, Value), sigOrVal
for v in sigOrVal:
yield from walkFlattenFields(v)
else:
raise NotImplementedError(t)
def HdlValue_unpack(t: HdlType,
data: List[Union[Value, RtlSignalBase, int]],
getDataFn=None, dataWidth=None):
"""
Parse raw Bits array to a value of specified HdlType
"""
if getDataFn is None:
assert dataWidth is not None
def _getDataFn(x):
return toHVal(x)._auto_cast(Bits(dataWidth))
getDataFn = _getDataFn
val = t.from_py(None)
fData = iter(data)
# actual is storage variable for items from frameData
actualOffset = 0
actual = None
for v in walkFlattenFields(val, skipPadding=False):
# walk flatten fields and take values from fData and parse them to
# field
required = v._dtype.bit_length()
if actual is None:
actualOffset = 0
try:
actual = getDataFn(next(fData))
except StopIteration:
raise ValueError("Input data too short")
if dataWidth is None:
dataWidth = actual._dtype.bit_length()
actuallyHave = dataWidth
else:
actuallyHave = actual._dtype.bit_length() - actualOffset
while actuallyHave < required:
# collect data for this field
try:
d = getDataFn(next(fData))
except StopIteration:
raise ValueError("Input data too short")
actual = d._concat(actual)
actuallyHave += dataWidth
if actuallyHave >= required:
# parse value of actual to field
# skip padding
_v = actual[(required + actualOffset):actualOffset]
_v = _v._auto_cast(v._dtype)
v.val = _v.val
v.vld_mask = _v.vld_mask
# update slice out what was taken
actuallyHave -= required
actualOffset += required
if actuallyHave == 0:
actual = None
if actual is not None:
assert actual._dtype.bit_length(
) - actualOffset < dataWidth, (
"It should be just a padding at the end of frame"
)
return val
def is_only_padding(t: HdlType):
if isinstance(t, HStruct):
for f in t.fields:
if f.name is not None and not is_only_padding(f.dtype):
return False
return True
elif isinstance(t, (HArray, HStream)):
return is_only_padding(t.element_t)
return False
| from typing import Union, List
from hwt.hdl.types.array import HArray
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.stream import HStream
from hwt.hdl.types.struct import HStruct
from hwt.hdl.types.typeCast import toHVal
from hwt.hdl.types.union import HUnion
from hwt.hdl.value import Value
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
def walkFlattenFields(sigOrVal: Union[RtlSignalBase, Value], skipPadding=True):
"""
Walk all simple values in HStruct or HArray
"""
t = sigOrVal._dtype
if isinstance(t, Bits):
yield sigOrVal
elif isinstance(t, HUnion):
yield from walkFlattenFields(sigOrVal._val, skipPadding=skipPadding)
elif isinstance(t, HStruct):
for f in t.fields:
isPadding = f.name is None
if not isPadding or not skipPadding:
if isPadding:
v = f.dtype.from_py(None)
else:
v = getattr(sigOrVal, f.name)
yield from walkFlattenFields(v)
elif isinstance(t, HArray):
for item in sigOrVal:
yield from walkFlattenFields(item)
elif isinstance(t, HStream):
assert isinstance(sigOrVal, Value), sigOrVal
for v in sigOrVal:
yield from walkFlattenFields(v)
else:
raise NotImplementedError(t)
def HdlValue_unpack(t: HdlType,
data: List[Union[Value, RtlSignalBase, int]],
getDataFn=None, dataWidth=None):
"""
Parse raw Bits array to a value of specified HdlType
"""
if getDataFn is None:
assert dataWidth is not None
def _getDataFn(x):
return toHVal(x)._auto_cast(Bits(dataWidth))
getDataFn = _getDataFn
val = t.from_py(None)
fData = iter(data)
# actual is storage variable for items from frameData
actualOffset = 0
actual = None
for v in walkFlattenFields(val, skipPadding=False):
# walk flatten fields and take values from fData and parse them to
# field
required = v._dtype.bit_length()
if actual is None:
actualOffset = 0
try:
actual = getDataFn(next(fData))
except StopIteration:
raise ValueError("Input data too short")
if dataWidth is None:
dataWidth = actual._dtype.bit_length()
actuallyHave = dataWidth
else:
actuallyHave = actual._dtype.bit_length() - actualOffset
while actuallyHave < required:
# collect data for this field
try:
d = getDataFn(next(fData))
except StopIteration:
raise ValueError("Input data too short")
actual = d._concat(actual)
actuallyHave += dataWidth
if actuallyHave >= required:
# parse value of actual to field
# skip padding
_v = actual[(required + actualOffset):actualOffset]
_v = _v._auto_cast(v._dtype)
v.val = _v.val
v.vld_mask = _v.vld_mask
# update slice out what was taken
actuallyHave -= required
actualOffset += required
if actuallyHave == 0:
actual = None
if actual is not None:
assert actual._dtype.bit_length(
) - actualOffset < dataWidth, (
"It should be just a padding at the end of frame"
)
return val
def is_only_padding(t: HdlType):
if isinstance(t, (HStruct, HUnion)):
for f in t.fields:
if f.name is not None and not is_only_padding(f.dtype):
return False
return True
elif isinstance(t, (HArray, HStream)):
return is_only_padding(t.element_t)
return False
| Python | 0.000005 |
94dfdbeae55d4c47c7b1161c68795429ebc0687a | fix pprintInterface for unit with array intf | hwt/simulator/utils.py | hwt/simulator/utils.py | from random import Random
import sys
from hwt.serializer.serializerClases.indent import getIndent
from hwt.synthesizer.interfaceLevel.interfaceUtils.proxy import InterfaceProxy
from hwt.synthesizer.interfaceLevel.mainBases import InterfaceBase
def valueHasChanged(valA, valB):
return valA.val is not valB.val or valA.vldMask != valB.vldMask
def agent_randomize(agent, timeQuantum, seed):
random = Random(seed)
def randomEnProc(simulator):
# small space at start to modify agents when they are inactive
yield simulator.wait(timeQuantum / 4)
while True:
agent.enable = random.random() < 0.5
delay = int(random.random() * timeQuantum)
yield simulator.wait(delay)
return randomEnProc
def pprintInterface(intf, prefix="", indent=0, file=sys.stdout):
"""
Pretty print interface
"""
try:
s = intf._sig
except AttributeError:
s = ""
if s is not "":
s = " " + repr(s)
file.write("".join([getIndent(indent), prefix, repr(intf._getFullName()), s]))
file.write("\n")
for i in intf._interfaces:
if isinstance(intf, InterfaceProxy):
assert isinstance(i, InterfaceProxy), (intf, i)
pprintInterface(i, indent=indent + 1, file=file)
if intf._arrayElemCache:
assert len(intf) == len(intf._arrayElemCache)
for i, p in enumerate(intf):
pprintInterface(p, prefix="p%d:" % i, indent=indent + 1, file=file)
def pprintAgents(unitOrIntf, indent=0, prefix="", file=sys.stdout):
if isinstance(unitOrIntf, InterfaceBase):
ag = unitOrIntf._ag
arrayElemCache = unitOrIntf._arrayElemCache
else:
ag = None
arrayElemCache = None
if ag is not None:
file.write("%s%s%r\n" % (getIndent(indent), prefix, ag))
elif arrayElemCache:
file.write("%s%s\n" % (getIndent(indent), prefix + unitOrIntf._name + ":"))
for i in unitOrIntf._interfaces:
pprintAgents(i, indent + 1, file=file)
if arrayElemCache:
assert len(unitOrIntf) == len(arrayElemCache)
for i, p in enumerate(unitOrIntf):
pprintAgents(p, indent + 1, prefix="p%d:" % i, file=file)
| from random import Random
import sys
from hwt.serializer.serializerClases.indent import getIndent
from hwt.synthesizer.interfaceLevel.interfaceUtils.proxy import InterfaceProxy
def valueHasChanged(valA, valB):
return valA.val is not valB.val or valA.vldMask != valB.vldMask
def agent_randomize(agent, timeQuantum, seed):
random = Random(seed)
def randomEnProc(simulator):
# small space at start to modify agents when they are inactive
yield simulator.wait(timeQuantum / 4)
while True:
agent.enable = random.random() < 0.5
delay = int(random.random() * timeQuantum)
yield simulator.wait(delay)
return randomEnProc
def pprintInterface(intf, prefix="", indent=0, file=sys.stdout):
"""
Pretty print interface
"""
try:
s = intf._sig
except AttributeError:
s = ""
if s is not "":
s = repr(s)
file.write("".join([getIndent(indent), prefix, repr(intf._getFullName()), " ", s]))
file.write("\n")
for i in intf._interfaces:
if isinstance(intf, InterfaceProxy):
assert isinstance(i, InterfaceProxy), (intf, i)
pprintInterface(i, indent=indent + 1, file=file)
if intf._arrayElemCache:
assert len(intf) == len(intf._arrayElemCache)
for i, p in enumerate(intf):
pprintInterface(p, prefix="p%d:" % i, indent=indent + 1, file=file)
def _pprintAgents(intf, indent, prefix="", file=sys.stdout):
if intf._ag is not None:
file.write("%s%s%r\n" % (getIndent(indent), prefix, intf._ag))
for i in intf._interfaces:
_pprintAgents(i, indent + 1, file=file)
if intf._arrayElemCache:
assert len(intf) == len(intf._arrayElemCache)
for i, p in enumerate(intf):
_pprintAgents(p, indent + 1, prefix="p%d:" % i, file=file)
def pprintAgents(unitOrIntf, indent=0, file=sys.stdout):
"""
Pretty print agents
"""
prefix = unitOrIntf._name + ":"
for intf in unitOrIntf._interfaces:
_pprintAgents(intf, indent, prefix, file=file)
| Python | 0 |
222e2bf4728440fdff2675756b4aa08aba4585fb | Update __init__.py | app/__init__.py | app/__init__.py | from flask import Flask, render_template
from flask.ext.mail import Mail
from flask.ext.login import LoginManager
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.pagedown import PageDown
from flask.ext.flatpages import FlatPages
from config import config
from .util import assets
mail = Mail()
moment = Moment()
pagedown = PageDown()
pages = FlatPages()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask.ext.sslify import SSLify
sslify = SSLify(app)
mail.init_app(app)
moment.init_app(app)
pagedown.init_app(app)
pages.init_app(app)
db.init_app(app)
login_manager.init_app(app)
from main import main as main_blueprint
from .auth import auth as auth_blueprint
from .api_1_0 import api as api_1_0_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
app.register_blueprint(main_blueprint)
app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')
return app
| from flask import Flask, render_template
from flask.ext.mail import Mail
from flask.ext.login import LoginManager
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.pagedown import PageDown
from flask.ext.flatpages import FlatPages
from config import config
mail = Mail()
moment = Moment()
pagedown = PageDown()
pages = FlatPages()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask.ext.sslify import SSLify
sslify = SSLify(app)
mail.init_app(app)
moment.init_app(app)
pagedown.init_app(app)
pages.init_app(app)
db.init_app(app)
login_manager.init_app(app)
from main import main as main_blueprint
from .auth import auth as auth_blueprint
from .api_1_0 import api as api_1_0_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
app.register_blueprint(main_blueprint)
app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')
return app
| Python | 0.000072 |
690696493f110899282ad22f9b02d3d0fd91fe31 | Rewrite wirecloud.catalogue.admin module | src/wirecloud/catalogue/admin.py | src/wirecloud/catalogue/admin.py | # -*- coding: utf-8 -*-
# Copyright (c) 2013 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from wirecloud.catalogue.models import CatalogueResource
class CatalogueResourceAdmin(admin.ModelAdmin):
search_fields = ('vendor', 'short_name', 'version', 'author')
list_display = ('vendor', 'short_name', 'version', 'resource_type')
verbose_name_plural = 'Resources'
admin.site.register(CatalogueResource, CatalogueResourceAdmin)
| # -*- coding: utf-8 -*-
#...............................licence...........................................
#
# (C) Copyright 2008 Telefonica Investigacion y Desarrollo
# S.A.Unipersonal (Telefonica I+D)
#
# This file is part of Morfeo EzWeb Platform.
#
# Morfeo EzWeb Platform is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Morfeo EzWeb Platform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Morfeo EzWeb Platform. If not, see <http://www.gnu.org/licenses/>.
#
# Info about members and contributors of the MORFEO project
# is available at
#
# http://morfeo-project.org
#
#...............................licence...........................................#
#
from django.contrib import admin
from wirecloud.catalogue.models import CatalogueResource, WidgetWiring
from wirecloud.catalogue.models import UserTag, UserVote, Tag, Category
class CategoyAdminView(admin.ModelAdmin):
filter_horizontal = ('tags',)
verbose_name_plural = 'Categories'
class CatalogueResourceAdmin(admin.ModelAdmin):
search_fields = ['short_name', 'vendor', 'author']
list_display = ['short_name', 'vendor', 'author', 'resource_type']
verbose_name_plural = 'Resources'
admin.site.register(CatalogueResource, CatalogueResourceAdmin)
admin.site.register(WidgetWiring)
admin.site.register(UserTag)
admin.site.register(UserVote)
admin.site.register(Tag)
admin.site.register(Category, CategoyAdminView)
| Python | 0.000002 |
bc467365ebd287d96109ea0771403a10d3f56580 | set upload limit | app/__init__.py | app/__init__.py | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
import os
import flask_sijax
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask_sslify import SSLify
sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .api_1_0 import api as api_1_0_blueprint
app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')
app.config['SIJAX_STATIC_PATH'] = os.path.join('.', os.path.dirname(__file__), 'static/js/sijax/')
app.config['SIJAX_JSON_URI'] = '/static/js/sijax/json2.js'
flask_sijax.Sijax(app)
return app
| from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
import os
import flask_sijax
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask_sslify import SSLify
sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .api_1_0 import api as api_1_0_blueprint
app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')
app.config['SIJAX_STATIC_PATH'] = os.path.join('.', os.path.dirname(__file__), 'static/js/sijax/')
app.config['SIJAX_JSON_URI'] = '/static/js/sijax/json2.js'
flask_sijax.Sijax(app)
return app
| Python | 0.000001 |
a2e5e2d5b75acafe5b1de0b92a9206a6a2ec4d25 | Fix py36 unit tests | blazar/tests/api/test_root.py | blazar/tests/api/test_root.py | # Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from blazar.tests import api
class TestRoot(api.APITest):
def setUp(self):
super(TestRoot, self).setUp()
self.versions = {
"versions":
[{"status": "CURRENT",
"id": "v2.0",
"links": [{"href": "http://localhost/v2", "rel": "self"}]}]}
def test_version_discovery_root(self):
response = self.get_json('/',
expect_errors=True,
path_prefix='')
self.assertEqual(300, response.status_int)
self.assertEqual("application/json", response.content_type)
self.assertEqual(self.versions, response.json)
def test_version_discovery_versions(self):
response = self.get_json('/versions',
expect_errors=True,
path_prefix='')
self.assertEqual(300, response.status_int)
self.assertEqual("application/json", response.content_type)
self.assertEqual(self.versions, response.json)
def test_bad_uri(self):
response = self.get_json('/bad/path',
expect_errors=True,
path_prefix='')
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "text/plain")
| # Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils
from blazar.tests import api
class TestRoot(api.APITest):
def setUp(self):
super(TestRoot, self).setUp()
self.versions = jsonutils.dump_as_bytes(
{"versions":
[{"status": "CURRENT",
"id": "v2.0",
"links": [{"href": "http://localhost/v2", "rel": "self"}]}]})
def test_version_discovery_root(self):
response = self.get_json('/',
expect_errors=True,
path_prefix='')
self.assertEqual(300, response.status_int)
self.assertEqual("application/json", response.content_type)
self.assertEqual(self.versions, response.body)
def test_version_discovery_versions(self):
response = self.get_json('/versions',
expect_errors=True,
path_prefix='')
self.assertEqual(300, response.status_int)
self.assertEqual("application/json", response.content_type)
self.assertEqual(self.versions, response.body)
def test_bad_uri(self):
response = self.get_json('/bad/path',
expect_errors=True,
path_prefix='')
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "text/plain")
| Python | 0.000008 |
1bde8a92f47d49c6bea286a66fe89a3ccaca80a0 | Fix for .env being loaded for manage.py commands | app/__init__.py | app/__init__.py | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
# import config here rather than at module level to ensure that .env values
# are loaded into the environment first when running manage.py
from config import config
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
# Set jquery version
from flask_bootstrap import WebCDN
app.extensions['bootstrap']['cdns']['jquery'] = WebCDN(
'//cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/'
)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask_sslify import SSLify
sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
# Tell browser not to cache any HTML responses, as most pages have
# sensitive information in them. (But CSS should be cached as normal.)
@app.after_request
def apply_caching(response):
if response.headers.get('Content-Type', '').startswith('text/html'):
response.headers['Cache-control'] = 'no-store'
response.headers['Pragma'] = 'no-cache'
return response
return app
| from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
# Set jquery version
from flask_bootstrap import WebCDN
app.extensions['bootstrap']['cdns']['jquery'] = WebCDN(
'//cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/'
)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask_sslify import SSLify
sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
# Tell browser not to cache any HTML responses, as most pages have
# sensitive information in them. (But CSS should be cached as normal.)
@app.after_request
def apply_caching(response):
if response.headers.get('Content-Type', '').startswith('text/html'):
response.headers['Cache-control'] = 'no-store'
response.headers['Pragma'] = 'no-cache'
return response
return app
| Python | 0 |
cea50cbe5e0b16758c5eada3a16d121d2880c6ce | Fix PEP8 issue | i3pystatus/pomodoro.py | i3pystatus/pomodoro.py | import subprocess
from datetime import datetime, timedelta
from i3pystatus import IntervalModule
STOPPED = 0
RUNNING = 1
BREAK = 2
class Pomodoro(IntervalModule):
"""
This plugin shows Pomodoro timer.
Left click starts/restarts timer.
Right click stops it.
"""
settings = (
('sound',
'Path to sound file to play as alarm. Played by "aplay" utility'),
('pomodoro_duration',
'Working (pomodoro) interval duration in seconds'),
('break_duration', 'Short break duration in seconds'),
('long_break_duration', 'Long break duration in seconds'),
('short_break_count', 'Short break count before first long break'),
('format', 'format string, available formatters: current_pomodoro, '
'total_pomodoro, time')
)
required = ('sound',)
color_stopped = '#2ECCFA'
color_running = '#FFFF00'
color_break = '#37FF00'
interval = 1
short_break_count = 3
format = '☯ {current_pomodoro}/{total_pomodoro} {time}'
pomodoro_duration = 25 * 60
break_duration = 5 * 60
long_break_duration = 15 * 60
on_rightclick = "stop"
on_leftclick = "start"
def init(self):
# state could be either running/break or stopped
self.state = STOPPED
self.current_pomodoro = 0
self.total_pomodoro = self.short_break_count + 1 # and 1 long break
self.time = None
def run(self):
if self.time and datetime.utcnow() >= self.time:
if self.state == RUNNING:
self.state = BREAK
if self.breaks == self.short_break_count:
self.time = datetime.utcnow() + \
timedelta(seconds=self.long_break_duration)
else:
self.time = datetime.utcnow() + \
timedelta(seconds=self.break_duration)
text = 'Go for a break!'
else:
self.state = RUNNING
self.time = datetime.utcnow() + \
timedelta(seconds=self.pomodoro_duration)
text = 'Back to work!'
self.current_pomodoro = (self.current_pomodoro + 1) % self.total_pomodoro
self._alarm(text)
if self.state == RUNNING or self.state == BREAK:
min, sec = divmod((self.time - datetime.utcnow()).total_seconds(), 60)
text = '{:02}:{:02}'.format(int(min), int(sec))
sdict = {
'time': text,
'current_pomodoro': self.current_pomodoro + 1,
'total_pomodoro': self.total_pomodoro
}
color = self.color_running if self.state == RUNNING else self.color_break
text = self.format.format(**sdict)
else:
text = 'Start pomodoro',
color = self.color_stopped
self.output = {
'full_text': text,
'color': color
}
def start(self):
self.state = RUNNING
self.time = datetime.utcnow() + timedelta(seconds=self.pomodoro_duration)
self.current_pomodoro = 0
def stop(self):
self.state = STOPPED
self.time = None
def _alarm(self, text):
subprocess.call(['notify-send',
'Alarm!',
text])
subprocess.Popen(['aplay',
self.sound,
'-q'],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
| import subprocess
from datetime import datetime, timedelta
from i3pystatus import IntervalModule
STOPPED = 0
RUNNING = 1
BREAK = 2
class Pomodoro(IntervalModule):
"""
This plugin shows Pomodoro timer.
Left click starts/restarts timer.
Right click stops it.
"""
settings = (
('sound',
'Path to sound file to play as alarm. Played by "aplay" utility'),
('pomodoro_duration',
'Working (pomodoro) interval duration in seconds'),
('break_duration', 'Short break duration in seconds'),
('long_break_duration', 'Long break duration in seconds'),
('short_break_count', 'Short break count before first long break'),
('format', 'format string, available formatters: current_pomodoro, '
'total_pomodoro, time')
)
required = ('sound',)
color_stopped = '#2ECCFA'
color_running = '#FFFF00'
color_break = '#37FF00'
interval = 1
short_break_count = 3
format = '☯ {current_pomodoro}/{total_pomodoro} {time}'
pomodoro_duration = 25 * 60
break_duration = 5 * 60
long_break_duration = 15 * 60
on_rightclick = "stop"
on_leftclick = "start"
def init(self):
# state could be either running/break or stopped
self.state = STOPPED
self.current_pomodoro = 0
self.total_pomodoro = self.short_break_count + 1 # and 1 long break
self.time = None
def run(self):
if self.time and datetime.utcnow() >= self.time:
if self.state == RUNNING:
self.state = BREAK
if self.breaks == self.short_break_count:
self.time = datetime.utcnow() + \
timedelta(seconds=self.long_break_duration)
else:
self.time = datetime.utcnow() + \
timedelta(seconds=self.break_duration)
text = 'Go for a break!'
else:
self.state = RUNNING
self.time = datetime.utcnow() + \
timedelta(seconds=self.pomodoro_duration)
text = 'Back to work!'
self.current_pomodoro = (self.current_pomodoro + 1) % self.total_pomodoro
self._alarm(text)
if self.state == RUNNING or self.state == BREAK:
min, sec = divmod((self.time - datetime.utcnow()).total_seconds(), 60)
text = '{:02}:{:02}'.format(int(min), int(sec))
sdict = {
'time': text,
'current_pomodoro': self.current_pomodoro + 1,
'total_pomodoro': self.total_pomodoro,
}
color = self.color_running if self.state == RUNNING else self.color_break
text = self.format.format(**sdict)
else:
text = 'Start pomodoro',
color = self.color_stopped
self.output = {
'full_text': text,
'color': color
}
def start(self):
self.state = RUNNING
self.time = datetime.utcnow() + timedelta(seconds=self.pomodoro_duration)
self.current_pomodoro = 0
def stop(self):
self.state = STOPPED
self.time = None
def _alarm(self, text):
subprocess.call(['notify-send',
'Alarm!',
text])
subprocess.Popen(['aplay',
self.sound,
'-q'],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
| Python | 0 |
fb236951e1658beb32bd6dc45cf8d49a4636162a | Add tests for repr on tables | blaze/api/tests/test_table.py | blaze/api/tests/test_table.py | from blaze.api.table import Table, compute, table_repr
from blaze.data.python import Python
from blaze.compute.core import compute
from blaze.compute.python import compute
from datashape import dshape
import pandas as pd
data = (('Alice', 100),
('Bob', 200))
t = Table(data, columns=['name', 'amount'])
def test_resources():
assert t.resources() == {t: t.data}
def test_compute():
assert compute(t) == data
def test_compute():
assert list(compute(t['amount'] + 1)) == [101, 201]
def test_create_with_schema():
t = Table(data, schema='{name: string, amount: float32}')
assert t.schema == dshape('{name: string, amount: float32}')
def test_create_with_raw_data():
t = Table(data, columns=['name', 'amount'])
assert t.schema == dshape('{name: string, amount: int64}')
assert t.name
assert t.data == data
def test_create_with_data_descriptor():
schema='{name: string, amount: int64}'
ddesc = Python(data, schema=schema)
t = Table(ddesc)
assert t.schema == dshape(schema)
assert t.name
assert t.data == ddesc
def test_repr():
result = table_repr(t['name'])
print(result)
assert isinstance(result, str)
assert 'Alice' in result
assert 'Bob' in result
assert '...' not in result
result = table_repr(t['amount'] + 1)
print(result)
assert '101' in result
t2 = Table(tuple((i, i**2) for i in range(100)), columns=['x', 'y'])
result = table_repr(t2)
print(result)
assert len(result.split('\n')) < 20
assert '...' in result
def test_mutable_backed_repr():
mutable_data = [range(2)]
mutable_backed_table = Table(mutable_data, columns=["mutable"])
repr(mutable_backed_table)
def test_dataframe_backed_repr():
mutable_data = range(2)
df = pd.DataFrame(data=mutable_data, columns=["mutable"])
dataframe_backed_table = Table(df)
repr(dataframe_backed_table)
| from blaze.api.table import Table, compute, table_repr
from blaze.data.python import Python
from blaze.compute.core import compute
from blaze.compute.python import compute
from datashape import dshape
data = (('Alice', 100),
('Bob', 200))
t = Table(data, columns=['name', 'amount'])
def test_resources():
assert t.resources() == {t: t.data}
def test_compute():
assert compute(t) == data
def test_compute():
assert list(compute(t['amount'] + 1)) == [101, 201]
def test_create_with_schema():
t = Table(data, schema='{name: string, amount: float32}')
assert t.schema == dshape('{name: string, amount: float32}')
def test_create_with_raw_data():
t = Table(data, columns=['name', 'amount'])
assert t.schema == dshape('{name: string, amount: int64}')
assert t.name
assert t.data == data
def test_create_with_data_descriptor():
schema='{name: string, amount: int64}'
ddesc = Python(data, schema=schema)
t = Table(ddesc)
assert t.schema == dshape(schema)
assert t.name
assert t.data == ddesc
def test_repr():
result = table_repr(t['name'])
print(result)
assert isinstance(result, str)
assert 'Alice' in result
assert 'Bob' in result
assert '...' not in result
result = table_repr(t['amount'] + 1)
print(result)
assert '101' in result
t2 = Table(tuple((i, i**2) for i in range(100)), columns=['x', 'y'])
result = table_repr(t2)
print(result)
assert len(result.split('\n')) < 20
assert '...' in result
| Python | 0.000005 |
a085573261c0ed69b6bcabc40c4914a1623dc757 | Add link to FB | bot/app/buffer.py | bot/app/buffer.py | from buffpy import API
from buffpy.managers.profiles import Profiles
from spacelaunchnow import config
hashtags = '''\n
.
.
.⠀⠀
.⠀⠀
.⠀⠀
#SpaceLaunchNow #space #spacex #nasa #rocket #mars #aerospace #earth #solarsystem #iss #elonmusk
#moonlanding #spaceshuttle #spacewalk #esa #science #picoftheday #blueorigin #Florida #Falcon9
#falconheavy #starship #ULA'''
class BufferAPI:
def __init__(self, debug=None):
if debug is None:
self.DEBUG = config.DEBUG
else:
self.DEBUG = debug
self.api = API(client_id=config.BUFFER_CLIENT_ID,
client_secret=config.BUFFER_SECRET_ID,
access_token=config.BUFFER_ACCESS_TOKEN)
def send_to_all(self, message: str = None, image: str = None, link: str = None, now: bool = False):
profiles = Profiles(api=self.api).all()
for profile in profiles:
_message = message
if profile['service'] == 'instagram' and image is None:
continue
if profile['service'] == 'twitter':
if len(_message) > 280:
_message = (_message[:277] + '...')
profile.updates.new(text=_message, photo=image, link=link, now=now)
def send_to_instagram(self, message: str = None, image: str = None, now: bool = False):
profile = Profiles(api=self.api).filter(service='instagram')[0]
return profile.updates.new(text=message, photo=image, now=now)
def send_to_facebook(self, message: str = None, image: str = None, link: str = None, now: bool = False):
profile = Profiles(api=self.api).filter(service='facebook')[0]
if link:
message = message + "\n" + link
return profile.updates.new(text=message, photo=image, now=now)
def send_to_twitter(self, message: str = None, image: str = None, link: str = None, now: bool = False):
if len(message) > 280:
message = (message[:277] + '...')
profile = Profiles(api=self.api).filter(service='twitter')[0]
return profile.updates.new(text=message, photo=image, link=link, now=now)
| from buffpy import API
from buffpy.managers.profiles import Profiles
from spacelaunchnow import config
hashtags = '''\n
.
.
.⠀⠀
.⠀⠀
.⠀⠀
#SpaceLaunchNow #space #spacex #nasa #rocket #mars #aerospace #earth #solarsystem #iss #elonmusk
#moonlanding #spaceshuttle #spacewalk #esa #science #picoftheday #blueorigin #Florida #Falcon9
#falconheavy #starship #ULA'''
class BufferAPI:
def __init__(self, debug=None):
if debug is None:
self.DEBUG = config.DEBUG
else:
self.DEBUG = debug
self.api = API(client_id=config.BUFFER_CLIENT_ID,
client_secret=config.BUFFER_SECRET_ID,
access_token=config.BUFFER_ACCESS_TOKEN)
def send_to_all(self, message: str = None, image: str = None, link: str = None, now: bool = False):
profiles = Profiles(api=self.api).all()
for profile in profiles:
_message = message
if profile['service'] == 'instagram' and image is None:
continue
if profile['service'] == 'twitter':
if len(_message) > 280:
_message = (_message[:277] + '...')
profile.updates.new(text=_message, photo=image, link=link, now=now)
def send_to_instagram(self, message: str = None, image: str = None, now: bool = False):
profile = Profiles(api=self.api).filter(service='instagram')[0]
return profile.updates.new(text=message, photo=image, now=now)
def send_to_facebook(self, message: str = None, image: str = None, link: str = None, now: bool = False):
profile = Profiles(api=self.api).filter(service='facebook')[0]
return profile.updates.new(text=message, photo=image, now=now)
def send_to_twitter(self, message: str = None, image: str = None, link: str = None, now: bool = False):
if len(message) > 280:
message = (message[:277] + '...')
profile = Profiles(api=self.api).filter(service='twitter')[0]
return profile.updates.new(text=message, photo=image, link=link, now=now)
| Python | 0 |
d89252a2bbbe0677d2ad184f4c519e2b4d6ee9bd | Add JSON to data. | bot/serializer.py | bot/serializer.py | from bot.models import Launch, Notification, DailyDigestRecord
from rest_framework import serializers
class NotificationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Notification
fields = (
'launch', 'url', 'wasNotifiedTwentyFourHour', 'wasNotifiedOneHour', 'wasNotifiedTenMinutes',
'wasNotifiedDailyDigest', 'last_twitter_post', 'last_net_stamp',
'last_net_stamp_timestamp'
)
extra_kwargs = {
'id': {'read_only': False},
'slug': {'validators': []},
}
class DailyDigestRecordSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DailyDigestRecord
fields = '__all__'
class LaunchSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Launch
fields = (
'id', 'name', 'url', 'status', 'netstamp', 'wsstamp', 'westamp', 'location_name', 'rocket_name',
'mission_name'
)
def create(self, validated_data):
launch = Launch.objects.get_or_create(**validated_data)
try:
if Notification.objects.get(launch=launch[0]) is None:
Notification.objects.get_or_create(launch=launch[0])
except:
Notification.objects.get_or_create(launch=launch[0])
return launch
def update(self, instance, validated_data):
instance.id = validated_data.get('id', instance.id)
instance.name = validated_data.get('name', instance.name)
instance.status = validated_data.get('status', instance.status)
instance.netstamp = validated_data.get('netstamp', instance.netstamp)
instance.wsstamp = validated_data.get('wsstamp', instance.wsstamp)
instance.westamp = validated_data.get('westamp', instance.westamp)
instance.location_name = validated_data.get('location_name', instance.location_name)
instance.rocket_name = validated_data.get('rocket_name', instance.rocket_name)
instance.mission_name = validated_data.get('mission_name', instance.mission_name)
instance.save()
return instance
def get_object(self):
return self.model(self.validated_data)
| from bot.models import Launch, Notification, DailyDigestRecord
from rest_framework import serializers
class NotificationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Notification
fields = (
'launch', 'url', 'wasNotifiedTwentyFourHour', 'wasNotifiedOneHour', 'wasNotifiedTenMinutes',
'wasNotifiedDailyDigest', 'last_twitter_post', 'last_net_stamp',
'last_net_stamp_timestamp'
)
extra_kwargs = {
'id': {'read_only': False},
'slug': {'validators': []},
}
class DailyDigestRecordSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DailyDigestRecord
fields = (
'url', 'timestamp', 'messages', 'count', 'data'
)
class LaunchSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Launch
fields = (
'id', 'name', 'url', 'status', 'netstamp', 'wsstamp', 'westamp', 'location_name', 'rocket_name',
'mission_name'
)
def create(self, validated_data):
launch = Launch.objects.get_or_create(**validated_data)
try:
if Notification.objects.get(launch=launch[0]) is None:
Notification.objects.get_or_create(launch=launch[0])
except:
Notification.objects.get_or_create(launch=launch[0])
return launch
def update(self, instance, validated_data):
instance.id = validated_data.get('id', instance.id)
instance.name = validated_data.get('name', instance.name)
instance.status = validated_data.get('status', instance.status)
instance.netstamp = validated_data.get('netstamp', instance.netstamp)
instance.wsstamp = validated_data.get('wsstamp', instance.wsstamp)
instance.westamp = validated_data.get('westamp', instance.westamp)
instance.location_name = validated_data.get('location_name', instance.location_name)
instance.rocket_name = validated_data.get('rocket_name', instance.rocket_name)
instance.mission_name = validated_data.get('mission_name', instance.mission_name)
instance.save()
return instance
def get_object(self):
return self.model(self.validated_data)
| Python | 0.000001 |
43a53981c3da2db8a4d06c883cd72442b72eb4be | Update spec_driven_model/tests/fake_mixin.py | spec_driven_model/tests/fake_mixin.py | spec_driven_model/tests/fake_mixin.py | # Copyright 2021 Akretion - Raphael Valyi <raphael.valyi@akretion.com>
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl-3.0.en.html).
from odoo import fields, models
class PoXsdMixin(models.AbstractModel):
_description = "Abstract Model for PO XSD"
_name = "spec.mixin.poxsd"
_field_prefix = "poxsd10_"
_schema_name = "poxsd"
_schema_version = "1.0"
_odoo_module = "poxsd"
_spec_module = "odoo.addons.spec_driven_model.tests.spec_poxsd"
_binding_module = "odoo.addons.spec_driven_model.tests.purchase_order_lib"
# TODO rename
brl_currency_id = fields.Many2one(
comodel_name="res.currency",
string="Moeda",
compute="_compute_brl_currency_id",
default=lambda self: self.env.ref("base.EUR").id,
)
def _compute_brl_currency_id(self):
for item in self:
item.brl_currency_id = self.env.ref("base.EUR").id
def _valid_field_parameter(self, field, name):
if name in ("xsd_type", "xsd_required", "choice"):
return True
else:
return super()._valid_field_parameter(field, name)
| # Copyright 2021 Akretion - Raphael Valyi <raphael.valyi@akretion.com>
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl-3.0.en.html).
from odoo import fields, models
class PoXsdMixin(models.AbstractModel):
_description = "Abstract Model for PO XSD"
_name = "spec.mixin.poxsd"
_field_prefix = "poxsd10_"
_schema_name = "poxsd"
_schema_version = "1.0"
_odoo_module = "poxsd"
_spec_module = "odoo.addons.spec_driven_model.tests.spec_poxsd"
_binding_module = "odoo.addons.spec_driven_model.tests.purchase_order_lib"
# TODO rename
brl_currency_id = fields.Many2one(
comodel_name="res.currency",
string="Moeda",
compute="_compute_brl_currency_id",
default=lambda self: self.env.ref("base.EUR").id,
)
def _compute_brl_currency_id(self):
for item in self:
item.brl_currency_id = self.env.ref("base.EUR").id
| Python | 0 |
2934120b3743fac2b388eba19d8c0a22b44d8f0a | Update error message | tests/core/test_evaluation_parameters.py | tests/core/test_evaluation_parameters.py | from timeit import timeit
import pytest
from great_expectations.data_asset.evaluation_parameters import parse_evaluation_parameter
from great_expectations.exceptions import EvaluationParameterError
def test_parse_evaluation_parameter():
# Substitution alone is ok
assert parse_evaluation_parameter("a", {"a": 1}) == 1
assert parse_evaluation_parameter("urn:great_expectations:validations:blarg",
{"urn:great_expectations:validations:blarg": 1}) == 1
# Very basic arithmetic is allowed as-is:
assert parse_evaluation_parameter("1 + 1", {}) == 2
# So is simple variable substitution:
assert parse_evaluation_parameter("a + 1", {"a": 2}) == 3
# URN syntax works
assert parse_evaluation_parameter("urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value * 0.9",
{"urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value": 10}) == 9
# We have basic operations (trunc)
assert parse_evaluation_parameter("urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value * 0.9",
{"urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value": 11}) != 9
assert parse_evaluation_parameter("trunc(urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value * 0.9)",
{"urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value": 11}) == 9
# Non GE URN syntax fails
with pytest.raises(EvaluationParameterError) as err:
parse_evaluation_parameter("urn:ieee:not_ge * 10", {"urn:ieee:not_ge": 1})
assert "Parse Failure" in str(err.value)
# Valid variables but invalid expression is no good
with pytest.raises(EvaluationParameterError) as err:
parse_evaluation_parameter("1 / a", {"a": 0})
assert "Error while evaluating evaluation parameter expression: division by zero" in str(err.value)
# It is okay to *substitute* strings in the expression...
assert parse_evaluation_parameter("foo", {"foo": "bar"}) == "bar"
# ...and to have whitespace in substituted values...
assert parse_evaluation_parameter("foo", {"foo": "bar "}) == "bar "
# ...but whitespace is *not* preserved from the parameter name if we evaluate it
assert parse_evaluation_parameter("foo ", {"foo": "bar"}) == "bar" # NOT "bar "
# We can use multiple parameters...
assert parse_evaluation_parameter("foo * bar", {"foo": 2, "bar": 3}) == 6
# ...but we cannot leave *partially* evaluated expressions (phew!)
with pytest.raises(EvaluationParameterError) as e:
parse_evaluation_parameter("foo + bar", {"foo": 2})
assert "Error while evaluating evaluation parameter expression: could not convert string to float" in str(e.value)
def test_parser_timing():
"""We currently reuse the parser, clearing the stack between calls, which is about 10 times faster than not
doing so. But these operations are really quick, so this may not be necessary."""
assert timeit("parse_evaluation_parameter('x', {'x': 1})",
setup="from great_expectations.data_asset.evaluation_parameters import parse_evaluation_parameter",
number=100) < 1
| from timeit import timeit
import pytest
from great_expectations.data_asset.evaluation_parameters import parse_evaluation_parameter
from great_expectations.exceptions import EvaluationParameterError
def test_parse_evaluation_parameter():
# Substitution alone is ok
assert parse_evaluation_parameter("a", {"a": 1}) == 1
assert parse_evaluation_parameter("urn:great_expectations:validations:blarg",
{"urn:great_expectations:validations:blarg": 1}) == 1
# Very basic arithmetic is allowed as-is:
assert parse_evaluation_parameter("1 + 1", {}) == 2
# So is simple variable substitution:
assert parse_evaluation_parameter("a + 1", {"a": 2}) == 3
# URN syntax works
assert parse_evaluation_parameter("urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value * 0.9",
{"urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value": 10}) == 9
# We have basic operations (trunc)
assert parse_evaluation_parameter("urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value * 0.9",
{"urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value": 11}) != 9
assert parse_evaluation_parameter("trunc(urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value * 0.9)",
{"urn:great_expectations:validations:source_patient_data.default"
":expect_table_row_count_to_equal.result.observed_value": 11}) == 9
# Non GE URN syntax fails
with pytest.raises(EvaluationParameterError) as err:
parse_evaluation_parameter("urn:ieee:not_ge * 10", {"urn:ieee:not_ge": 1})
assert "Parse Failure" in str(err.value)
# Valid variables but invalid expression is no good
with pytest.raises(EvaluationParameterError) as err:
parse_evaluation_parameter("1 / a", {"a": 0})
assert "Error while evaluating evaluation parameter expression: division by zero" in str(err.value)
# It is okay to *substitute* strings in the expression...
assert parse_evaluation_parameter("foo", {"foo": "bar"}) == "bar"
# ...and to have whitespace in substituted values...
assert parse_evaluation_parameter("foo", {"foo": "bar "}) == "bar "
# ...but whitespace is *not* preserved from the parameter name if we evaluate it
assert parse_evaluation_parameter("foo ", {"foo": "bar"}) == "bar" # NOT "bar "
# We can use multiple parameters...
assert parse_evaluation_parameter("foo * bar", {"foo": 2, "bar": 3}) == 6
# ...but we cannot leave *partially* evaluated expressions (phew!)
with pytest.raises(EvaluationParameterError) as e:
parse_evaluation_parameter("foo + bar", {"foo": 2})
assert "unsupported operand type(s) for +" in str(e.value)
def test_parser_timing():
"""We currently reuse the parser, clearing the stack between calls, which is about 10 times faster than not
doing so. But these operations are really quick, so this may not be necessary."""
assert timeit("parse_evaluation_parameter('x', {'x': 1})",
setup="from great_expectations.data_asset.evaluation_parameters import parse_evaluation_parameter",
number=100) < 1
| Python | 0 |
9b2cc65a792eb850d982653100ac948990904125 | Display microseconds in integer decimal | appstats/filters.py | appstats/filters.py | # encoding: utf-8
import json
def json_filter(value):
return json.dumps(value)
def count_filter(value):
if value is None:
return ""
count = float(value)
base = 1000
prefixes = [
('K'),
('M'),
('G'),
('T'),
('P'),
('E'),
('Z'),
('Y')
]
if count < base:
return "%.1f" % count
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if count < unit:
return "%.1f %s" % ((base * count / unit), prefix)
return "%.1f %s" % ((base * count / unit), prefix)
def time_filter(value):
if value is None:
return ""
# Transform secs into microseconds
time = float(value) * 1000000
if time < 1000:
return u"%d µs" % time
else:
time /= 1000
if time < 1000:
return "%.1f ms" % time
else:
time /= 1000
if time < 60:
return "%.1f s" % time
else:
time /= 60
if time < 60:
return "%.1f m" % time
else:
time /= 60
if time < 24:
return "%.1f h" % time
else:
time /= 24
return "%.1f d" % time
def default_filter(value):
if value is None:
return ""
return "%.1f" % float(value)
| # encoding: utf-8
import json
def json_filter(value):
return json.dumps(value)
def count_filter(value):
if value is None:
return ""
count = float(value)
base = 1000
prefixes = [
('K'),
('M'),
('G'),
('T'),
('P'),
('E'),
('Z'),
('Y')
]
if count < base:
return "%.1f" % count
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if count < unit:
return "%.1f %s" % ((base * count / unit), prefix)
return "%.1f %s" % ((base * count / unit), prefix)
def time_filter(value):
if value is None:
return ""
# Transform secs into microseconds
time = float(value) * 1000000
if time < 1000:
return u"%.1f µs" % time
else:
time /= 1000
if time < 1000:
return "%.1f ms" % time
else:
time /= 1000
if time < 60:
return "%.1f s" % time
else:
time /= 60
if time < 60:
return "%.1f m" % time
else:
time /= 60
if time < 24:
return "%.1f h" % time
else:
time /= 24
return "%.1f d" % time
def default_filter(value):
if value is None:
return ""
return "%.1f" % float(value)
| Python | 0.99978 |
f07a05f6a6edd0ef481dd9a24c1556b345fe7686 | Remove attempt to import module that no longer exists | iati/tests/conftest.py | iati/tests/conftest.py | """Configuration to exist in the global scope for pytest."""
import collections
import pytest
import iati.default
import iati.resources
import iati.tests.utilities
import iati
pytest_plugins = [ # name required by pytest # pylint: disable=invalid-name
'iati.tests.fixtures.comparison',
'iati.tests.fixtures.versions'
]
def _check_latest_version_mark(item):
"""Check that functions marked as supporting the latest version of the IATI Standard have been updated."""
latest_version_marker = item.get_marker('latest_version')
if latest_version_marker is not None:
latest_version = iati.Version(latest_version_marker.args[0])
assert latest_version == iati.version.STANDARD_VERSION_LATEST
def pytest_runtest_call(item):
"""Run operations that are called when tests are run."""
_check_latest_version_mark(item)
@pytest.fixture(params=[
('2.02', 62), # There are 38 embedded codelists at v2.02, plus 24 non-embedded codelists (which are valid for any version)
('2.01', 61), # There are 37 embedded codelists at v2.01, plus 24 non-embedded codelists (which are valid for any version)
('1.05', 59), # There are 35 embedded codelists at v1.05, plus 24 non-embedded codelists (which are valid for any version)
('1.04', 59) # There are 35 embedded codelists at v1.04, plus 24 non-embedded codelists (which are valid for any version)
])
def codelist_lengths_by_version(request): # latest_version fixture used to perform checks when adding new versions # pylint: disable=unused-argument
"""Return a tuple containing versions of the Standard, and the number of Codelists for that version.
Format: `(version=[standardVersion], expected_length=[numCodelists])`
"""
request.applymarker(pytest.mark.latest_version('2.02'))
output = collections.namedtuple('output', 'version expected_length')
return output(version=request.param[0], expected_length=request.param[1])
@pytest.fixture
def schema_ruleset():
"""Return a schema with the Standard Ruleset added.
Returns:
A valid Activity Schema with the Standard Ruleset added.
Todo:
Stop this being fixed to 2.02.
"""
schema = iati.default.activity_schema('2.02', False)
ruleset = iati.default.ruleset('2.02')
schema.rulesets.add(ruleset)
return schema
| """Configuration to exist in the global scope for pytest."""
import collections
import pytest
import iati.default
import iati.resources
import iati.tests.utilities
import iati
pytest_plugins = [ # name required by pytest # pylint: disable=invalid-name
'iati.tests.fixtures.comparison',
'iati.tests.fixtures.utility',
'iati.tests.fixtures.versions'
]
def _check_latest_version_mark(item):
"""Check that functions marked as supporting the latest version of the IATI Standard have been updated."""
latest_version_marker = item.get_marker('latest_version')
if latest_version_marker is not None:
latest_version = iati.Version(latest_version_marker.args[0])
assert latest_version == iati.version.STANDARD_VERSION_LATEST
def pytest_runtest_call(item):
"""Run operations that are called when tests are run."""
_check_latest_version_mark(item)
@pytest.fixture(params=[
('2.02', 62), # There are 38 embedded codelists at v2.02, plus 24 non-embedded codelists (which are valid for any version)
('2.01', 61), # There are 37 embedded codelists at v2.01, plus 24 non-embedded codelists (which are valid for any version)
('1.05', 59), # There are 35 embedded codelists at v1.05, plus 24 non-embedded codelists (which are valid for any version)
('1.04', 59) # There are 35 embedded codelists at v1.04, plus 24 non-embedded codelists (which are valid for any version)
])
def codelist_lengths_by_version(request): # latest_version fixture used to perform checks when adding new versions # pylint: disable=unused-argument
"""Return a tuple containing versions of the Standard, and the number of Codelists for that version.
Format: `(version=[standardVersion], expected_length=[numCodelists])`
"""
request.applymarker(pytest.mark.latest_version('2.02'))
output = collections.namedtuple('output', 'version expected_length')
return output(version=request.param[0], expected_length=request.param[1])
@pytest.fixture
def schema_ruleset():
"""Return a schema with the Standard Ruleset added.
Returns:
A valid Activity Schema with the Standard Ruleset added.
Todo:
Stop this being fixed to 2.02.
"""
schema = iati.default.activity_schema('2.02', False)
ruleset = iati.default.ruleset('2.02')
schema.rulesets.add(ruleset)
return schema
| Python | 0.000195 |
0c160c8e787a9019571f358b70633efa13cad466 | Support for inbox.util.eas in the /inbox-eas repo; this is where EAS-specific util code would live. | inbox/util/__init__.py | inbox/util/__init__.py | """ Non-server-specific utility modules. These shouldn't depend on any code
from the inbox module tree!
Don't add new code here! Find the relevant submodule, or use misc.py if
there's really no other place.
"""
# Allow out-of-tree submodules.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| """ Non-server-specific utility modules. These shouldn't depend on any code
from the inbox module tree!
Don't add new code here! Find the relevant submodule, or use misc.py if
there's really no other place.
"""
| Python | 0 |
6c28b693fdcf6a1dc481b486c6c6233ae08d72e1 | exclude thread itself from duplicates search when saving edits | askapp/forms.py | askapp/forms.py | from snowpenguin.django.recaptcha2.fields import ReCaptchaField
from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
from registration.forms import RegistrationFormTermsOfService
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import Profile, Thread, Post
class RecaptchaRegistrationForm(RegistrationFormTermsOfService):
captcha = ReCaptchaField(widget=ReCaptchaWidget())
form_control = {'class': 'form-control'}
class ProfileForm(forms.ModelForm):
is_active = forms.BooleanField(required=False)
class Meta:
model = Profile
fields = ('avatar', 'country', 'city', 'about')
widgets = {
'country': forms.Select(attrs=form_control),
'city': forms.TextInput(attrs={'class': 'form-control', 'placeholder': _('Enter your city')}),
'about': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),
}
class AskappClearableFileInput(forms.widgets.ClearableFileInput):
template_with_initial = (
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
class ThreadForm(forms.ModelForm):
class Meta:
model = Thread
fields = ('thread_type', 'original', 'link', 'title', 'text', 'tags', 'image')
widgets = {
'original': forms.TextInput(),
'image': AskappClearableFileInput()
}
error_messages = {
'original': {
'invalid_choice': _('This thread is not found'),
},
}
def __init__(self, user, *args, **kwargs):
super(ThreadForm, self).__init__(*args, **kwargs)
if self.instance and not self.instance.id:
self.instance.user = user
elif not user.is_staff:
self.fields.pop('thread_type')
def clean(self):
cleaned_data = super(ThreadForm, self).clean()
link = cleaned_data.get("link")
thread_type = cleaned_data.get("thread_type")
#if thread_type and self.initial.get('thread_type', thread_type) != thread_type and not self.user.is_staff and not self.has_error('title'):
# self.add_error('title', 'You are not allowed to change the thread type')
if thread_type in Thread.TYPES_WITH_LINK and not self.has_error('link'):
if not link:
msg = _("This field is required")
self.add_error('link', msg)
else:
youtube_info = Thread(link=link).parse_youtube_url()
if youtube_info:
exists = Thread.objects.filter(link__contains=youtube_info['id']).exclude(id=self.instance.id)
if len(exists):
msg = _("Sorry, someone has already posted this video")
self.add_error('link', msg)
elif thread_type == Thread.YOUTUBE:
msg = _("This is not a Youtube URL")
self.add_error('link', msg)
elif self.has_error('link') and thread_type not in Thread.TYPES_WITH_LINK:
del self.errors['link']
class ReplyForm(forms.ModelForm):
is_answer = forms.BooleanField(required=False)
class Meta:
model = Post
fields = ('text', 'is_answer')
widgets = {
'text': forms.Textarea(attrs={'class': 'form-control', 'rows': 5}),
}
| from snowpenguin.django.recaptcha2.fields import ReCaptchaField
from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
from registration.forms import RegistrationFormTermsOfService
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import Profile, Thread, Post
class RecaptchaRegistrationForm(RegistrationFormTermsOfService):
captcha = ReCaptchaField(widget=ReCaptchaWidget())
form_control = {'class': 'form-control'}
class ProfileForm(forms.ModelForm):
is_active = forms.BooleanField(required=False)
class Meta:
model = Profile
fields = ('avatar', 'country', 'city', 'about')
widgets = {
'country': forms.Select(attrs=form_control),
'city': forms.TextInput(attrs={'class': 'form-control', 'placeholder': _('Enter your city')}),
'about': forms.Textarea(attrs={'class': 'form-control', 'rows': 3}),
}
class AskappClearableFileInput(forms.widgets.ClearableFileInput):
template_with_initial = (
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
class ThreadForm(forms.ModelForm):
class Meta:
model = Thread
fields = ('thread_type', 'original', 'link', 'title', 'text', 'tags', 'image')
widgets = {
'original': forms.TextInput(),
'image': AskappClearableFileInput()
}
error_messages = {
'original': {
'invalid_choice': _('This thread is not found'),
},
}
def __init__(self, user, *args, **kwargs):
super(ThreadForm, self).__init__(*args, **kwargs)
if self.instance and not self.instance.id:
self.instance.user = user
elif not user.is_staff:
self.fields.pop('thread_type')
def clean(self):
cleaned_data = super(ThreadForm, self).clean()
link = cleaned_data.get("link")
thread_type = cleaned_data.get("thread_type")
#if thread_type and self.initial.get('thread_type', thread_type) != thread_type and not self.user.is_staff and not self.has_error('title'):
# self.add_error('title', 'You are not allowed to change the thread type')
if thread_type in Thread.TYPES_WITH_LINK and not self.has_error('link'):
if not link:
msg = _("This field is required")
self.add_error('link', msg)
else:
youtube_info = Thread(link=link).parse_youtube_url()
if youtube_info:
exists = Thread.objects.filter(link__contains=youtube_info['id'])
if len(exists):
msg = _("Sorry, someone has already posted this video")
self.add_error('link', msg)
elif thread_type == Thread.YOUTUBE:
msg = _("This is not a Youtube URL")
self.add_error('link', msg)
elif self.has_error('link') and thread_type not in Thread.TYPES_WITH_LINK:
del self.errors['link']
class ReplyForm(forms.ModelForm):
is_answer = forms.BooleanField(required=False)
class Meta:
model = Post
fields = ('text', 'is_answer')
widgets = {
'text': forms.Textarea(attrs={'class': 'form-control', 'rows': 5}),
}
| Python | 0 |
933a082a76c6c9b72aaf275f45f0d155f66eeacf | Fix Python 3.3 calling another virtualenv as a subprocess. | asv/__init__.py | asv/__init__.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
if sys.version_info >= (3, 3):
# OS X framework builds of Python 3.3 can not call other 3.3
# virtualenvs as a subprocess because `__PYENV_LAUNCHER__` is
# inherited.
if os.environ.get('__PYVENV_LAUNCHER__'):
os.unsetenv('__PYVENV_LAUNCHER__')
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
| Python | 0.000004 |
3eddbd56328e245a1f952dccbe38d121657640c3 | Add ends_with flag to fabfile if we want it to end early. | auto/fabfile.py | auto/fabfile.py | from fabric.api import *
from ssh_util import *
from collections import OrderedDict
import os, sys, json
VERBOSE = False
TASKS = [
('local', ['dump_api']),
('remote', ['parse_api', '-m 8']),
('remote', ['scrape', '-m 8']),
('local', ['download']),
('remote', ['extract']),
('local', ['create_dockets']),
('remote', ['scrape_dockets', '-m 8'])
]
ADMINS = []
EMAIL_SENDER = ''
EMAIL_API_KEY = ''
LOCK_DIR = '/tmp'
try:
from local_settings import *
except:
pass
def send_email(recipients, subject, message):
from postmark import PMMail
message = PMMail(
to = ','.join(recipients),
subject = '[regs] %s' % subject,
text_body = message,
api_key = EMAIL_API_KEY,
sender = EMAIL_SENDER
)
message.send(test=False)
def run_local(command):
os.chdir(os.path.expanduser('~/regulations-scraper/regscrape'))
out = local(' '.join([sys.executable, command]), capture=True)
return out
def run_remote(command):
with cd('~/regulations-scraper/regscrape'):
with prefix('source ~/.virtualenvs/scraper/bin/activate'):
return run(command)
def handle_completion(message, results):
output = '%s\nComplete results:\n%s' % (message, json.dumps(results, indent=4))
print output
if ADMINS:
send_email(ADMINS, message, output)
def acquire_lock():
lock_path = os.path.join(LOCK_DIR, 'regs.lock')
if os.path.exists(lock_path):
raise RuntimeError("Can't acquire lock.")
else:
lock = open(lock_path, 'w')
lock.write(str(os.getpid()))
lock.close()
def release_lock():
lock_path = os.path.join(LOCK_DIR, 'regs.lock')
os.unlink(lock_path)
@hosts(ssh_config('scraper'))
def run_regs(start_with='dump_api', end_with='scrape_dockets'):
try:
# use a lock file to keep multiple instances from trying to run simultaneously, which, among other things, consumes all of the memory on the high-CPU instance
acquire_lock()
except:
print 'Unable to acquire lock.'
if ADMINS:
send_email(ADMINS, "Aborting: can't acquire lock", "Can't start processing due to inability to acquire lock.")
sys.exit(1)
first_task_idx = [i for i in range(len(TASKS)) if TASKS[i][1][0] == start_with][0]
last_task_idx = [i for i in range(len(TASKS)) if TASKS[i][1][0] == end_with][0]
tasks = TASKS[first_task_idx:(last_task_idx+1)]
runners = {
'remote': run_remote,
'local': run_local
}
results = OrderedDict()
for func, command in tasks:
try:
output = runners[func](' '.join(['./run.py'] + command + ['--parsable']))
try:
results[command[0]] = json.loads(output)
except ValueError:
results[command[0]] = 'unable to decode results'
if VERBOSE and ADMINS:
send_email(ADMINS, 'Results of %s' % command[0], 'Results of %s:\n%s' % (command[0], json.dumps(results[command[0]], indent=4)))
except SystemExit:
results[command[0]] = 'failed'
handle_completion('Aborting at step: %s' % command[0], results)
sys.exit(1)
handle_completion('All steps completed.', results)
release_lock()
| from fabric.api import *
from ssh_util import *
from collections import OrderedDict
import os, sys, json
VERBOSE = False
TASKS = [
('local', ['dump_api']),
('remote', ['parse_api', '-m 8']),
('remote', ['scrape', '-m 8']),
('local', ['download']),
('remote', ['extract']),
('local', ['create_dockets']),
('remote', ['scrape_dockets', '-m 8'])
]
ADMINS = []
EMAIL_SENDER = ''
EMAIL_API_KEY = ''
LOCK_DIR = '/tmp'
try:
from local_settings import *
except:
pass
def send_email(recipients, subject, message):
from postmark import PMMail
message = PMMail(
to = ','.join(recipients),
subject = '[regs] %s' % subject,
text_body = message,
api_key = EMAIL_API_KEY,
sender = EMAIL_SENDER
)
message.send(test=False)
def run_local(command):
os.chdir(os.path.expanduser('~/regulations-scraper/regscrape'))
out = local(' '.join([sys.executable, command]), capture=True)
return out
def run_remote(command):
with cd('~/regulations-scraper/regscrape'):
with prefix('source ~/.virtualenvs/scraper/bin/activate'):
return run(command)
def handle_completion(message, results):
output = '%s\nComplete results:\n%s' % (message, json.dumps(results, indent=4))
print output
if ADMINS:
send_email(ADMINS, message, output)
def acquire_lock():
lock_path = os.path.join(LOCK_DIR, 'regs.lock')
if os.path.exists(lock_path):
raise RuntimeError("Can't acquire lock.")
else:
lock = open(lock_path, 'w')
lock.write(str(os.getpid()))
lock.close()
def release_lock():
lock_path = os.path.join(LOCK_DIR, 'regs.lock')
os.unlink(lock_path)
@hosts(ssh_config('scraper'))
def run_regs(start_with='dump_api'):
try:
# use a lock file to keep multiple instances from trying to run simultaneously, which, among other things, consumes all of the memory on the high-CPU instance
acquire_lock()
except:
print 'Unable to acquire lock.'
if ADMINS:
send_email(ADMINS, "Aborting: can't acquire lock", "Can't start processing due to inability to acquire lock.")
sys.exit(1)
tasks = TASKS[[i for i in range(len(TASKS)) if TASKS[i][1][0] == start_with][0]:] # eep! finds the thing to start with, then takes the subset of TASKS from then on
runners = {
'remote': run_remote,
'local': run_local
}
results = OrderedDict()
for func, command in tasks:
try:
output = runners[func](' '.join(['./run.py'] + command + ['--parsable']))
try:
results[command[0]] = json.loads(output)
except ValueError:
results[command[0]] = 'unable to decode results'
if VERBOSE and ADMINS:
send_email(ADMINS, 'Results of %s' % command[0], 'Results of %s:\n%s' % (command[0], json.dumps(results[command[0]], indent=4)))
except SystemExit:
results[command[0]] = 'failed'
handle_completion('Aborting at step: %s' % command[0], results)
sys.exit(1)
handle_completion('All steps completed.', results)
release_lock()
| Python | 0 |
6535495c6bbe17122c86eb657243d675300cc382 | add visit_Attribute to adjust numpy fns | autodiff/ast.py | autodiff/ast.py | import logging
import meta
import ast
import numpy as np
import theano
import theano.tensor as T
logger = logging.getLogger('pyautodiff')
def istensor(x):
tensortypes = (theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, tensortypes)
def isvar(x):
vartypes = (theano.tensor.sharedvar.SharedVariable,
theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, vartypes)
def get_ast(func, flags=0):
func_def = meta.decompiler.decompile_func(func)
if isinstance(func_def, ast.Lambda):
func_def = ast.FunctionDef(name='<lambda>', args=func_def.args,
body=[ast.Return(func_def.body)],
decorator_list=[])
assert isinstance(func_def, ast.FunctionDef)
return func_def
def print_ast(ast):
meta.asttools.print_ast(ast)
def print_source(ast):
meta.asttools.python_source(ast)
class TheanoTransformer(ast.NodeTransformer):
def __init__(self):
super(TheanoTransformer, self).__init__()
self.smap = dict()
def ast_wrap(self, node, method_name):
wrapped = ast.Call(args=[node],
func=ast.Attribute(attr=method_name,
ctx=ast.Load(),
value=ast.Name(ctx=ast.Load(),
id='self')),
keywords=[],
kwargs=None,
starargs=None)
return wrapped
def shadow(self, x):
if not isinstance(x, (int, float, np.ndarray)):
return x
# take special care with small ints, because CPYthon caches them.
# This makes it impossible to tell one from the other.
if isinstance(x, int) and -5 <= x <= 256:
x = np.int_(x)
elif isinstance(x, float):
x = np.float_(x)
if getattr(x, 'dtype', None) == bool:
logger.info('Warning: Theano has no bool type; upgrading to int8.')
x = x.astype('int8')
sym_x = theano.shared(x)
return self.smap.setdefault(id(x), sym_x)
def switch_numpy_theano(self, func):
# if the function comes from numpy...
if ((getattr(func, '__module__', None)
and func.__module__.startswith('numpy'))
or isinstance(func, np.ufunc)):
# try to get the theano version...
return getattr(T, func.__name__, func)
else:
return func
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, ast.Load):
node = self.ast_wrap(node, 'shadow')
return node
def visit_Attribute(self, node):
self.generic_visit(node)
node = self.ast_wrap(node, 'switch_numpy_theano')
return node
def test_run(self, f):
a = get_ast(f)
self.visit(a)
a = ast.fix_missing_locations(a)
new_globals = globals()
new_globals.update({'self' : self})
new_f = meta.decompiler.compile_func(a, '<TheanoTransformer-AST>', new_globals)
return new_f
| import logging
import meta
import ast
import numpy as np
import theano
import theano.tensor as T
logger = logging.getLogger('pyautodiff')
def istensor(x):
tensortypes = (theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, tensortypes)
def isvar(x):
vartypes = (theano.tensor.sharedvar.SharedVariable,
theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, vartypes)
def get_ast(func, flags=0):
func_def = meta.decompiler.decompile_func(func)
if isinstance(func_def, ast.Lambda):
func_def = ast.FunctionDef(name='<lambda>', args=func_def.args,
body=[ast.Return(func_def.body)],
decorator_list=[])
assert isinstance(func_def, ast.FunctionDef)
return func_def
def print_ast(ast):
meta.asttools.print_ast(ast)
def print_source(ast):
meta.asttools.python_source(ast)
class TheanoTransformer(ast.NodeTransformer):
def __init__(self):
super(TheanoTransformer, self).__init__()
self.smap = dict()
def ast_wrap(self, node, method_name):
wrapped = ast.Call(args=[node],
func=ast.Attribute(attr=method_name,
ctx=ast.Load(),
value=ast.Name(ctx=ast.Load(),
id='self')),
keywords=[],
kwargs=None,
starargs=None)
return wrapped
def shadow(self, x):
if not isinstance(x, (int, float, np.ndarray)):
return x
# take special care with small ints, because CPYthon caches them.
# This makes it impossible to tell one from the other.
if isinstance(x, int) and -5 <= x <= 256:
x = np.int_(x)
elif isinstance(x, float):
x = np.float_(x)
if getattr(x, 'dtype', None) == bool:
logger.info('Warning: Theano has no bool type; upgrading to int8.')
x = x.astype('int8')
sym_x = theano.shared(x)
return self.smap.setdefault(id(x), sym_x)
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, ast.Load):
node = self.ast_wrap(node, 'shadow')
return node
return node
def test_run(self, f):
a = get_ast(f)
self.visit(a)
a = ast.fix_missing_locations(a)
new_globals = globals()
new_globals.update({'self' : self})
new_f = meta.decompiler.compile_func(a, '<TheanoTransformer-AST>', new_globals)
return new_f
| Python | 0 |
91ef2866d14348971326df39d7868ad5c424b64c | remove the 10 article limit that was used for testing | autoindex_sk.py | autoindex_sk.py | #!/usr/bin/env python3
import sys
import csv
from bs4 import BeautifulSoup
import autoindex
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import DC, DCTERMS, SKOS, XSD
def autoindex_doc(text, url, title, date, author, place):
g = Graph()
uri = URIRef(url)
g.add((uri, DCTERMS.title, Literal(title, 'fi')))
g.add((uri, DCTERMS.issued, Literal(date, datatype=XSD.date)))
if author:
g.add((uri, DCTERMS.creator, Literal(author, 'fi')))
if place:
g.add((uri, DCTERMS.spatial, Literal(place, 'fi')))
results = autoindex.autoindex(text, 'yso-finna-fi', threshold=0.85, maxhits=3)
for result in results:
g.add((uri, DCTERMS.subject, URIRef(result['uri'])))
g.add((URIRef(result['uri']), SKOS.prefLabel, Literal(result['label'], 'fi')))
return g
def html_to_text(html):
soup = BeautifulSoup(html, 'lxml')
return soup.get_text()
reader = csv.reader(open(sys.argv[1], 'r'), delimiter='|')
for row in reader:
id = row[0]
title = html_to_text(row[1])
date = row[2].strip()
author = row[3].strip()
place = row[4].strip()
text = title + " " + html_to_text(row[6])
url = "http://sk.example.com/%s" % id
g = autoindex_doc(text, url, title, date, author, place)
g.serialize(destination=sys.stdout.buffer, format='nt')
| #!/usr/bin/env python3
import sys
import csv
from bs4 import BeautifulSoup
import autoindex
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import DC, DCTERMS, SKOS, XSD
def autoindex_doc(text, url, title, date, author, place):
g = Graph()
uri = URIRef(url)
g.add((uri, DCTERMS.title, Literal(title, 'fi')))
g.add((uri, DCTERMS.issued, Literal(date, datatype=XSD.date)))
if author:
g.add((uri, DCTERMS.creator, Literal(author, 'fi')))
if place:
g.add((uri, DCTERMS.spatial, Literal(place, 'fi')))
results = autoindex.autoindex(text, 'yso-finna-fi', threshold=0.85, maxhits=3)
for result in results:
g.add((uri, DCTERMS.subject, URIRef(result['uri'])))
g.add((URIRef(result['uri']), SKOS.prefLabel, Literal(result['label'], 'fi')))
return g
def html_to_text(html):
soup = BeautifulSoup(html, 'lxml')
return soup.get_text()
reader = csv.reader(open(sys.argv[1], 'r'), delimiter='|')
n = 0
for row in reader:
id = row[0]
title = html_to_text(row[1])
date = row[2].strip()
author = row[3].strip()
place = row[4].strip()
text = title + " " + html_to_text(row[6])
url = "http://sk.example.com/%s" % id
g = autoindex_doc(text, url, title, date, author, place)
g.serialize(destination=sys.stdout.buffer, format='nt')
n += 1
if n == 10:
break
| Python | 0 |
5d21942823ea21a3c2eb38e43b4b8b4fa2ec2ac1 | Allow mayday.us for CORS | backend/util.py | backend/util.py | """General utilities."""
import urlparse
import logging
def ConstantTimeIsEqual(a, b):
"""Securely compare two strings without leaking timing information."""
if len(a) != len(b):
return False
acc = 0
for x, y in zip(a, b):
acc |= ord(x) ^ ord(y)
return acc == 0
# TODO(hjfreyer): Pull into some kind of middleware?
def EnableCors(handler):
"""Inside a request, set the headers to allow being called cross-domain."""
if 'Origin' in handler.request.headers:
origin = handler.request.headers['Origin']
_, netloc, _, _, _, _ = urlparse.urlparse(origin)
if not (netloc == 'mayone.us' or netloc.endswith('.mayone.us') or
netloc == 'mayday.us' or netloc.endswith('.mayday.us')):
logging.warning('Invalid origin: ' + origin)
handler.error(403)
return
handler.response.headers.add_header('Access-Control-Allow-Origin', origin)
handler.response.headers.add_header('Access-Control-Allow-Methods',
'GET, POST')
handler.response.headers.add_header('Access-Control-Allow-Headers',
'content-type, origin')
| """General utilities."""
import urlparse
import logging
def ConstantTimeIsEqual(a, b):
"""Securely compare two strings without leaking timing information."""
if len(a) != len(b):
return False
acc = 0
for x, y in zip(a, b):
acc |= ord(x) ^ ord(y)
return acc == 0
# TODO(hjfreyer): Pull into some kind of middleware?
def EnableCors(handler):
"""Inside a request, set the headers to allow being called cross-domain."""
if 'Origin' in handler.request.headers:
origin = handler.request.headers['Origin']
_, netloc, _, _, _, _ = urlparse.urlparse(origin)
if not (netloc == 'mayone.us' or netloc.endswith('.mayone.us')):
logging.warning('Invalid origin: ' + origin)
handler.error(403)
return
handler.response.headers.add_header('Access-Control-Allow-Origin', origin)
handler.response.headers.add_header('Access-Control-Allow-Methods',
'GET, POST')
handler.response.headers.add_header('Access-Control-Allow-Headers',
'content-type, origin')
| Python | 0 |
7e742489017bc444f496b1f4cf6ed391caf49ba2 | allow enter to close change note type diag (#651) | aqt/modelchooser.py | aqt/modelchooser.py | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from aqt.qt import *
from anki.hooks import addHook, remHook, runHook
from aqt.utils import shortcut
import aqt
class ModelChooser(QHBoxLayout):
def __init__(self, mw, widget, label=True):
QHBoxLayout.__init__(self)
self.widget = widget
self.mw = mw
self.deck = mw.col
self.label = label
self.setMargin(0)
self.setSpacing(8)
self.setupModels()
addHook('reset', self.onReset)
self.widget.setLayout(self)
def setupModels(self):
if self.label:
self.modelLabel = QLabel(_("Type"))
self.addWidget(self.modelLabel)
# models box
self.models = QPushButton()
#self.models.setStyleSheet("* { text-align: left; }")
self.models.setToolTip(shortcut(_("Change Note Type (Ctrl+N)")))
s = QShortcut(QKeySequence(_("Ctrl+N")), self.widget)
s.connect(s, SIGNAL("activated()"), self.onModelChange)
self.models.setAutoDefault(False)
self.addWidget(self.models)
self.connect(self.models, SIGNAL("clicked()"), self.onModelChange)
# layout
sizePolicy = QSizePolicy(
QSizePolicy.Policy(7),
QSizePolicy.Policy(0))
self.models.setSizePolicy(sizePolicy)
self.updateModels()
def cleanup(self):
remHook('reset', self.onReset)
def onReset(self):
self.updateModels()
def show(self):
self.widget.show()
def hide(self):
self.widget.hide()
def onEdit(self):
import aqt.models
aqt.models.Models(self.mw, self.widget)
def onModelChange(self):
from aqt.studydeck import StudyDeck
current = self.deck.models.current()['name']
# edit button
edit = QPushButton(_("Manage"))
self.connect(edit, SIGNAL("clicked()"), self.onEdit)
def nameFunc():
return sorted(self.deck.models.allNames())
ret = StudyDeck(
self.mw, names=nameFunc,
accept=_("Choose"), title=_("Choose Note Type"),
help="_notes", current=current, parent=self.widget,
buttons=[edit], cancel=False)
if not ret.name:
return
m = self.deck.models.byName(ret.name)
self.deck.conf['curModel'] = m['id']
cdeck = self.deck.decks.current()
cdeck['mid'] = m['id']
self.deck.decks.save(cdeck)
runHook("currentModelChanged")
self.mw.reset()
def updateModels(self):
self.models.setText(self.deck.models.current()['name'])
| # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from aqt.qt import *
from anki.hooks import addHook, remHook, runHook
from aqt.utils import shortcut
import aqt
class ModelChooser(QHBoxLayout):
def __init__(self, mw, widget, label=True):
QHBoxLayout.__init__(self)
self.widget = widget
self.mw = mw
self.deck = mw.col
self.label = label
self.setMargin(0)
self.setSpacing(8)
self.setupModels()
addHook('reset', self.onReset)
self.widget.setLayout(self)
def setupModels(self):
if self.label:
self.modelLabel = QLabel(_("Type"))
self.addWidget(self.modelLabel)
# models box
self.models = QPushButton()
#self.models.setStyleSheet("* { text-align: left; }")
self.models.setToolTip(shortcut(_("Change Note Type (Ctrl+N)")))
s = QShortcut(QKeySequence(_("Ctrl+N")), self.widget)
s.connect(s, SIGNAL("activated()"), self.onModelChange)
self.addWidget(self.models)
self.connect(self.models, SIGNAL("clicked()"), self.onModelChange)
# layout
sizePolicy = QSizePolicy(
QSizePolicy.Policy(7),
QSizePolicy.Policy(0))
self.models.setSizePolicy(sizePolicy)
self.updateModels()
def cleanup(self):
remHook('reset', self.onReset)
def onReset(self):
self.updateModels()
def show(self):
self.widget.show()
def hide(self):
self.widget.hide()
def onEdit(self):
import aqt.models
aqt.models.Models(self.mw, self.widget)
def onModelChange(self):
from aqt.studydeck import StudyDeck
current = self.deck.models.current()['name']
# edit button
edit = QPushButton(_("Manage"))
self.connect(edit, SIGNAL("clicked()"), self.onEdit)
def nameFunc():
return sorted(self.deck.models.allNames())
ret = StudyDeck(
self.mw, names=nameFunc,
accept=_("Choose"), title=_("Choose Note Type"),
help="_notes", current=current, parent=self.widget,
buttons=[edit], cancel=False)
if not ret.name:
return
m = self.deck.models.byName(ret.name)
self.deck.conf['curModel'] = m['id']
cdeck = self.deck.decks.current()
cdeck['mid'] = m['id']
self.deck.decks.save(cdeck)
runHook("currentModelChanged")
self.mw.reset()
def updateModels(self):
self.models.setText(self.deck.models.current()['name'])
| Python | 0 |
52873e4238a54cb93f403d509d2bebef8971ec9b | Work around deprecation warning with new cssutils versions. | django_assets/filter/cssutils/__init__.py | django_assets/filter/cssutils/__init__.py | import logging
import logging.handlers
from django.conf import settings
from django_assets.filter import BaseFilter
__all__ = ('CSSUtilsFilter',)
class CSSUtilsFilter(BaseFilter):
"""Minifies CSS by removing whitespace, comments etc., using the Python
`cssutils <http://cthedot.de/cssutils/>`_ library.
Note that since this works as a parser on the syntax level, so invalid
CSS input could potentially result in data loss.
"""
name = 'cssutils'
def setup(self):
import cssutils
self.cssutils = cssutils
try:
# cssutils logs to stdout by default, hide that in production
if not settings.DEBUG:
log = logging.getLogger('assets.cssutils')
log.addHandler(logging.handlers.MemoryHandler(10))
# Newer versions of cssutils print a deprecation warning
# for 'setlog'.
if hasattr(cssutils.log, 'setLog'):
func = cssutils.log.setLog
else:
func = cssutils.log.setlog
func(log)
except ImportError:
# During doc generation, Django is not going to be setup and will
# fail when the settings object is accessed. That's ok though.
pass
def apply(self, _in, out):
sheet = self.cssutils.parseString(_in.read())
self.cssutils.ser.prefs.useMinified()
out.write(sheet.cssText) | import logging
import logging.handlers
from django.conf import settings
from django_assets.filter import BaseFilter
__all__ = ('CSSUtilsFilter',)
class CSSUtilsFilter(BaseFilter):
"""Minifies CSS by removing whitespace, comments etc., using the Python
`cssutils <http://cthedot.de/cssutils/>`_ library.
Note that since this works as a parser on the syntax level, so invalid
CSS input could potentially result in data loss.
"""
name = 'cssutils'
def setup(self):
import cssutils
self.cssutils = cssutils
try:
# cssutils logs to stdout by default, hide that in production
if not settings.DEBUG:
log = logging.getLogger('assets.cssutils')
log.addHandler(logging.handlers.MemoryHandler(10))
cssutils.log.setlog(log)
except ImportError:
# During doc generation, Django is not going to be setup and will
# fail when the settings object is accessed. That's ok though.
pass
def apply(self, _in, out):
sheet = self.cssutils.parseString(_in.read())
self.cssutils.ser.prefs.useMinified()
out.write(sheet.cssText) | Python | 0 |
52d804aac69bceb9dee9c1b21044551b80bcdfdc | Fix handling default for `--output` option in `people_search` cmd. | linkedin_scraper/commands/people_search.py | linkedin_scraper/commands/people_search.py | from getpass import getpass
from scrapy.commands.crawl import Command as BaseCommand
def sanitize_query(query):
return query.replace(' ', '+')
class Command(BaseCommand):
def short_desc(self):
return "Scrap people from LinkedIn"
def syntax(self):
return "[options] <query>"
def add_options(self, parser):
super().add_options(parser)
parser.add_option('-u', '--username', help='Name of LinkedIn account')
parser.add_option('-p', '--password',
help='Password for LinkedIn account')
def process_options(self, args, opts):
opts.output = opts.output or 'results.csv'
super().process_options(args, opts)
people_search_options = {
'query': sanitize_query(args[0]),
'username': opts.username or input(
'Please provide your LinkedIn username: '),
'password': opts.password or getpass(
'Please provide password for your LinkedIn account: ')
}
opts.spargs.update(people_search_options)
def run(self, args, opts):
# Run people_search spider
args = ['people_search']
super().run(args, opts)
| from getpass import getpass
from scrapy.commands.crawl import Command as BaseCommand
def sanitize_query(query):
return query.replace(' ', '+')
class Command(BaseCommand):
def short_desc(self):
return "Scrap people from LinkedIn"
def syntax(self):
return "[options] <query>"
def add_options(self, parser):
super().add_options(parser)
parser.add_option('-u', '--username', help='Name of LinkedIn account')
parser.add_option('-p', '--password',
help='Password for LinkedIn account')
def process_options(self, args, opts):
super().process_options(args, opts)
opts.output = opts.output or 'results.csv'
people_search_options = {
'query': sanitize_query(args[0]),
'username': opts.username or input(
'Please provide your LinkedIn username: '),
'password': opts.password or getpass(
'Please provide password for your LinkedIn account: ')
}
opts.spargs.update(people_search_options)
def run(self, args, opts):
# Run people_search spider
args = ['people_search']
super().run(args, opts)
| Python | 0 |
637b3c36e9a5952fc29ceaa705703e94f9f172d3 | Update app_settings.py | django_project/wms_client/app_settings.py | django_project/wms_client/app_settings.py | # coding=utf-8
"""Settings file for WMS Client.
"""
from django.conf import settings
# Allow base django project to override settings
default_leaflet_tiles = (
'OpenStreetMap',
'http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
('© <a href="http://www.openstreetmap.org" target="_parent">OpenStreetMap'
'</a> and contributors, under an <a '
'href="http://www.openstreetmap.org/copyright" target="_parent">open '
'license</a>')
)
LEAFLET_TILES = getattr(settings, 'LEAFLET_TILES', default_leaflet_tiles)
settings.TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.media',
)
| # coding=utf-8
"""Settings file for WMS Client.
"""
from django.conf import settings
# Allow base django project to override settings
default_leaflet_tiles = (
'OpenStreetMap',
'http://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
('© <a hr ef="http://www.openstreetmap.org" target="_parent">OpenStreetMap'
'</a> and contributors, under an <a '
'href="http://www.openstreetmap.org/copyright" target="_parent">open '
'license</a>')
)
LEAFLET_TILES = getattr(settings, 'LEAFLET_TILES', default_leaflet_tiles)
settings.TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.media',
)
| Python | 0.000002 |
cdb4f7088ba49c0e2b590d8b818226e4e59eb45e | Fix tests. | st2client/tests/unit/test_config_parser.py | st2client/tests/unit/test_config_parser.py | # coding=utf-8
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import unittest2
from st2client.config_parser import CLIConfigParser
from st2client.config_parser import CONFIG_DEFAULT_VALUES
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG_FILE_PATH_FULL = os.path.join(BASE_DIR, '../fixtures/st2rc.full.ini')
CONFIG_FILE_PATH_PARTIAL = os.path.join(BASE_DIR, '../fixtures/st2rc.partial.ini')
CONFIG_FILE_PATH_UNICODE = os.path.join(BASE_DIR, '../fixtures/test_unicode.ini')
class CLIConfigParserTestCase(unittest2.TestCase):
def test_constructor(self):
parser = CLIConfigParser(config_file_path='doesnotexist', validate_config_exists=False)
self.assertTrue(parser)
self.assertRaises(ValueError, CLIConfigParser, config_file_path='doestnotexist',
validate_config_exists=True)
def test_parse(self):
# File doesn't exist
parser = CLIConfigParser(config_file_path='doesnotexist', validate_config_exists=False)
result = parser.parse()
self.assertEqual(CONFIG_DEFAULT_VALUES, result)
# File exists - all the options specified
expected = {
'general': {
'base_url': 'http://127.0.0.1',
'api_version': 'v1',
'cacert': 'cacartpath',
'silence_ssl_warnings': False
},
'cli': {
'debug': True,
'cache_token': False,
'timezone': 'UTC'
},
'credentials': {
'username': 'test1',
'password': 'test1',
'api_key': None
},
'api': {
'url': 'http://127.0.0.1:9101/v1'
},
'auth': {
'url': 'http://127.0.0.1:9100/'
},
'stream': {
'url': 'http://127.0.0.1:9102/v1/stream'
}
}
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_FULL,
validate_config_exists=False)
result = parser.parse()
self.assertEqual(expected, result)
# File exists - missing options, test defaults
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_PARTIAL,
validate_config_exists=False)
result = parser.parse()
self.assertTrue(result['cli']['cache_token'], True)
def test_get_config_for_unicode_char(self):
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_UNICODE,
validate_config_exists=False)
config = parser.parse()
if six.PY3:
self.assertEqual(config['credentials']['password'], '密码')
else:
self.assertEqual(config['credentials']['password'], u'\u5bc6\u7801')
| # coding=utf-8
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import unittest2
from st2client.config_parser import CLIConfigParser
from st2client.config_parser import CONFIG_DEFAULT_VALUES
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG_FILE_PATH_FULL = os.path.join(BASE_DIR, '../fixtures/st2rc.full.ini')
CONFIG_FILE_PATH_PARTIAL = os.path.join(BASE_DIR, '../fixtures/st2rc.partial.ini')
CONFIG_FILE_PATH_UNICODE = os.path.join(BASE_DIR, '../fixtures/test_unicode.ini')
class CLIConfigParserTestCase(unittest2.TestCase):
def test_constructor(self):
parser = CLIConfigParser(config_file_path='doesnotexist', validate_config_exists=False)
self.assertTrue(parser)
self.assertRaises(ValueError, CLIConfigParser, config_file_path='doestnotexist',
validate_config_exists=True)
def test_parse(self):
# File doesn't exist
parser = CLIConfigParser(config_file_path='doesnotexist', validate_config_exists=False)
result = parser.parse()
self.assertEqual(CONFIG_DEFAULT_VALUES, result)
# File exists - all the options specified
expected = {
'general': {
'base_url': 'http://127.0.0.1',
'api_version': 'v1',
'cacert': 'cacartpath',
'silence_ssl_warnings': False
},
'cli': {
'debug': True,
'cache_token': False,
'timezone': 'UTC'
},
'credentials': {
'username': 'test1',
'password': 'test1',
'api_key': None
},
'api': {
'url': 'http://127.0.0.1:9101/v1'
},
'auth': {
'url': 'http://127.0.0.1:9100/'
},
'stream': {
'url': 'http://127.0.0.1:9102/v1/stream'
}
}
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_FULL,
validate_config_exists=False)
result = parser.parse()
self.assertEqual(expected, result)
# File exists - missing options, test defaults
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_PARTIAL,
validate_config_exists=False)
result = parser.parse()
self.assertTrue(result['cli']['cache_token'], True)
def test_get_config_for_unicode_char(self):
parser = CLIConfigParser(config_file_path=CONFIG_FILE_PATH_UNICODE,
validate_config_exists=False)
config = parser.parse()
if six.PY3:
self.assertEqual(config['credentials']['password'], u'测试')
else:
self.assertEqual(config['credentials']['password'], u'\u5bc6\u7801')
| Python | 0 |
4071c77a6e598c27f7a8b2195ff5e68332120615 | Fix formatting. | st2common/st2common/cmd/validate_config.py | st2common/st2common/cmd/validate_config.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for validating a config file against a a particular config schema.
"""
import yaml
from oslo_config import cfg
from st2common.constants.system import VERSION_STRING
from st2common.constants.exit_codes import SUCCESS_EXIT_CODE
from st2common.constants.exit_codes import FAILURE_EXIT_CODE
from st2common.util.pack import validate_config_against_schema
__all__ = [
'main'
]
def _do_register_cli_opts(opts, ignore_errors=False):
    """Register CLI options, optionally swallowing registration errors.

    :param opts: Iterable of oslo.config option objects to register.
    :param ignore_errors: When True, skip options that fail to register
        (e.g. duplicates) instead of raising.
    """
    for opt in opts:
        try:
            cfg.CONF.register_cli_opt(opt)
        except Exception:
            # Was a bare ``except:``, which would also swallow SystemExit and
            # KeyboardInterrupt; only genuine errors should be suppressible.
            if not ignore_errors:
                raise
def _register_cli_opts():
    """Register the CLI options this script understands."""
    for option in (
        cfg.StrOpt('schema-path', default=None, required=True,
                   help='Path to the config schema to use for validation.'),
        cfg.StrOpt('config-path', default=None, required=True,
                   help='Path to the config file to validate.'),
    ):
        cfg.CONF.register_cli_opt(option)
def main():
    """Validate the configured config file against the configured schema.

    Reads ``--schema-path`` and ``--config-path`` from the CLI, loads both
    as YAML, and returns a process exit code (SUCCESS_EXIT_CODE on success,
    FAILURE_EXIT_CODE when validation fails).
    """
    _register_cli_opts()
    cfg.CONF(args=None, version=VERSION_STRING)
    schema_path = cfg.CONF.schema_path
    config_path = cfg.CONF.config_path
    print('Validating config "%s" against schema in "%s"' % (config_path, schema_path))
    with open(schema_path, 'r') as fp:
        config_schema = yaml.safe_load(fp.read())
    with open(config_path, 'r') as fp:
        config_object = yaml.safe_load(fp.read())
    try:
        validate_config_against_schema(config_schema=config_schema, config_object=config_object)
    except Exception as e:
        print('Failed to validate pack config: %s' % str(e))
        return FAILURE_EXIT_CODE
    print('Config "%s" successfuly validated against schema in %s.' % (config_path, schema_path))
    return SUCCESS_EXIT_CODE
| # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for validating a config file against a a particular config schema.
"""
import yaml
import traceback
from oslo_config import cfg
from st2common.constants.system import VERSION_STRING
from st2common.constants.exit_codes import SUCCESS_EXIT_CODE
from st2common.constants.exit_codes import FAILURE_EXIT_CODE
from st2common.util.pack import validate_config_against_schema
__all__ = [
'main'
]
def _do_register_cli_opts(opts, ignore_errors=False):
    """Register CLI options, optionally swallowing registration errors.

    :param opts: Iterable of oslo.config option objects to register.
    :param ignore_errors: When True, skip options that fail to register
        (e.g. duplicates) instead of raising.
    """
    for opt in opts:
        try:
            cfg.CONF.register_cli_opt(opt)
        except Exception:
            # Was a bare ``except:``, which would also swallow SystemExit and
            # KeyboardInterrupt; only genuine errors should be suppressible.
            if not ignore_errors:
                raise
def _register_cli_opts():
    """Register the CLI options this script understands."""
    for option in (
        cfg.StrOpt('schema-path', default=None, required=True,
                   help='Path to the config schema to use for validation.'),
        cfg.StrOpt('config-path', default=None, required=True,
                   help='Path to the config file to validate.'),
    ):
        cfg.CONF.register_cli_opt(option)
def main():
    """Validate the configured config file against the configured schema.

    Reads ``--schema-path`` and ``--config-path`` from the CLI, loads both
    as YAML, and returns a process exit code (SUCCESS_EXIT_CODE on success,
    FAILURE_EXIT_CODE when validation fails).
    """
    _register_cli_opts()
    cfg.CONF(args=None, version=VERSION_STRING)
    schema_path = cfg.CONF.schema_path
    config_path = cfg.CONF.config_path
    print('Validating config "%s" against schema in "%s"' % (config_path, schema_path))
    with open(schema_path, 'r') as fp:
        config_schema = yaml.safe_load(fp.read())
    with open(config_path, 'r') as fp:
        config_object = yaml.safe_load(fp.read())
    try:
        validate_config_against_schema(config_schema=config_schema, config_object=config_object)
    except Exception as e:
        # Bug fix: the message must be %-formatted. The original passed
        # ``str(e)`` as a second positional argument to print(), which
        # printed "message %s" and the error side by side instead of
        # interpolating it.
        print('Failed to validate pack config: %s' % str(e))
        traceback.print_exc()
        return FAILURE_EXIT_CODE
    print('Config "%s" successfuly validated against schema in %s.' % (config_path, schema_path))
    return SUCCESS_EXIT_CODE
| Python | 0.000017 |
d35aed562b3c9eba6f7de7ac4aa7d6ad7723ec0a | Add listnener decos | cogs/cancer.py | cogs/cancer.py | from discord.ext.commands import Cog
class Cancer(Cog):
    """Joke cog that announces member joins/leaves and emoji updates in a
    hard-coded whitelist of guilds."""

    def __init__(self, bot):
        self.bot = bot
        # Guild IDs this cog is allowed to post in.
        self.ok_list = [198101180180594688, 246291440106340352]

    # Bug fix: ``Cog.listener`` is a decorator *factory* and must be called.
    # Bare ``@Cog.listener`` passes the coroutine as the ``name`` argument,
    # which raises TypeError when the class body executes, so no events
    # were ever registered.
    @Cog.listener()
    async def on_member_join(self, member):
        """Greet new members in whitelisted guilds."""
        if member.guild.id not in self.ok_list:
            return
        await member.guild.system_channel.send("yes " + member.mention)

    @Cog.listener()
    async def on_member_remove(self, member):
        """Announce departures in whitelisted guilds."""
        if member.guild.id not in self.ok_list:
            return
        await member.guild.system_channel.send("no " + member.mention)

    @Cog.listener()
    async def on_guild_emojis_update(self, guild, before, after):
        """Announce emoji list changes in whitelisted guilds."""
        if guild.id not in self.ok_list:
            return
        await guild.system_channel.send("the emojis were updated")
def setup(bot):
    """discord.py extension entry point: attach the Cancer cog."""
    bot.add_cog(Cancer(bot))
| from discord.ext import commands
class Cancer(commands.Cog):
    """Joke cog that announces member joins/leaves and emoji updates in a
    hard-coded whitelist of guilds."""

    def __init__(self, bot):
        self.bot = bot
        # Guild IDs this cog is allowed to post in.
        self.ok_list = [198101180180594688, 246291440106340352]

    # Bug fix: in discord.py >= 1.0, ``on_*`` coroutines defined on a Cog are
    # only registered as event listeners when decorated with
    # ``@commands.Cog.listener()``; without it these handlers never fire.
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Greet new members in whitelisted guilds."""
        if member.guild.id not in self.ok_list:
            return
        await member.guild.system_channel.send("yes " + member.mention)

    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Announce departures in whitelisted guilds."""
        if member.guild.id not in self.ok_list:
            return
        await member.guild.system_channel.send("no " + member.mention)

    @commands.Cog.listener()
    async def on_guild_emojis_update(self, guild, before, after):
        """Announce emoji list changes in whitelisted guilds."""
        if guild.id not in self.ok_list:
            return
        await guild.system_channel.send("the emojis were updated")
def setup(bot):
    """discord.py extension entry point: attach the Cancer cog."""
    bot.add_cog(Cancer(bot))
| Python | 0 |
1b3c9e5f46f48865882f1087ced0ade168233711 | fix formatting and caching | cogs/stonks.py | cogs/stonks.py | import discord
import json
from datetime import datetime
from discord.ext import commands
from utils.aiohttp_wrap import aio_get_json
class Stonks(commands.Cog):
    """Stock-quote lookup command backed by the Finnhub quote API,
    with per-symbol Redis caching."""

    URL = "https://finnhub.io/api/v1/quote"
    TTL = 60 * 15  # cache lifetime for a quote, in seconds (15 minutes)

    def __init__(self, bot):
        self.bot = bot
        self.session = bot.aio_session
        self.redis_client = bot.redis_client
        # self.headers = {'X-Finnhub-Token': bot.api_keys["stonks"]}
        with open('data/apikeys.json') as f:
            self.api_key = json.load(f)["stonks"]
        self.headers = {'X-Finnhub-Token': self.api_key}

    @commands.command(name="stonk", aliases=["stonks", "stock", "stocks"])
    async def stonks(self, ctx: commands.Context, *, symbol: str):
        """Look up a quote for ``symbol``, serving from Redis when cached."""
        symbol = symbol.upper()
        params = {"symbol": symbol}
        redis_key = f"stonks:{symbol}"
        if await self.redis_client.exists(redis_key):
            resp = json.loads(await self.redis_client.get(redis_key))
        else:
            resp = await aio_get_json(self.session, self.URL, headers=self.headers, params=params)
        if resp is None:
            return await ctx.error("API Error", description="There was an issue with the stocks API, try again later")
        # Finnhub signals an unknown symbol with a zero timestamp field.
        if resp['t'] == 0:
            return await ctx.error("Stock error", description=f"Couldn't find any stock information for `{symbol}`")
        await self.redis_client.set(redis_key, json.dumps(resp), ex=self.TTL)
        # 'c' = current price, 'pc' = previous close, 't' = quote timestamp.
        em = discord.Embed(color=discord.Color.blurple())
        em.set_author(name=symbol, icon_url="https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/twitter/259/chart-increasing_1f4c8.png")
        em.add_field(name="Current Price", value=f"${resp['c']:.2f}")
        em.add_field(name="Previous Close", value=f"${resp['pc']:.2f}")
        em.add_field(name="% Change Today", value=f"{(resp['c'] - resp['pc'])/resp['pc']:.2%}")
        em.set_footer()
        em.timestamp = datetime.fromtimestamp(resp['t'])
        await ctx.send(embed=em)
def setup(bot):
    """discord.py extension entry point: attach the Stonks cog."""
    bot.add_cog(Stonks(bot))
| import discord
import json
from datetime import datetime
from discord.ext import commands
from utils.aiohttp_wrap import aio_get_json
class Stonks(commands.Cog):
    """Stock-quote lookup command backed by the Finnhub quote API,
    with per-symbol Redis caching."""

    URL = "https://finnhub.io/api/v1/quote"
    # Bug fix: quotes are point-in-time data, but cache entries were written
    # without an expiry, so users were served stale prices forever. Expire
    # cached quotes after 15 minutes.
    CACHE_TTL = 60 * 15

    def __init__(self, bot):
        self.bot = bot
        self.session = bot.aio_session
        self.redis_client = bot.redis_client
        # self.headers = {'X-Finnhub-Token': bot.api_keys["stonks"]}
        with open('data/apikeys.json') as f:
            self.api_key = json.load(f)["stonks"]
        self.headers = {'X-Finnhub-Token': self.api_key}

    @commands.command(name="stonk", aliases=["stonks", "stock", "stocks"])
    async def stonks(self, ctx: commands.Context, *, symbol: str):
        """Look up a quote for ``symbol``, serving from Redis when cached."""
        symbol = symbol.upper()
        params = {"symbol": symbol}
        redis_key = f"stonks:{symbol}"
        if await self.redis_client.exists(redis_key):
            resp = json.loads(await self.redis_client.get(redis_key))
        else:
            resp = await aio_get_json(self.session, self.URL, headers=self.headers, params=params)
        if resp is None:
            return await ctx.error("API Error", description="There was an issue with the stocks API, try again later")
        # Finnhub signals an unknown symbol with a zero timestamp field.
        if resp['t'] == 0:
            return await ctx.error("Stock error", description=f"Couldn't find any stock information for `{symbol}`")
        await self.redis_client.set(redis_key, json.dumps(resp), ex=self.CACHE_TTL)
        # 'c' = current price, 'pc' = previous close, 't' = quote timestamp.
        em = discord.Embed(color=discord.Color.blurple())
        em.set_author(name=symbol, icon_url="https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/twitter/259/chart-increasing_1f4c8.png")
        em.add_field(name="Current Price", value=f"${resp['c']}")
        em.add_field(name="Previous Close", value=f"${resp['pc']}")
        em.add_field(name="% Change today", value=f"{(resp['c'] - resp['pc'])/resp['pc']:.2%}")
        em.timestamp = datetime.fromtimestamp(resp['t'])
        await ctx.send(embed=em)
def setup(bot):
    """discord.py extension entry point: attach the Stonks cog."""
    bot.add_cog(Stonks(bot))
| Python | 0.000001 |
50e69a0d53dffbc961b865f583ca071dfb49648c | Reformat class | mediacloud/mediawords/util/sql.py | mediacloud/mediawords/util/sql.py | import time
import datetime
# noinspection PyPackageRequirements
import dateutil.parser
from mediawords.util.perl import decode_string_from_bytes_if_needed
def get_sql_date_from_epoch(epoch: int) -> str:
    """Convert a UNIX timestamp to a 'YYYY-MM-DD HH:MM:SS' local-time string.

    Invalid input falls back to the zero-epoch date, mirroring the Perl
    implementation's error behavior. Uses local time by default; no timezone
    needs to be set.
    """
    epoch_zero = '1970-01-01 00:00:00'
    try:
        local_time = datetime.datetime.fromtimestamp(int(epoch))
    except ValueError:
        # Mimic perl's behavior of sending the 0 epoch date on an error
        return epoch_zero
    return local_time.strftime('%Y-%m-%d %H:%M:%S')
def sql_now() -> str:
    """Return the current local time as an SQL 'YYYY-MM-DD HH:MM:SS' string."""
    return get_sql_date_from_epoch(int(time.time()))
def get_epoch_from_sql_date(date: str) -> int:
    """Given a date in the sql format 'YYYY-MM-DD', return the epoch time."""
    date = decode_string_from_bytes_if_needed(date)
    # dateutil accepts both plain dates and full timestamps; a naive result
    # is interpreted as local time by .timestamp().
    parsed_date = dateutil.parser.parse(date)
    return int(parsed_date.timestamp())
def increment_day(date: str, days: int = 1) -> str:
    """Given a date in the sql format 'YYYY-MM-DD', increment it by $days days."""
    date = decode_string_from_bytes_if_needed(date)
    if days == 0:
        return date
    # Add the requested number of days plus 12 hours, then truncate back to a
    # date — presumably the half-day pad absorbs DST-shortened/lengthened days.
    epoch_date = get_epoch_from_sql_date(date) + (((days * 24) + 12) * 60 * 60)
    return datetime.datetime.fromtimestamp(int(epoch_date)).strftime('%Y-%m-%d')
| import time
import datetime
# noinspection PyPackageRequirements
import dateutil.parser
from mediawords.util.perl import decode_string_from_bytes_if_needed
def get_sql_date_from_epoch(epoch: int) -> str:
    """Convert a UNIX timestamp to a 'YYYY-MM-DD HH:MM:SS' local-time string.

    Invalid input falls back to the zero-epoch date, mirroring the Perl
    implementation's error behavior.
    """
    # Returns local date by default, no need to set timezone
    try:
        return datetime.datetime.fromtimestamp(int(epoch)).strftime('%Y-%m-%d %H:%M:%S')
    # PEP 8 cleanup: was the non-idiomatic ``except( ValueError ):``.
    except ValueError:
        # Mimic perl's behavior of sending the 0 epoch date on an error
        return '1970-01-01 00:00:00'
def sql_now() -> str:
    """Return the current local time as an SQL 'YYYY-MM-DD HH:MM:SS' string."""
    return get_sql_date_from_epoch(int(time.time()))
def get_epoch_from_sql_date(date: str) -> int:
    """Given a date in the sql format 'YYYY-MM-DD', return the epoch time."""
    date = decode_string_from_bytes_if_needed(date)
    # dateutil accepts both plain dates and full timestamps; a naive result
    # is interpreted as local time by .timestamp().
    parsed_date = dateutil.parser.parse(date)
    return int(parsed_date.timestamp())
def increment_day(date: str, days: int = 1) -> str:
    """Given a date in the sql format 'YYYY-MM-DD', increment it by $days days."""
    date = decode_string_from_bytes_if_needed(date)
    if days == 0:
        return date
    # Add the requested number of days plus 12 hours, then truncate back to a
    # date — presumably the half-day pad absorbs DST-shortened/lengthened days.
    epoch_date = get_epoch_from_sql_date(date) + (((days * 24) + 12) * 60 * 60)
    return datetime.datetime.fromtimestamp(int(epoch_date)).strftime('%Y-%m-%d')
| Python | 0 |
b87711d62a1f2c4974f945625312d8a33ba91fb6 | convert grp_members into a lambda and add usr_search lambda | code-samples/membersOfDomainGroup.py | code-samples/membersOfDomainGroup.py | #!/usr/bin/env python
# print a list of members of a domain group
# Python 2 script. Defaults below are overridable via command-line flags.
param = {
    '-f': 'mail',  # field name
    '-s': '\n',  # separator
}
import getopt
import ldap
import re
import sys
# Parse flags; -g (group name) is mandatory.
try:
    param.update(dict(getopt.getopt(sys.argv[1:], 'g:f:s:')[0]))
    if '-g' not in param:
        sys.stderr.write("-g parameter is required\n")
        sys.exit(1)
except getopt.GetoptError:
    sys.stderr.write("Usage: %s -g groupName [ -f LDAP field ] [ -s output separator ]\n" % sys.argv[0])
    sys.exit(1)
ldapSrv = ldap.initialize('ldap://dc.example.com')
ldapSrv.bind_s('bind-user@example.com', 'bindPasSw0rd')
# get the output field from an ldap result tuple (dn, attrs)
ldap_output = lambda r: r[1][param['-f']][0]
# make a flat list from a (recursively) nested list of lists
flat = lambda lst: reduce(lambda l, e: l + flat(e) if isinstance(e, list) else l + [e], lst, [])
# search for a group by filter
grp_search = lambda fltr: ldapSrv.search_s('ou=Resources,dc=example,dc=com', ldap.SCOPE_SUBTREE, '(&(objectclass=group)(%s))' % fltr, ['dn'])
# search for users inside a given group
usr_search = lambda grpDN: ldapSrv.search_s('ou=Users,dc=example,dc=com', ldap.SCOPE_SUBTREE, '(&(objectclass=person)(memberOf=%s))' % grpDN, [param['-f']])
# get a nested list of the members of a group with a given DN
# (recurses through groups that are themselves members of grpDN)
grp_members = lambda grpDN: [grp_members(grp[0]) for grp in grp_search('memberOf=%s' % grpDN)] + usr_search(grpDN)
grp = grp_search('name=%s' % param['-g'])
if not grp:
    sys.stderr.write("Group '%s' isn't found in LDAP\n" % param['-g'])
    sys.exit(2)
# Recurse through nested groups, flatten, de-duplicate, sort, and print.
print param['-s'].join(sorted(set(ldap_output(res) for res in flat(grp_members(grp[0][0])) if res)))
| #!/usr/bin/env python
# print a list of members of a domain group
# Python 2 script. Defaults below are overridable via command-line flags.
param = {
    '-f': 'mail',  # field name
    '-s': '\n',  # separator
}
import getopt
import ldap
import re
import sys
# Parse flags; -g (group name) is mandatory.
try:
    param.update(dict(getopt.getopt(sys.argv[1:], 'g:f:s:')[0]))
    if '-g' not in param:
        sys.stderr.write("-g parameter is required\n")
        sys.exit(1)
except getopt.GetoptError:
    sys.stderr.write("Usage: %s -g groupName [ -f LDAP field ] [ -s output separator ]\n" % sys.argv[0])
    sys.exit(1)
ldapSrv = ldap.initialize('ldap://dc.example.com')
ldapSrv.bind_s('bind-user@example.com', 'bindPasSw0rd')
# get the output field from an ldap result tuple (dn, attrs)
ldap_output = lambda r: r[1][param['-f']][0]
# make a flat list from a (recursively) nested list of lists
flat = lambda lst: reduce(lambda l, e: l + flat(e) if isinstance(e, list) else l + [e], lst, [])
# search for a group by filter
grp_search = lambda fltr: ldapSrv.search_s('ou=Resources,dc=example,dc=com', ldap.SCOPE_SUBTREE, '(&(objectclass=group)(%s))' % fltr, ['dn'])
# search for members in LDAP groups and return a nested list of them
# (recurses through groups that are themselves members of gdn)
def grp_members(gdn):
    return [grp_members(grp[0]) for grp in grp_search('memberOf=%s' % gdn)
            ] + ldapSrv.search_s('ou=Users,dc=example,dc=com', ldap.SCOPE_SUBTREE, '(&(objectclass=person)(memberOf=%s))' % gdn, [param['-f']])
grp = grp_search('name=%s' % param['-g'])
if not grp:
    sys.stderr.write("Group '%s' isn't found in LDAP\n" % param['-g'])
    sys.exit(2)
# Recurse through nested groups, flatten, de-duplicate, sort, and print.
print param['-s'].join(sorted(set(ldap_output(res) for res in flat(grp_members(grp[0][0])) if res)))
| Python | 0.000032 |
1189a06433d1a38662124d5799eb2610c31d5100 | remove commented out code | src/buzzfeed/clean_parsed_colombia.py | src/buzzfeed/clean_parsed_colombia.py | """
Script to clean the Colombia data from BuzzFeed Zika data repository
Run this script from the root directory
e.g., `~/git/vbi/zika_data_to_cdc'
from there you can run `python src/buzfeed/clean_parsed_colombia.py`
"""
import os
import sys
import re
import pandas as pd
sys.path.append(os.getcwd())
import src.helper as helper
def clean_and_export_municipal(municipal_data_path, places_df, data_guide_df):
    """Clean each municipal-level CSV and write one output file per report date.

    For every input file: tag the report date, attach canonical place names,
    melt the ``zika_*`` columns into long format, join the data-field guide,
    and export the CDC-ready column subset to ``output/``.
    """
    num_data = len(municipal_data_path)
    for idx, data_path in enumerate(municipal_data_path):
        print("cleaning municipal {} of {}".format(idx + 1, num_data))
        df = pd.read_csv(data_path)
        report_date = helper.get_report_date_from_filepath(data_path)
        df['report_date'] = report_date
        df['time_period'] = "NA"
        # Attach canonical place identifiers via the alternate-name columns.
        df = pd.merge(df, places_df,
                      left_on=['department', 'municipality'],
                      right_on=['alt_name1', 'alt_name2'])
        # Wide -> long: every zika_* column becomes a (data_field, value) row.
        melt_columns = [x for x in df.columns if re.search('^zika_', x)]
        id_vars = [x for x in df.columns if x not in melt_columns]
        df = pd.melt(df,
                     id_vars=id_vars,
                     value_vars=melt_columns,
                     var_name='data_field_original',
                     value_name='value')
        df = pd.merge(df, data_guide_df,
                      left_on=['data_field_original'],
                      right_on=['data_field'])
        df = helper.subset_columns_for_cdc(df)
        df.to_csv('output/colombia-municipal-{}.csv'.format(report_date),
                  index=False)
def clean_and_export_regional(regional_data_path):
    """Placeholder: regional (department-level) cleaning is not implemented yet."""
    pass
def main():
    """Load the lookup tables, find parsed Colombia datasets, and clean each category."""
    places_path = '../zika/Colombia/CO_Places.csv'
    places = pd.read_csv(places_path)
    data_guide_path = '../zika/Colombia/CO_Data_Guide.csv'
    data_guide = pd.read_csv(data_guide_path)
    buzzfeed_colombia_datasets = helper.get_data_from_path(
        os.path.join('..', 'zika-data', 'data',
                     'parsed', 'colombia', '*.csv'))
    print("Datasets found: {}\n".format(buzzfeed_colombia_datasets))
    # Municipal files are identified by filename; everything else is regional.
    colombia_municipal = [
        x for x in buzzfeed_colombia_datasets if re.search('municipal', x)]
    colombia_regional = [
        x for x in buzzfeed_colombia_datasets if x not in colombia_municipal]
    print("municipal datasets: {}\n".format(colombia_municipal))
    print("regional datasets: {}\n".format(colombia_regional))
    clean_and_export_municipal(colombia_municipal, places, data_guide)
    clean_and_export_regional(colombia_regional)
# Run the cleaning pipeline when executed as a script.
if __name__ == '__main__':
    main()
| """
Script to clean the Colombia data from BuzzFeed Zika data repository
Run this script from the root directory
e.g., `~/git/vbi/zika_data_to_cdc'
from there you can run `python src/buzfeed/clean_parsed_colombia.py`
"""
import os
import sys
import re
import pandas as pd
sys.path.append(os.getcwd())
import src.helper as helper
def clean_and_export_municipal(municipal_data_path, places_df, data_guide_df):
    """Clean each municipal-level CSV and write one output file per report date.

    For every input file: tag the report date, attach canonical place names,
    melt the ``zika_*`` columns into long format, join the data-field guide,
    and export the CDC-ready column subset to ``output/``.
    """
    # Cleanup: removed a leftover commented-out ``# break`` debugging line.
    num_data = len(municipal_data_path)
    for idx, data_path in enumerate(municipal_data_path):
        print("cleaning municipal {} of {}".format(idx + 1, num_data))
        df = pd.read_csv(data_path)
        report_date = helper.get_report_date_from_filepath(data_path)
        df['report_date'] = report_date
        df['time_period'] = "NA"
        # Attach canonical place identifiers via the alternate-name columns.
        df = pd.merge(df, places_df,
                      left_on=['department', 'municipality'],
                      right_on=['alt_name1', 'alt_name2'])
        # Wide -> long: every zika_* column becomes a (data_field, value) row.
        melt_columns = [x for x in df.columns if re.search('^zika_', x)]
        id_vars = [x for x in df.columns if x not in melt_columns]
        df = pd.melt(df,
                     id_vars=id_vars,
                     value_vars=melt_columns,
                     var_name='data_field_original',
                     value_name='value')
        df = pd.merge(df, data_guide_df,
                      left_on=['data_field_original'],
                      right_on=['data_field'])
        df = helper.subset_columns_for_cdc(df)
        df.to_csv('output/colombia-municipal-{}.csv'.format(report_date),
                  index=False)
def clean_and_export_regional(regional_data_path):
    """Placeholder: regional (department-level) cleaning is not implemented yet."""
    pass
def main():
    """Load the lookup tables, find parsed Colombia datasets, and clean each category."""
    # Cleanup: removed leftover commented-out debugging lines
    # (``# here = os.path.abspath(...)`` / ``# print(here)``).
    places_path = '../zika/Colombia/CO_Places.csv'
    places = pd.read_csv(places_path)
    data_guide_path = '../zika/Colombia/CO_Data_Guide.csv'
    data_guide = pd.read_csv(data_guide_path)
    buzzfeed_colombia_datasets = helper.get_data_from_path(
        os.path.join('..', 'zika-data', 'data',
                     'parsed', 'colombia', '*.csv'))
    print("Datasets found: {}\n".format(buzzfeed_colombia_datasets))
    # Municipal files are identified by filename; everything else is regional.
    colombia_municipal = [
        x for x in buzzfeed_colombia_datasets if re.search('municipal', x)]
    colombia_regional = [
        x for x in buzzfeed_colombia_datasets if x not in colombia_municipal]
    print("municipal datasets: {}\n".format(colombia_municipal))
    print("regional datasets: {}\n".format(colombia_regional))
    clean_and_export_municipal(colombia_municipal, places, data_guide)
    clean_and_export_regional(colombia_regional)
# Run the cleaning pipeline when executed as a script.
if __name__ == '__main__':
    main()
| Python | 0 |
f47ebbe4dcacdd0ef96799a5d11925e0a8b6d5d5 | fix import path | test/test_resultset.py | test/test_resultset.py | from unittest import TestCase
from statscraper import ResultSet
from pandas.api import types as ptypes
class TestResultSet(TestCase):
    """Tests for ResultSet's pandas interoperability."""

    def test_pandas_export(self):
        """Numeric result values should export to a numeric pandas dtype."""
        result = ResultSet()
        result.append({'city': "Voi", 'value': 45483})
        df = result.pandas
        self.assertTrue(ptypes.is_numeric_dtype(df.value))
| from unittest import TestCase
from statscraper.base_scraper import ResultSet
from pandas.api import types as ptypes
class TestResultSet(TestCase):
    """Tests for ResultSet's pandas interoperability."""

    def test_pandas_export(self):
        """Numeric result values should export to a numeric pandas dtype."""
        result = ResultSet()
        result.append({'city': "Voi", 'value': 45483})
        df = result.pandas
        self.assertTrue(ptypes.is_numeric_dtype(df.value))
| Python | 0.000007 |
cf5ad85a35824646a30d90de79d72f4068dade50 | Fix failing QML test with Qt 5.9 due to assert | tests/QtQml/bug_557.py | tests/QtQml/bug_557.py | #############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
from helper import adjust_filename
from PySide2.QtCore import QUrl
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlEngine, QQmlComponent
# Regression check: a QQmlComponent created with an explicit QQmlEngine must
# be able to load a QML file without crashing.
app = QGuiApplication(sys.argv)
engine = QQmlEngine()
component = QQmlComponent(engine)
# This should segfault if the QDeclarativeComponent has not QQmlEngine
component.loadUrl(QUrl.fromLocalFile(adjust_filename('foo.qml', __file__)))
| #############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PySide2.QtCore import QUrl
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlEngine, QQmlComponent
# Regression check: a QQmlComponent created with an explicit QQmlEngine must
# be able to load a QML file without crashing.
app = QGuiApplication(sys.argv)
engine = QQmlEngine()
component = QQmlComponent(engine)
# This should segfault if the QDeclarativeComponent has not QQmlEngine
component.loadUrl(QUrl.fromLocalFile('foo.qml'))
| Python | 0 |
a5b034704b75496cd1357b66f5fe0bbabb27a114 | Implement ``plot_tree`` method | astrodendro/plot.py | astrodendro/plot.py | import numpy as np
class DendrogramPlotter(object):
    """
    A class to plot a dendrogram object
    """

    def __init__(self, dendrogram):
        # should we copy to ensure immutability?
        self.dendrogram = dendrogram
        # Maps each structure to its horizontal plot position; filled by sort().
        self._cached_positions = None
        self.sort()

    def sort(self, sort_key=lambda s: s.get_peak(subtree=True)[1], reverse=False):
        """
        Sort the position of the leaves for plotting

        Parameters
        ----------
        sort_key : function, optional
            This should be a function that takes a
            `~astrodendro.structure.Structure` and returns a scalar that is
            then used to sort the leaves.
        reverse : bool, optional
            Whether to reverse the sorting
        """
        sorted_trunk_structures = sorted(self.dendrogram.trunk, key=sort_key, reverse=reverse)
        positions = {}
        x = 0  # the first index for each trunk structure
        for structure in sorted_trunk_structures:
            # Get sorted leaves
            sorted_leaves = structure.get_sorted_leaves(subtree=True, reverse=reverse)
            # Loop over leaves and assign positions
            for leaf in sorted_leaves:
                positions[leaf] = x
                x += 1
            # Sort structures from the top-down
            sorted_structures = sorted(structure.descendants, key=lambda s: s.level, reverse=True) + [structure]
            # Loop through structures and assign position of branches as the mean
            # of the leaves
            for structure in sorted_structures:
                if not structure.is_leaf:
                    positions[structure] = np.mean([positions[child] for child in structure.children])
        self._cached_positions = positions

    def plot_tree(self, ax, structure=None, subtree=True, autoscale=True, **kwargs):
        """
        Plot the dendrogram tree or a substructure

        Parameters
        ----------
        ax : `matplotlib.axes.Axes` instance
            The Axes inside which to plot the dendrogram
        structure : int or `~astrodendro.structure.Structure`, optional
            If specified, only plot this structure
        subtree : bool, optional
            If a structure is specified, by default the whole subtree will be
            plotted, but this can be disabled with this option.
        autoscale : bool, optional
            Whether to automatically adapt the window limits to the tree

        Notes
        -----
        Any additional keyword arguments are passed to
        `~matplotlib.collections.LineCollection` and can be used to control the
        appearance of the plot.
        """
        # NOTE(review): ``subtree`` is currently unused in this body — confirm
        # whether it should be forwarded to get_lines().
        # Get the lines for the dendrogram
        lines = self.get_lines(structure=structure, **kwargs)
        # Add the lines to the axes
        ax.add_collection(lines)
        # Auto-scale axes (doesn't happen by default with ``add_collection``)
        if autoscale:
            ax.margins(0.05)
            ax.autoscale_view(True, True, True)

    def get_lines(self, structure=None, **kwargs):
        """
        Get a collection of lines to draw the dendrogram

        Parameters
        ----------
        structure : `~astrodendro.structure.Structure`
            The structure to plot. If not set, the whole tree will be plotted.

        Returns
        -------
        lines : `astrodendro.plot.StructureCollection`
            The lines (sub-class of LineCollection) which can be directly used in Matplotlib

        Notes
        -----
        Any additional keyword arguments are passed to the
        `~matplotlib.collections.LineCollection` class.
        """
        if self._cached_positions is None:
            raise Exception("Leaves have not yet been sorted")
        if structure is None:
            structures = self.dendrogram.all_nodes
        else:
            # Accept a structure index as a convenience.
            if type(structure) is int:
                structure = self.dendrogram.nodes_dict[structure]
            structures = structure.descendants + [structure]
        lines = []
        mapping = []
        for s in structures:
            x = self._cached_positions[s]
            # Vertical segment from the parent merge level up to this height.
            bot = s.parent.height if s.parent is not None else s.vmin
            top = s.height
            lines.append(([x, bot], [x, top]))
            mapping.append(s)
            if s.is_branch:
                # Horizontal connector spanning the children's positions.
                pc = [self._cached_positions[c] for c in s.children]
                lines.append(([min(pc), top], [max(pc), top]))
                mapping.append(s)
        # Local import — presumably to defer the matplotlib dependency.
        from .structure_collection import StructureCollection
        sc = StructureCollection(lines, **kwargs)
        sc.structures = mapping
        return sc
| import numpy as np
class DendrogramPlotter(object):
    """
    A class to plot a dendrogram object
    """

    def __init__(self, dendrogram):
        # should we copy to ensure immutability?
        self.dendrogram = dendrogram
        # Maps each structure to its horizontal plot position; filled by sort().
        self._cached_positions = None
        self.sort()

    def sort(self, sort_key=lambda s: s.get_peak(subtree=True)[1], reverse=False):
        """
        Sort the position of the leaves for plotting

        Parameters
        ----------
        sort_key : function, optional
            This should be a function that takes a
            `~astrodendro.structure.Structure` and returns a scalar that is
            then used to sort the leaves.
        reverse : bool, optional
            Whether to reverse the sorting
        """
        sorted_trunk_structures = sorted(self.dendrogram.trunk, key=sort_key, reverse=reverse)
        positions = {}
        x = 0  # the first index for each trunk structure
        for structure in sorted_trunk_structures:
            # Get sorted leaves
            sorted_leaves = structure.get_sorted_leaves(subtree=True, reverse=reverse)
            # Loop over leaves and assign positions
            for leaf in sorted_leaves:
                positions[leaf] = x
                x += 1
            # Sort structures from the top-down
            sorted_structures = sorted(structure.descendants, key=lambda s: s.level, reverse=True) + [structure]
            # Loop through structures and assign position of branches as the mean
            # of the leaves
            for structure in sorted_structures:
                if not structure.is_leaf:
                    positions[structure] = np.mean([positions[child] for child in structure.children])
        self._cached_positions = positions

    def plot_tree(self, ax, structure=None, autoscale=True, **kwargs):
        """
        Plot the dendrogram tree or a substructure.

        New convenience method: draws the line collection from
        :meth:`get_lines` onto a Matplotlib Axes.

        Parameters
        ----------
        ax : `matplotlib.axes.Axes` instance
            The Axes inside which to plot the dendrogram
        structure : `~astrodendro.structure.Structure`, optional
            If specified, only plot this structure
        autoscale : bool, optional
            Whether to automatically adapt the window limits to the tree

        Notes
        -----
        Any additional keyword arguments are passed to
        `~matplotlib.collections.LineCollection` and can be used to control
        the appearance of the plot.
        """
        lines = self.get_lines(structure=structure, **kwargs)
        ax.add_collection(lines)
        # Auto-scale axes (doesn't happen by default with ``add_collection``)
        if autoscale:
            ax.margins(0.05)
            ax.autoscale_view(True, True, True)

    def get_lines(self, structure=None, **kwargs):
        """
        Get a collection of lines to draw the dendrogram

        Parameters
        ----------
        structure : `~astrodendro.structure.Structure`
            The structure to plot. If not set, the whole tree will be plotted.

        Returns
        -------
        lines : `astrodendro.plot.StructureCollection`
            The lines (sub-class of LineCollection) which can be directly used in Matplotlib

        Notes
        -----
        Any additional keyword arguments are passed through to the
        `~matplotlib.collections.LineCollection` class (backward-compatible
        generalization; previously no styling could be supplied).
        """
        if self._cached_positions is None:
            raise Exception("Leaves have not yet been sorted")
        if structure is None:
            structures = self.dendrogram.all_nodes
        else:
            structures = structure.descendants + [structure]
        lines = []
        mapping = []
        for s in structures:
            x = self._cached_positions[s]
            # Vertical segment from the parent merge level up to this height.
            bot = s.parent.height if s.parent is not None else s.vmin
            top = s.height
            lines.append(([x, bot], [x, top]))
            mapping.append(s)
            if s.is_branch:
                # Horizontal connector spanning the children's positions.
                pc = [self._cached_positions[c] for c in s.children]
                lines.append(([min(pc), top], [max(pc), top]))
                mapping.append(s)
        # Local import — presumably to defer the matplotlib dependency.
        from .structure_collection import StructureCollection
        sc = StructureCollection(lines, **kwargs)
        sc.structures = mapping
        return sc
| Python | 0.000029 |
5392bf25d16166162d53ddc1f063907d72444a92 | add in new tests for new functionality | tests/cloudlet_test.py | tests/cloudlet_test.py | from cement.core import handler, hook
from cement.utils import test
from nepho import cli
from nepho.cli.base import Nepho
import nose
class NephoTestApp(Nepho):
    """Nepho app variant with empty argv/config so tests fully control input."""
    class Meta:
        argv = []
        config_files = []
# Test Cloudlet
class a_TestNephoCloudlet(test.CementTestCase):
    """Exercise every ``nepho cloudlet`` sub-command end to end.

    Refactor: the repeated make_app/setup/run/close boilerplate is factored
    into the private ``_run_cli`` helper; behavior is unchanged.
    """
    app_class = NephoTestApp

    def _run_cli(self, argv):
        """Build a nepho app for *argv*, run it, and tear it down."""
        app = self.make_app(argv=argv)
        app.setup()
        app.run()
        app.close()

    def setUp(self):
        super(a_TestNephoCloudlet, self).setUp()
        self.reset_backend()
        # Start from a known state: fresh registry and a clean install of
        # the example cloudlet.
        self._run_cli(['cloudlet', 'registry-update'])
        self._run_cli(['cloudlet', '--quiet', 'uninstall', 'nepho-example'])
        self._run_cli(['cloudlet', '--quiet', 'install', 'nepho-example'])

    def test_nepho_cloudlet_registry_update(self):
        self._run_cli(['cloudlet', 'registry-update'])

    def test_nepho_cloudlet_update_registry(self):
        self._run_cli(['cloudlet', 'update-registry'])

    def test_nepho_cloudlet_list_directories(self):
        self._run_cli(['cloudlet', 'directory-list'])

    def test_nepho_cloudlet_add_directories(self):
        self._run_cli(['cloudlet', 'directory-add', '--directory', '.'])

    def test_nepho_cloudlet_rm_directories(self):
        self._run_cli(['cloudlet', 'directory-remove', '--directory', '.'])

    def test_nepho_cloudlet_list(self):
        self._run_cli(['cloudlet', 'list'])

    def test_nepho_cloudlet_search(self):
        self._run_cli(['cloudlet', 'search', 'nepho-example'])

    def test_nepho_cloudlet_create(self):
        self._run_cli(['cloudlet', 'create', 'test-cloudlet'])

    def test_nepho_cloudlet_install(self):
        raise nose.SkipTest('skip this until #164 is implemented')
        self._run_cli(['cloudlet', 'install', 'nepho-example'])

    def test_nepho_cloudlet_describe(self):
        self._run_cli(['cloudlet', 'describe', 'nepho-example'])

    def test_nepho_cloudlet_update(self):
        self._run_cli(['cloudlet', 'update', 'nepho-example'])

    def test_nepho_cloudlet_upgrade(self):
        self._run_cli(['cloudlet', 'upgrade', 'nepho-example'])

    def test_nepho_cloudlet_uninstall(self):
        raise nose.SkipTest('skip this until #164 is implemented')
        self._run_cli(['cloudlet', 'uninstall', 'nepho-example'])

    def test_nepho_cloudlet_remove(self):
        raise nose.SkipTest('skip this until #164 is implemented')
        self._run_cli(['cloudlet', 'remove', 'nepho-example'])
| from cement.core import handler, hook
from cement.utils import test
from nepho import cli
from nepho.cli.base import Nepho
import nose
class NephoTestApp(Nepho):
class Meta:
argv = []
config_files = []
# Test Cloudlet
class a_TestNephoCloudlet(test.CementTestCase):
app_class = NephoTestApp
def setUp(self):
super(a_TestNephoCloudlet, self).setUp()
self.reset_backend()
app = self.make_app(argv=['cloudlet', 'registry-update'])
app.setup()
app.run()
app.close()
app = self.make_app(argv=['cloudlet', '--quiet', 'uninstall', 'nepho-example'])
app.setup()
app.run()
app.close()
app = self.make_app(argv=['cloudlet', '--quiet', 'install', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_registry_update(self):
app = self.make_app(argv=['cloudlet', 'registry-update'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_update_registry(self):
app = self.make_app(argv=['cloudlet', 'update-registry'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_list(self):
app = self.make_app(argv=['cloudlet', 'list'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_search(self):
app = self.make_app(argv=['cloudlet', 'search', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_install(self):
raise nose.SkipTest('skip this until #164 is implemented')
app = self.make_app(argv=['cloudlet', 'install', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_describe(self):
app = self.make_app(argv=['cloudlet', 'describe', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_update(self):
app = self.make_app(argv=['cloudlet', 'update', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_upgrade(self):
app = self.make_app(argv=['cloudlet', 'upgrade', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_uninstall(self):
raise nose.SkipTest('skip this until #164 is implemented')
app = self.make_app(argv=['cloudlet', 'uninstall', 'nepho-example'])
app.setup()
app.run()
app.close()
def test_nepho_cloudlet_remove(self):
raise nose.SkipTest('skip this until #164 is implemented')
app = self.make_app(argv=['cloudlet', 'remove', 'nepho-example'])
app.setup()
app.run()
app.close()
| Python | 0 |
2ddfb4f0f4f2de060399a6e5b519a7f4b788ace5 | make it possible to show languages for selected values on a map | autotyp/adapters.py | autotyp/adapters.py | from sqlalchemy.orm import joinedload
from clld.interfaces import IParameter, IValue, IIndex
from clld.db.meta import DBSession
from clld.db.models.common import ValueSet
from clld.web.adapters.base import Index
from clld.web.adapters.geojson import GeoJsonParameter
from clld.web.maps import SelectedLanguagesMap
class GeoJsonFeature(GeoJsonParameter):
def feature_iterator(self, ctx, req):
return DBSession.query(ValueSet).filter(ValueSet.parameter_pk == ctx.pk)\
.options(joinedload(ValueSet.language))
def feature_properties(self, ctx, req, valueset):
return {}
class MapView(Index):
extension = str('map.html')
mimetype = str('text/vnd.clld.map+html')
send_mimetype = str('text/html')
template = 'language/map_html.mako'
def template_context(self, ctx, req):
languages = list(v.valueset.language for v in ctx.get_query(limit=8000))
return {
'map': SelectedLanguagesMap(ctx, req, languages),
'languages': languages}
def includeme(config):
config.register_adapter(GeoJsonFeature, IParameter)
config.register_adapter(MapView, IValue, IIndex)
| from sqlalchemy.orm import joinedload
from clld.interfaces import IParameter, ILanguage, IIndex
from clld.db.meta import DBSession
from clld.db.models.common import ValueSet
from clld.web.adapters.base import Index
from clld.web.adapters.geojson import GeoJsonParameter
from clld.web.maps import SelectedLanguagesMap
class GeoJsonFeature(GeoJsonParameter):
def feature_iterator(self, ctx, req):
return DBSession.query(ValueSet).filter(ValueSet.parameter_pk == ctx.pk)\
.options(joinedload(ValueSet.language))
def feature_properties(self, ctx, req, valueset):
return {}
class MapView(Index):
extension = str('map.html')
mimetype = str('text/vnd.clld.map+html')
send_mimetype = str('text/html')
template = 'language/map_html.mako'
def template_context(self, ctx, req):
languages = list(ctx.get_query(limit=8000))
return {
'map': SelectedLanguagesMap(ctx, req, languages),
'languages': languages}
def includeme(config):
config.register_adapter(GeoJsonFeature, IParameter)
config.register_adapter(MapView, ILanguage, IIndex)
| Python | 0 |
c80761c6a9ed668329891100e658c34a43f07891 | Rename and improve assert_learning -> arp_cache_rtts | tests/devices_tests.py | tests/devices_tests.py | """
Devices tests
"""
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from mininet.topo import LinearTopo
from mininet.net import Mininet
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from minicps import constants as c
from minicps.topology import EthStar, Minicps, DLR, L3EthStar
from minicps.devices import POXL2Pairs, POXL2Learning
import os
import time
def setup():
# print 'SETUP!'
setLogLevel(c.TEST_LOG_LEVEL)
def teardown():
# print 'TEAR DOWN!'
pass
def arp_cache_rtts(net, h1, h2):
"""Naive learning check on the first two ping
ICMP packets RTT.
:net: Mininet object.
:h1: first host name.
:h2: second host name.
:returns: decimal RTTs from uncached and cacthed arp entries.
"""
h1, h2 = net.get(h1, h2)
delete_arp_cache = h1.cmd('ip -s -s neigh flush all')
print 'DEBUG delete_arp_cache:\n', delete_arp_cache
ping_output = h1.cmd('ping -c5 %s' % h2.IP())
print 'DEBUG ping_output:\n', ping_output
lines = ping_output.split('\n')
first = lines[1]
second = lines[2]
first_words = first.split(' ')
second_words = second.split(' ')
first_rtt = first_words[6]
second_rtt = second_words[6]
first_rtt = float(first_rtt[5:])
second_rtt = float(second_rtt[5:])
print 'DEBUG first_rtt:', first_rtt
print 'DEBUG second_rtt:', second_rtt
return first_rtt, second_rtt
@with_setup(setup, teardown)
def test_POXL2Pairs():
"""Test build-in forwarding.l2_pairs controller
that adds flow entries using only MAC info.
"""
# raise SkipTest
topo = L3EthStar()
controller = POXL2Pairs
net = Mininet(topo=topo, controller=controller, link=TCLink)
net.start()
time.sleep(1) # allow mininet to init processes
deltas = []
for i in range(5):
first_rtt, second_rtt = arp_cache_rtts(net, 'plc1', 'plc2')
assert_greater(first_rtt, second_rtt,
c.ASSERTION_ERRORS['no_learning'])
deltas.append(first_rtt - second_rtt)
print 'DEBUG deltas:', deltas
# CLI(net)
net.stop()
os.system('sudo mn -c')
@with_setup(setup, teardown)
def test_POXL2Learning():
"""Test build-in forwarding.l2_learning controller
that adds flow entries using only MAC info.
"""
# raise SkipTest
topo = L3EthStar()
controller = POXL2Learning
net = Mininet(topo=topo, controller=controller, link=TCLink)
net.start()
time.sleep(1) # allow mininet to init processes
deltas = []
for i in range(5):
first_rtt, second_rtt = arp_cache_rtts(net, 'plc1', 'plc2')
assert_greater(first_rtt, second_rtt,
c.ASSERTION_ERRORS['no_learning'])
deltas.append(first_rtt - second_rtt)
print 'DEBUG deltas:', deltas
# CLI(net)
net.stop()
os.system('sudo mn -c')
| """
Devices tests
"""
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from mininet.topo import LinearTopo
from mininet.net import Mininet
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from minicps import constants as c
from minicps.topology import EthStar, Minicps, DLR, L3EthStar
from minicps.devices import POXL2Pairs, POXL2Learning
import os
def setup():
# print 'SETUP!'
setLogLevel(c.TEST_LOG_LEVEL)
def teardown():
# print 'TEAR DOWN!'
pass
def assert_learning(ping_output):
"""Naive learning check on the first two ping
ICMP packets RTT.
:ping_output: given as a string.
"""
print 'DEBUG ping_output:\n', ping_output
lines = ping_output.split('\n')
first = lines[1]
second = lines[2]
first_words = first.split(' ')
second_words = second.split(' ')
first_time = first_words[6]
second_time = second_words[6]
first_time = float(first_time[5:])
second_time = float(second_time[5:])
print 'DEBUG first_time:', first_time
print 'DEBUG second_time:', second_time
assert_greater(first_time, second_time,
c.ASSERTION_ERRORS['no_learning'])
@with_setup(setup, teardown)
def test_POXL2Pairs():
"""Test build-in forwarding.l2_pairs controller
that adds flow entries using only MAC info.
"""
# raise SkipTest
topo = L3EthStar()
controller = POXL2Pairs
net = Mininet(topo=topo, controller=controller, link=TCLink)
net.start()
plc1, plc2 = net.get('plc1', 'plc2')
ping_output = plc1.cmd('ping -c3 %s' % plc2.IP())
assert_learning(ping_output)
# CLI(net)
net.stop()
os.system('sudo mn -c')
@with_setup(setup, teardown)
def test_POXL2Learning():
"""Test build-in forwarding.l2_learning controller
that adds flow entries using only MAC info.
"""
raise SkipTest
topo = L3EthStar()
controller = POXL2Learning
net = Mininet(topo=topo, controller=controller, link=TCLink)
net.start()
plc1, plc2 = net.get('plc1', 'plc2')
output = plc1.cmd('ping -c6 %s' % plc2.IP())
print 'DEBUG output:\n', output
# CLI(net)
net.stop()
os.system('sudo mn -c')
| Python | 0 |
e3d082588db63690a846007beb8ddd42ebd4144e | Include pages urls into the main url patterns | config/urls.py | config/urls.py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# Custom urls
url(r'^', include('pages.urls', namespace='pages')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# To be removed
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| Python | 0.000001 |
2f412d6d98e6b03c1e3997d6acb0d15cace12e28 | remove trailing spaces | coopy/utils.py | coopy/utils.py | def method_or_none(instance, name):
method = getattr(instance, name)
if (name[0:2] == '__' and name[-2,:] == '__') or \
not callable(method) :
return None
return method
def action_check(obj):
return (hasattr(obj, '__readonly'),
hasattr(obj, '__unlocked'),
hasattr(obj, '__abort_exception'))
def inject(obj, name, dependency):
obj.__dict__[name] = dependency
| def method_or_none(instance, name):
method = getattr(instance, name)
if (name[0:2] == '__' and name[-2,:] == '__') or \
not callable(method) :
return None
return method
def action_check(obj):
return (hasattr(obj, '__readonly'),
hasattr(obj, '__unlocked'),
hasattr(obj, '__abort_exception'))
def inject(obj, name, dependency):
obj.__dict__[name] = dependency
| Python | 0.999463 |
919a4f183e9a09ded7cf6272f9be300f22408c08 | fix method or none method name comparison | coopy/utils.py | coopy/utils.py | def method_or_none(instance, name):
method = getattr(instance, name)
if (name[0:2] == '__' and name[-2:] == '__') or \
not callable(method) :
return None
return method
def action_check(obj):
return (hasattr(obj, '__readonly'),
hasattr(obj, '__unlocked'),
hasattr(obj, '__abort_exception'))
def inject(obj, name, dependency):
obj.__dict__[name] = dependency
| def method_or_none(instance, name):
method = getattr(instance, name)
if (name[0:2] == '__' and name[-2,:] == '__') or \
not callable(method) :
return None
return method
def action_check(obj):
return (hasattr(obj, '__readonly'),
hasattr(obj, '__unlocked'),
hasattr(obj, '__abort_exception'))
def inject(obj, name, dependency):
obj.__dict__[name] = dependency
| Python | 0.000002 |
b59f21ee28cc8eaf56cbc49fd7926e243e92276f | Fix bug for users with Space inside their usernames. | core/models.py | core/models.py | from django.core.exceptions import AppRegistryNotReady
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.utils.translation import ugettext as _
class Profile(models.Model):
about_me = models.TextField(
_('About me'), max_length=500, null=True, blank=True)
github = models.CharField(
_('Github username'), max_length=50, null=True, blank=True)
facebook = models.CharField(
_('Facebook username'), max_length=50, null=True, blank=True)
site = models.URLField(
_('Site url'), max_length=200, null=True, blank=True)
# relations
user = models.OneToOneField(to=settings.AUTH_USER_MODEL)
class Meta:
verbose_name = _('Profile')
def __unicode__(self):
return self.user.get_full_name()
def get_absolute_url(self):
return reverse_lazy(
'user_profile', kwargs={'user__username': self.user.username})
def get_github_url(self):
if self.github:
return 'http://github.com/{}'.format(self.github)
def get_facebook_url(self):
if self.facebook:
return 'http://facebook.com/{}'.format(self.facebook)
def get_site_url(self):
return self.site
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
def slugify_user_username(sender, instance, **kwargs):
instance.username = instance.username.replace(' ', '_')
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except AppRegistryNotReady:
from django.contrib.auth.models import User
post_save.connect(create_user_profile, sender=User)
pre_save.connect(slugify_user_username, sender=User)
| from django.core.exceptions import AppRegistryNotReady
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.utils.translation import ugettext as _
class Profile(models.Model):
about_me = models.TextField(
_('About me'), max_length=500, null=True, blank=True)
github = models.CharField(
_('Github username'), max_length=50, null=True, blank=True)
facebook = models.CharField(
_('Facebook username'), max_length=50, null=True, blank=True)
site = models.URLField(
_('Site url'), max_length=200, null=True, blank=True)
# relations
user = models.OneToOneField(to=settings.AUTH_USER_MODEL)
class Meta:
verbose_name = _('Profile')
def __unicode__(self):
return self.user.get_full_name()
def get_absolute_url(self):
return reverse_lazy(
'user_profile', kwargs={'user__username': self.user.username})
def get_github_url(self):
if self.github:
return 'http://github.com/{}'.format(self.github)
def get_facebook_url(self):
if self.facebook:
return 'http://facebook.com/{}'.format(self.facebook)
def get_site_url(self):
return self.site
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except AppRegistryNotReady:
from django.contrib.auth.models import User
post_save.connect(create_user_profile, sender=User)
| Python | 0 |
b1eb69620bbe875d117498ed95e009a019e54fab | Fix vote app URL patterns | votes/urls.py | votes/urls.py | from django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView, results, system_home
urlpatterns = [
url(r'^$', system_home, name="system"),
url(r'^(?P<vote_name>[\w-]+)/$', VoteView.as_view(), name="vote"),
url(r'^(?P<vote_name>[\w-]+)/results/$', results, name="results"),
]
| from django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView, results, system_home
urlpatterns = [
url(r'^$', system_home, name="system"),
url(r'^(?P<vote_name>[\w-]+)$', VoteView.as_view(), name="vote"),
url(r'^(?P<vote_name>[\w-]+)/results$', results, name="results"),
]
| Python | 0.000002 |
1d0b114c7e918c87e14d9ea7a7c49cb9120db68b | Bump version (#128) | vt/version.py | vt/version.py | """Defines VT release version."""
__version__ = '0.17.3'
| """Defines VT release version."""
__version__ = '0.17.2'
| Python | 0 |
1034699a21dc0cf4862624d076d487deae7df9e2 | add NullHandler to avoid "no handlers could be found" error. | Lib/fontTools/__init__.py | Lib/fontTools/__init__.py | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import logging
# add a do-nothing handler to the libary's top-level logger, to avoid
# "no handlers could be found" error if client doesn't configure logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
version = "3.0"
| from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
version = "3.0"
| Python | 0 |
44620b2fa69500e1cada5622fa96eedd9c931006 | Add test for MessageBeep() | Lib/test/test_winsound.py | Lib/test/test_winsound.py | # Ridiculously simple test of the winsound module for Windows.
import winsound, time
for i in range(100, 2000, 100):
winsound.Beep(i, 75)
print "Hopefully you heard some sounds increasing in frequency!"
winsound.MessageBeep()
time.sleep(0.5)
winsound.MessageBeep(winsound.MB_OK)
time.sleep(0.5)
winsound.MessageBeep(winsound.MB_ICONASTERISK)
time.sleep(0.5)
winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
time.sleep(0.5)
winsound.MessageBeep(winsound.MB_ICONHAND)
time.sleep(0.5)
winsound.MessageBeep(winsound.MB_ICONQUESTION)
time.sleep(0.5)
| # Ridiculously simple test of the winsound module for Windows.
import winsound
for i in range(100, 2000, 100):
winsound.Beep(i, 75)
print "Hopefully you heard some sounds increasing in frequency!"
| Python | 0 |
df77f4de261e3f21cc95f56fbb4dd738c02a2dd1 | Put all test metrics on the same row of the dataframe. | src/graph_world/models/benchmarker.py | src/graph_world/models/benchmarker.py | import json
import os
from abc import ABC, abstractmethod
import apache_beam as beam
import gin
import pandas as pd
class Benchmarker(ABC):
def __init__(self):
self._model_name = ''
def GetModelName(self):
return self._model_name
# Train and test the model.
# Arguments:
# * element: output of the 'Convert to torchgeo' beam stage.
# * output_path: where to save logs and data.
# Returns:
# * named dict with keys/vals:
# 'losses': iterable of loss values over the epochs.
# 'test_metrics': dict of named test metrics for the benchmark run.
@abstractmethod
def Benchmark(self, element):
del element # unused
return {'losses': [], 'test_metrics': {}}
class BenchmarkerWrapper(ABC):
@abstractmethod
def GetBenchmarker(self):
return Benchmarker()
# These two functions would be unnecessary if we were using Python 3.7. See:
# - https://github.com/huggingface/transformers/issues/8453
# - https://github.com/huggingface/transformers/issues/8212
@abstractmethod
def GetBenchmarkerClass(self):
return Benchmarker
@abstractmethod
def GetModelHparams(self):
return {}
class BenchmarkGNNParDo(beam.DoFn):
# The commented lines here, and those in process, could be uncommented and
# replace the alternate code below it, if we were using Python 3.7. See:
# - https://github.com/huggingface/transformers/issues/8453
# - https://github.com/huggingface/transformers/issues/8212
def __init__(self, benchmarker_wrappers):
# self._benchmarkers = [benchmarker_wrapper().GetBenchmarker() for
# benchmarker_wrapper in benchmarker_wrappers]
self._benchmarker_classes = [benchmarker_wrapper().GetBenchmarkerClass() for
benchmarker_wrapper in benchmarker_wrappers]
self._model_hparams = [benchmarker_wrapper().GetModelHparams() for
benchmarker_wrapper in benchmarker_wrappers]
# /end alternate code.
self._output_path = None
def SetOutputPath(self, output_path):
self._output_path = output_path
def process(self, element):
output_data = {}
# for benchmarer in self._benchmarkers:
for benchmarker_class, model_hparams in zip(self._benchmarker_classes, self._model_hparams):
sample_id = element['sample_id']
# benchmarker_out = self._benchmarker.Benchmark(element)
benchmarker = benchmarker_class(**model_hparams)
benchmarker_out = benchmarker.Benchmark(element)
# /end alternate code.
# Dump benchmark results to file.
benchmark_result = {
'sample_id': sample_id,
'losses': benchmarker_out['losses'],
'generator_config': element['generator_config']
}
benchmark_result.update(benchmarker_out['test_metrics'])
results_object_name = os.path.join(self._output_path, '{0:05}_results.txt'.format(sample_id))
with beam.io.filesystems.FileSystems.create(results_object_name, 'text/plain') as f:
buf = bytes(json.dumps(benchmark_result), 'utf-8')
f.write(buf)
f.close()
# Return benchmark data for next beam stage.
for key, value in benchmarker_out['test_metrics'].items():
output_data[
'%s__%s' % (benchmarker.GetModelName(), key)] = value
output_data.update(element['generator_config'])
output_data.update(element['metrics'])
yield pd.DataFrame(output_data, index=[sample_id]) | import json
import os
from abc import ABC, abstractmethod
import apache_beam as beam
import gin
import pandas as pd
class Benchmarker(ABC):
def __init__(self):
self._model_name = ''
def GetModelName(self):
return self._model_name
# Train and test the model.
# Arguments:
# * element: output of the 'Convert to torchgeo' beam stage.
# * output_path: where to save logs and data.
# Returns:
# * named dict with keys/vals:
# 'losses': iterable of loss values over the epochs.
# 'test_metrics': dict of named test metrics for the benchmark run.
@abstractmethod
def Benchmark(self, element):
del element # unused
return {'losses': [], 'test_metrics': {}}
class BenchmarkerWrapper(ABC):
@abstractmethod
def GetBenchmarker(self):
return Benchmarker()
# These two functions would be unnecessary if we were using Python 3.7. See:
# - https://github.com/huggingface/transformers/issues/8453
# - https://github.com/huggingface/transformers/issues/8212
@abstractmethod
def GetBenchmarkerClass(self):
return Benchmarker
@abstractmethod
def GetModelHparams(self):
return {}
class BenchmarkGNNParDo(beam.DoFn):
# The commented lines here, and those in process, could be uncommented and
# replace the alternate code below it, if we were using Python 3.7. See:
# - https://github.com/huggingface/transformers/issues/8453
# - https://github.com/huggingface/transformers/issues/8212
def __init__(self, benchmarker_wrappers):
# self._benchmarkers = [benchmarker_wrapper().GetBenchmarker() for
# benchmarker_wrapper in benchmarker_wrappers]
self._benchmarker_classes = [benchmarker_wrapper().GetBenchmarkerClass() for
benchmarker_wrapper in benchmarker_wrappers]
self._model_hparams = [benchmarker_wrapper().GetModelHparams() for
benchmarker_wrapper in benchmarker_wrappers]
# /end alternate code.
self._output_path = None
def SetOutputPath(self, output_path):
self._output_path = output_path
def process(self, element):
# for benchmarer in self._benchmarkers:
for benchmarker_class, model_hparams in zip(self._benchmarker_classes, self._model_hparams):
sample_id = element['sample_id']
# benchmarker_out = self._benchmarker.Benchmark(element)
benchmarker = benchmarker_class(**model_hparams)
benchmarker_out = benchmarker.Benchmark(element)
# /end alternate code.
# Dump benchmark results to file.
benchmark_result = {
'sample_id': sample_id,
'losses': benchmarker_out['losses'],
'generator_config': element['generator_config']
}
benchmark_result.update(benchmarker_out['test_metrics'])
results_object_name = os.path.join(self._output_path, '{0:05}_results.txt'.format(sample_id))
with beam.io.filesystems.FileSystems.create(results_object_name, 'text/plain') as f:
buf = bytes(json.dumps(benchmark_result), 'utf-8')
f.write(buf)
f.close()
# Return benchmark data for next beam stage.
output_data = benchmarker_out['test_metrics']
output_data.update(element['generator_config'])
output_data.update(element['metrics'])
output_data['model_name'] = benchmarker.GetModelName()
yield pd.DataFrame(output_data, index=[sample_id]) | Python | 0 |
886105ba5f4a8b53fbf5a39f7cb3dc48ce544a3a | add total days check | cgi-bin/oa-gdd.py | cgi-bin/oa-gdd.py | #!/usr/bin/python
"""
Produce a OA GDD Plot, dynamically!
$Id: $:
"""
import sys, os
sys.path.insert(0, '/mesonet/www/apps/iemwebsite/scripts/lib')
os.environ[ 'HOME' ] = '/tmp/'
os.environ[ 'USER' ] = 'nobody'
import iemplot
import cgi
import datetime
import network
import iemdb
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor()
form = cgi.FieldStorage()
if ("year1" in form and "year2" in form and
"month1" in form and "month2" in form and
"day1" in form and "day2" in form):
sts = datetime.datetime(int(form["year1"].value),
int(form["month1"].value), int(form["day1"].value))
ets = datetime.datetime(int(form["year2"].value),
int(form["month2"].value), int(form["day2"].value))
else:
sts = datetime.datetime(2011,5,1)
ets = datetime.datetime(2011,10,1)
baseV = 50
if "base" in form:
baseV = int(form["base"].value)
maxV = 86
if "max" in form:
maxV = int(form["max"].value)
# Make sure we aren't in the future
now = datetime.datetime.today()
if ets > now:
ets = now
st = network.Table("IACLIMATE")
# Now we load climatology
#sts = {}
#rs = mesosite.query("SELECT id, x(geom) as lon, y(geom) as lat from stations WHERE \
# network = 'IACLIMATE'").dictresult()
#for i in range(len(rs)):
# sts[ rs[i]["id"].lower() ] = rs[i]
# Compute normal from the climate database
sql = """SELECT station,
sum(gddXX(%s, %s, high, low)) as gdd, count(*)
from alldata_ia WHERE year = %s and day >= '%s' and day < '%s'
GROUP by station""" % (baseV, maxV, sts.year, sts.strftime("%Y-%m-%d"),
ets.strftime("%Y-%m-%d"))
lats = []
lons = []
gdd50 = []
valmask = []
ccursor.execute(sql)
total_days = (ets - sts).days
for row in ccursor:
id = row[0]
if not st.sts.has_key(id):
continue
if row[2] < (total_days * 0.9):
continue
lats.append( st.sts[id]['lat'] )
lons.append( st.sts[id]['lon'] )
gdd50.append( row[1] )
valmask.append( True )
cfg = {
'wkColorMap': 'BlAqGrYeOrRe',
'nglSpreadColorStart': 2,
'nglSpreadColorEnd' : -1,
'_showvalues' : True,
'_valueMask' : valmask,
'_format' : '%.0f',
'_title' : "Iowa %s thru %s GDD(base=%s,max=%s) Accumulation" % (
sts.strftime("%Y: %d %b"),
(ets - datetime.timedelta(days=1)).strftime("%d %b"),
baseV, maxV),
'lbTitleString' : "F",
}
# Generates tmp.ps
tmpfp = iemplot.simple_contour(lons, lats, gdd50, cfg)
iemplot.webprocess(tmpfp)
| #!/usr/bin/python
"""
Produce a OA GDD Plot, dynamically!
$Id: $:
"""
import sys, os
sys.path.insert(0, '/mesonet/www/apps/iemwebsite/scripts/lib')
os.environ[ 'HOME' ] = '/tmp/'
os.environ[ 'USER' ] = 'nobody'
import iemplot
import cgi
import datetime
import network
import iemdb
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor()
form = cgi.FieldStorage()
if ("year1" in form and "year2" in form and
"month1" in form and "month2" in form and
"day1" in form and "day2" in form):
sts = datetime.datetime(int(form["year1"].value),
int(form["month1"].value), int(form["day1"].value))
ets = datetime.datetime(int(form["year2"].value),
int(form["month2"].value), int(form["day2"].value))
else:
sts = datetime.datetime(2011,5,1)
ets = datetime.datetime(2011,10,1)
baseV = 50
if "base" in form:
baseV = int(form["base"].value)
maxV = 86
if "max" in form:
maxV = int(form["max"].value)
# Make sure we aren't in the future
now = datetime.datetime.today()
if ets > now:
ets = now
st = network.Table("IACLIMATE")
# Now we load climatology
#sts = {}
#rs = mesosite.query("SELECT id, x(geom) as lon, y(geom) as lat from stations WHERE \
# network = 'IACLIMATE'").dictresult()
#for i in range(len(rs)):
# sts[ rs[i]["id"].lower() ] = rs[i]
# Compute normal from the climate database
sql = """SELECT station,
sum(gddXX(%s, %s, high, low)) as gdd
from alldata_ia WHERE year = %s and day >= '%s' and day < '%s'
GROUP by station""" % (baseV, maxV, sts.year, sts.strftime("%Y-%m-%d"),
ets.strftime("%Y-%m-%d"))
lats = []
lons = []
gdd50 = []
valmask = []
ccursor.execute(sql)
for row in ccursor:
id = row[0]
if not st.sts.has_key(id):
continue
lats.append( st.sts[id]['lat'] )
lons.append( st.sts[id]['lon'] )
gdd50.append( row[1] )
valmask.append( True )
cfg = {
'wkColorMap': 'BlAqGrYeOrRe',
'nglSpreadColorStart': 2,
'nglSpreadColorEnd' : -1,
'_showvalues' : True,
'_valueMask' : valmask,
'_format' : '%.0f',
'_title' : "Iowa %s thru %s GDD(base=%s,max=%s) Accumulation" % (
sts.strftime("%Y: %d %b"),
(ets - datetime.timedelta(days=1)).strftime("%d %b"),
baseV, maxV),
'lbTitleString' : "F",
}
# Generates tmp.ps
tmpfp = iemplot.simple_contour(lons, lats, gdd50, cfg)
iemplot.webprocess(tmpfp)
| Python | 0.000006 |
53a86e2318256e6edcca3d1e4ce2981a29bd8208 | Add flask-email configs | web/config.py | web/config.py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfiguration(object):
DEBUG = False
TESTING = False
ADMINS = frozenset(['youremail@yourdomain.com'])
SECRET_KEY = 'SecretKeyForSessionSigning'
THREADS_PER_PAGE = 8
DATABASE = 'app.db'
DATABASE_PATH = os.path.join(basedir, DATABASE)
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
SECURITY_PASSWORD_HASH = 'sha512_crypt'
SECURITY_PASSWORD_SALT = 'SuPeRsEcReTsAlT'
SECURITY_POST_LOGIN_VIEW = '/ViewProfile'
SECURITY_CHANGEABLE = True
SECURITY_REGISTERABLE = True
SECURITY_TRACKABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
SECURITY_SEND_PASSWORD_CHANGE_EMAIL = False
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = False
MAIL_SUPPRESS_SEND = True
class DockerConfig(BaseConfiguration):
SECRET_KEY = os.environ.get('SECRET_KEY')
DB_NAME = os.environ.get('DB_NAME')
DB_USER = os.environ.get('DB_USER')
DB_PASS = os.environ.get('DB_PASS')
DB_SERVICE = os.environ.get('DB_SERVICE')
DB_PORT = os.environ.get('DB_PORT')
SQLALCHEMY_DATABASE_URI = 'postgresql://{0}:{1}@{2}:{3}/{4}'.format(
DB_USER, DB_PASS, DB_SERVICE, DB_PORT, DB_NAME
)
RQ_DEFAULT_HOST="redis_1"
RQ_DEFAULT_PORT=6379
MAIL_SERVER = "smtp_server.usgo.org"
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = "noreply@usgo.org"
MAIL_PASSWORD = "password"
MAIL_DEFAULT_SENDER = "noreply@usgo.org"
class DebugConfiguration(DockerConfig):
DEBUG = True
class TestConfiguration(BaseConfiguration):
TESTING = True
DATABASE = 'tests.db'
DATABASE_PATH = os.path.join(basedir, DATABASE)
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
| import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfiguration(object):
DEBUG = False
TESTING = False
ADMINS = frozenset(['youremail@yourdomain.com'])
SECRET_KEY = 'SecretKeyForSessionSigning'
THREADS_PER_PAGE = 8
DATABASE = 'app.db'
DATABASE_PATH = os.path.join(basedir, DATABASE)
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
SECURITY_PASSWORD_HASH = 'sha512_crypt'
SECURITY_PASSWORD_SALT = 'SuPeRsEcReTsAlT'
SECURITY_POST_LOGIN_VIEW = '/ViewProfile'
SECURITY_CHANGEABLE = True
SECURITY_REGISTERABLE = True
SECURITY_TRACKABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
SECURITY_SEND_PASSWORD_CHANGE_EMAIL = False
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = False
MAIL_SUPPRESS_SEND = True
class DockerConfig(BaseConfiguration):
SECRET_KEY = os.environ.get('SECRET_KEY')
DB_NAME = os.environ.get('DB_NAME')
DB_USER = os.environ.get('DB_USER')
DB_PASS = os.environ.get('DB_PASS')
DB_SERVICE = os.environ.get('DB_SERVICE')
DB_PORT = os.environ.get('DB_PORT')
SQLALCHEMY_DATABASE_URI = 'postgresql://{0}:{1}@{2}:{3}/{4}'.format(
DB_USER, DB_PASS, DB_SERVICE, DB_PORT, DB_NAME
)
RQ_DEFAULT_HOST="redis_1"
RQ_DEFAULT_PORT=6379
class DebugConfiguration(DockerConfig):
DEBUG = True
class TestConfiguration(BaseConfiguration):
TESTING = True
DATABASE = 'tests.db'
DATABASE_PATH = os.path.join(basedir, DATABASE)
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH
| Python | 0.000001 |
d08012b044e7340ce7f8c41ce5634d72f40de35d | Update test_server.py | server-functional/test_server.py | server-functional/test_server.py | import requests
import unittest
import json
import logging
import zlib
import sys
from colorama import init, Fore, Back, Style
logger = logging.getLogger('test_server')
ENDPOINT = "you_forgot_to_provide_the_endpoint_as_the_first_command_line_argument"
class TestErrorHandling(unittest.TestCase):
def check_parsable_but_not_ok(self):
try:
self.assertNotEqual(self.resp.json()["status"], "OK")
except Exception as e:
logger.warning(self.resp.text)
raise e
def test_missing_post_params(self):
self.resp = requests.post(ENDPOINT + "/upload")
def test_invalid_uploadpassword(self):
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":"a","metadata":"","chunknumber":1,"finishupload":False,"deletepassword":"loldonkey"})
self.unfinished = True
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":"b","chunknumber":2,"finishupload":True,"fileid":self.resp.json()["fileid"], "uploadpassword": self.resp.json()["uploadpassword"] + "BOB"})
self.unfinished = False
def test_chunk_too_big(self):
if 100*10**6 < self.__class__.serverinfo["max_filesize_bytes"]+1:
print(Fore.RED + "skipping test, max_filesize_bytes very big: {}".format(self.__class__.serverinfo["max_filesize_bytes"]) + Fore.RESET)
return
self.resp = requests.post(ENDPOINT + "/upload", data=zlib.compress(json.dumps({"cryptofile":"\x00"*(self.__class__.serverinfo["max_filesize_bytes"]+1),"metadata":"","chunknumber":0,"finishupload":True,"deletepassword":"loldonkey"}).encode("utf-8")), headers={'Content-Encoding': 'gzip'})
def test_chunk_zero_but_not_finishing(self):
""" it should not be possible to download chunks before the whole hushfile is finished """
self.reference = "a"
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":self.reference,"metadata":"","chunknumber":0,"finishupload":False,"deletepassword":"loldonkey"})
self.resp = requests.get(ENDPOINT + "/file", params={"chunknumber":0,"fileid":self.resp.json()["fileid"]})
def tearDown(self):
self.check_parsable_but_not_ok()
self.assertFalse(self.unfinished, self.resp.text)
def setUp(self):
self.unfinished = False
@classmethod
def setUpClass(cls):
cls.serverinfo = requests.get(ENDPOINT + "/serverinfo").json()
logger.info(cls.serverinfo)
class TestFileEquality(unittest.TestCase):
def test_basic_one_chunk_equality(self):
self.reference = "a"
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":self.reference,"metadata":"","chunknumber":0,"finishupload":True,"deletepassword":"loldonkey"})
self.resp = requests.get(ENDPOINT + "/file", params={"chunknumber":0,"fileid":self.resp.json()["fileid"]})
def test_compressed_one_chunk_equality(self):
self.reference = "a"
self.resp = requests.post(ENDPOINT + "/upload", data=zlib.compress(json.dumps({"cryptofile":self.reference,"metadata":"","chunknumber":0,"finishupload":True,"deletepassword":"loldonkey"}).encode("utf-8")), headers={'Content-Encoding': 'gzip'})
self.assertEqual(self.resp.json()["status"], "OK")
self.resp = requests.get(ENDPOINT + "/file", params={"chunknumber":0,"fileid":self.resp.json()["fileid"]})
def tearDown(self):
self.assertEqual(self.reference, self.resp.text)
if __name__ == "__main__":
global ENDPOINT
init()
logging.basicConfig(level="DEBUG")
if sys.hexversion < 0x03000000:
sys.exit("Python 3 is required to run this program.")
if len(sys.argv) > 1: ENDPOINT = sys.argv.pop()
unittest.main()
| import requests
import unittest
import json
import logging
logger = logging.getLogger('test_server')
ENDPOINT = "http://localhost:8801/api"
class TestErrorHandling(unittest.TestCase):
def check_parsable_but_not_ok(self):
try:
self.assertNotEqual(self.resp.json()["status"], "OK")
except Exception as e:
logger.warning(self.resp.text)
raise e
def test_missing_post_params(self):
self.resp = requests.post(ENDPOINT + "/upload")
def test_invalid_uploadpassword(self):
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":"a","metadata":"","chunknumber":1,"finishupload":False,"deletepassword":"loldonkey"})
self.unfinished = True
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":"b","chunknumber":2,"finishupload":True,"fileid":self.resp.json()["fileid"], "uploadpassword": self.resp.json()["uploadpassword"] + "BOB"})
self.unfinished = False
def test_chunk_too_big(self):
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":b"\x00"*(self.__class__.serverinfo["max_filesize_bytes"]+1),"metadata":"","chunknumber":0,"finishupload":True,"deletepassword":"loldonkey"})
def test_chunk_zero_but_not_finishing(self):
self.resp = requests.post(ENDPOINT + "/upload", data={"cryptofile":"a","metadata":"","chunknumber":0,"finishupload":False,"deletepassword":"loldonkey"})
def tearDown(self):
self.check_parsable_but_not_ok()
self.assertFalse(self.unfinished, self.resp.text)
def setUp(self):
self.unfinished = False
@classmethod
def setUpClass(cls):
cls.serverinfo = requests.get(ENDPOINT + "/serverinfo").json()
logger.info(cls.serverinfo)
if __name__ == "__main__":
logging.basicConfig(level="INFO")
unittest.main()
| Python | 0.000003 |
ff2958c25812fb9486e8611e44c93ba32b737866 | migrate res.company object to new API | l10n_br_stock_account/res_company.py | l10n_br_stock_account/res_company.py | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2011 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, fields
class ResCompany(models.Model):
_inherit = 'res.company'
stock_fiscal_category_id = fields.Many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão Estoque')
stock_in_fiscal_category_id = fields.Many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão de Entrada',
domain="[('journal_type', 'in', ('sale_refund', 'purchase')), "
"('fiscal_type', '=', 'product'), ('type', '=', 'input')]")
stock_out_fiscal_category_id = fields.Many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão Saída',
domain="[('journal_type', 'in', ('purchase_refund', 'sale')), "
"('fiscal_type', '=', 'product'), ('type', '=', 'output')]")
| # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2011 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp.osv import orm, fields
class res_company(orm.Model):
_inherit = 'res.company'
_columns = {
'stock_fiscal_category_id': fields.many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão Estoque'),
'stock_in_fiscal_category_id': fields.many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão de Entrada',
domain="[('journal_type', 'in', ('sale_refund', 'purchase')), "
"('fiscal_type', '=', 'product'), ('type', '=', 'input')]"),
'stock_out_fiscal_category_id': fields.many2one(
'l10n_br_account.fiscal.category',
u'Categoria Fiscal Padrão Saída',
domain="[('journal_type', 'in', ('purchase_refund', 'sale')), "
"('fiscal_type', '=', 'product'), ('type', '=', 'output')]"),
}
| Python | 0 |
5000eea27c511ad036f03b64e2be7dc69bac0845 | Add `JSONField` | jacquard/odm/fields.py | jacquard/odm/fields.py | import abc
import copy
class BaseField(object, metaclass=abc.ABCMeta):
def __init__(self, null=False, default=None):
self.null = null
self.default = default
@abc.abstractmethod
def transform_to_storage(self, value):
raise NotImplementedError()
@abc.abstractmethod
def transform_from_storage(self, value):
raise NotImplementedError()
def _learn_from_owner(self, owner):
if owner is None:
return
if hasattr(self, 'owner'):
return
self.owner = owner
for field_name, value in vars(owner):
if value is self:
self.name = field_name
def validate(self, raw_value):
if not self.null and raw_value is None:
raise ValueError("%s is not nullable" % self.name)
def __get__(self, obj, owner):
if obj is None:
self._learn_from_owner(owner)
return self
try:
raw_value = obj._fields[self.name]
except KeyError:
return self.default
return self.transform_from_storage(raw_value)
def __set__(self, obj, value):
self._learn_from_owner(type(obj))
if value is None:
obj._fields[self.name] = None
else:
obj._fields[self.name] = self.transform_to_storage(value)
if obj.session:
obj.session.mark_instance_dirty(obj)
def __set_name__(self, owner, name):
self.owner = owner
self.name = name
class TextField(BaseField):
def transform_to_storage(self, value):
return str(value)
def transform_from_storage(self, value):
return value
class JSONField(BaseField):
def transform_to_storage(self, value):
return copy.deepcopy(value)
def transform_from_storage(self, value):
return copy.deepcopy(value)
| import abc
class BaseField(object, metaclass=abc.ABCMeta):
def __init__(self, null=False, default=None):
self.null = null
self.default = default
@abc.abstractmethod
def transform_to_storage(self, value):
raise NotImplementedError()
@abc.abstractmethod
def transform_from_storage(self, value):
raise NotImplementedError()
def _learn_from_owner(self, owner):
if owner is None:
return
if hasattr(self, 'owner'):
return
self.owner = owner
for field_name, value in vars(owner):
if value is self:
self.name = field_name
def validate(self, raw_value):
if not self.null and raw_value is None:
raise ValueError("%s is not nullable" % self.name)
def __get__(self, obj, owner):
if obj is None:
self._learn_from_owner(owner)
return self
try:
raw_value = obj._fields[self.name]
except KeyError:
return self.default
return self.transform_from_storage(raw_value)
def __set__(self, obj, value):
self._learn_from_owner(type(obj))
if value is None:
obj._fields[self.name] = None
else:
obj._fields[self.name] = self.transform_to_storage(value)
if obj.session:
obj.session.mark_instance_dirty(obj)
def __set_name__(self, owner, name):
self.owner = owner
self.name = name
class TextField(BaseField):
def transform_to_storage(self, value):
return value
def transform_from_storage(self, value):
return value
| Python | 0 |
aa3a8ee76f85ef1c3c4c0beb7b6c46a0c69961f1 | allow absent of tornado | http2/__init__.py | http2/__init__.py | # -*- coding: utf-8 -*-
try:
from tornado import version_info
except ImportError:
pass
else:
if version_info[0] >= 4:
from http2.torando4 import *
| # -*- coding: utf-8 -*-
from tornado import version_info
if version_info[0] >= 4:
from http2.torando4 import *
else:
raise NotImplementedError()
| Python | 0.000096 |
ea1fbd21761b5fbe60f179988114320dcb93cf92 | remove unused attr | benchbuild/extensions/base.py | benchbuild/extensions/base.py | """
Extension base-classes for compile-time and run-time experiments.
"""
import collections as c
import logging
import typing as tp
from abc import ABCMeta
from benchbuild.utils import run
LOG = logging.getLogger(__name__)
class Extension(metaclass=ABCMeta):
"""An experiment functor to implement composable experiments.
An experiment extension is always callable with an arbitrary amount of
arguments. The varargs component of an extension's `__call__` operator
is fed the binary command that we currently execute and all arguments
to the binary.
Any customization necessary for the extension (e.g, dynamic configuration
options) has to be passed by keyword argument.
Args:
*extensions: Variable length list of child extensions we manage.
config (:obj:`dict`, optional): Dictionary of name value pairs to be
stored for this extension.
Attributes:
next_extensions: Variable length list of child extensions we manage.
config (:obj:`dict`, optional): Dictionary of name value pairs to be
stored for this extension.
"""
def __init__(self,
*extensions: 'Extension',
config: tp.Any = None,
**kwargs: tp.Any):
"""Initialize an extension with an arbitrary number of children."""
del kwargs
self.next_extensions = extensions
self.config = config
def call_next(self, *args: tp.Any,
**kwargs: tp.Any) -> tp.List[run.RunInfo]:
"""Call all child extensions with the given arguments.
This calls all child extensions and collects the results for
our own parent. Use this to control the execution of your
nested extensions from your own extension.
Returns:
:obj:`list` of :obj:`RunInfo`: A list of collected
results of our child extensions.
"""
all_results = []
for ext in self.next_extensions:
LOG.debug(" %s ", ext)
results = ext(*args, **kwargs)
LOG.debug(" %s => %s", ext, results)
if results is None:
LOG.warning("No result from: %s", ext)
continue
result_list = []
if isinstance(results, c.Iterable):
result_list.extend(results)
else:
result_list.append(results)
all_results.extend(result_list)
return all_results
def __lshift__(self, rhs: 'Extension') -> 'Extension':
rhs.next_extensions = [self]
return rhs
def print(self, indent: int = 0) -> None:
"""Print a structural view of the registered extensions."""
LOG.info("%s:: %s", indent * " ", self.__class__)
for ext in self.next_extensions:
ext.print(indent=indent + 2)
def __call__(self, *args, **kwargs) -> tp.List[run.RunInfo]:
return self.call_next(*args, **kwargs)
def __str__(self) -> str:
return "Extension"
class ExtensionRequired(ValueError):
pass
class MissingExtension(Extension):
"""
Hard fail at runtime, when the user forgets to set an extension.
This raises an exception as soon as a user forgets to provide an extension
for a project from the experiment.
This should be the earliest possible moment to fail, without restricting
existing old experiments.
"""
def __call__(self, *args, **kwargs) -> tp.List[run.RunInfo]:
raise ExtensionRequired()
| """
Extension base-classes for compile-time and run-time experiments.
"""
import collections as c
import logging
import typing as tp
from abc import ABCMeta
import attr
from benchbuild.utils import run
LOG = logging.getLogger(__name__)
class Extension(metaclass=ABCMeta):
"""An experiment functor to implement composable experiments.
An experiment extension is always callable with an arbitrary amount of
arguments. The varargs component of an extension's `__call__` operator
is fed the binary command that we currently execute and all arguments
to the binary.
Any customization necessary for the extension (e.g, dynamic configuration
options) has to be passed by keyword argument.
Args:
*extensions: Variable length list of child extensions we manage.
config (:obj:`dict`, optional): Dictionary of name value pairs to be
stored for this extension.
Attributes:
next_extensions: Variable length list of child extensions we manage.
config (:obj:`dict`, optional): Dictionary of name value pairs to be
stored for this extension.
"""
def __init__(self,
*extensions: 'Extension',
config: tp.Any = None,
**kwargs: tp.Any):
"""Initialize an extension with an arbitrary number of children."""
del kwargs
self.next_extensions = extensions
self.config = config
def call_next(self, *args: tp.Any,
**kwargs: tp.Any) -> tp.List[run.RunInfo]:
"""Call all child extensions with the given arguments.
This calls all child extensions and collects the results for
our own parent. Use this to control the execution of your
nested extensions from your own extension.
Returns:
:obj:`list` of :obj:`RunInfo`: A list of collected
results of our child extensions.
"""
all_results = []
for ext in self.next_extensions:
LOG.debug(" %s ", ext)
results = ext(*args, **kwargs)
LOG.debug(" %s => %s", ext, results)
if results is None:
LOG.warning("No result from: %s", ext)
continue
result_list = []
if isinstance(results, c.Iterable):
result_list.extend(results)
else:
result_list.append(results)
all_results.extend(result_list)
return all_results
def __lshift__(self, rhs: 'Extension') -> 'Extension':
rhs.next_extensions = [self]
return rhs
def print(self, indent: int = 0) -> None:
"""Print a structural view of the registered extensions."""
LOG.info("%s:: %s", indent * " ", self.__class__)
for ext in self.next_extensions:
ext.print(indent=indent + 2)
def __call__(self, *args, **kwargs) -> tp.List[run.RunInfo]:
return self.call_next(*args, **kwargs)
def __str__(self) -> str:
return "Extension"
class ExtensionRequired(ValueError):
pass
class MissingExtension(Extension):
"""
Hard fail at runtime, when the user forgets to set an extension.
This raises an exception as soon as a user forgets to provide an extension
for a project from the experiment.
This should be the earliest possible moment to fail, without restricting
existing old experiments.
"""
def __call__(self, *args, **kwargs) -> tp.List[run.RunInfo]:
raise ExtensionRequired()
| Python | 0.000018 |
50b7345c1dcb3c2fcc05fa61108fa1649ae17a0c | Add admin filters | django_iceberg/admin.py | django_iceberg/admin.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from django_iceberg.models import UserIcebergModel
class UserIcebergModelAdmin(admin.ModelAdmin):
list_display = ('user', 'environment', 'last_updated', 'application_namespace')
list_filter = ('environment', 'last_updated')
search_fields = ('user_username', 'user_first_name')
raw_id_fields = ("user",)
admin.site.register(UserIcebergModel, UserIcebergModelAdmin)
| # -*- coding: utf-8 -*-
from django.contrib import admin
from django_iceberg.models import UserIcebergModel
class UserIcebergModelAdmin(admin.ModelAdmin):
list_display = ('user', 'environment', 'last_updated', 'application_namespace')
raw_id_fields = ("user",)
admin.site.register(UserIcebergModel, UserIcebergModelAdmin)
| Python | 0 |
019a1ab10b71d4bb768e96957e9d485efeb588fc | add admin class for Attachment model --- djangobb_forum/admin.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) | djangobb_forum/admin.py | djangobb_forum/admin.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth.models import User
from djangobb_forum.models import Category, Forum, Topic, Post, Profile, Reputation, \
Report, Ban, Attachment
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'position', 'forum_count']
class ForumAdmin(admin.ModelAdmin):
list_display = ['name', 'category', 'position', 'topic_count']
raw_id_fields = ['moderators', 'last_post']
class TopicAdmin(admin.ModelAdmin):
list_display = ['name', 'forum', 'created', 'head', 'post_count']
search_fields = ['name']
raw_id_fields = ['user', 'subscribers', 'last_post']
class PostAdmin(admin.ModelAdmin):
list_display = ['topic', 'user', 'created', 'updated', 'summary']
search_fields = ['body']
raw_id_fields = ['topic', 'user', 'updated_by']
class ProfileAdmin(admin.ModelAdmin):
list_display = ['user', 'status', 'time_zone', 'location', 'language']
raw_id_fields = ['user']
class ReputationAdmin(admin.ModelAdmin):
list_display = ['from_user', 'to_user', 'post', 'sign', 'time', 'reason']
raw_id_fields = ['from_user', 'to_user', 'post']
class ReportAdmin(admin.ModelAdmin):
list_display = ['reported_by', 'post', 'zapped', 'zapped_by', 'created', 'reason']
raw_id_fields = ['reported_by', 'post']
class BanAdmin(admin.ModelAdmin):
list_display = ['user', 'ban_start', 'ban_end', 'reason']
raw_id_fields = ['user']
class UserAdmin(auth_admin.UserAdmin):
list_display = ['username', 'email', 'first_name', 'last_name', 'is_staff', 'is_active']
def get_urls(self):
from django.conf.urls.defaults import patterns, url
return patterns('',
url(r'^(\d+)/password/$', self.admin_site.admin_view(self.user_change_password), name='user_change_password'),
) + super(auth_admin.UserAdmin, self).get_urls()
class AttachmentAdmin(admin.ModelAdmin):
list_display = ['id', 'name', 'size', 'path', 'hash', ]
search_fields = ['name']
list_display_links = ('name',)
list_filter = ("content_type",)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Forum, ForumAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Reputation, ReputationAdmin)
admin.site.register(Report, ReportAdmin)
admin.site.register(Ban, BanAdmin)
admin.site.register(Attachment, AttachmentAdmin)
admin.site.disable_action('delete_selected') #disabled, because delete_selected ignoring delete model method
| # -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth.models import User
from djangobb_forum.models import Category, Forum, Topic, Post, Profile, Reputation,\
Report, Ban
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'position', 'forum_count']
class ForumAdmin(admin.ModelAdmin):
list_display = ['name', 'category', 'position', 'topic_count']
raw_id_fields = ['moderators', 'last_post']
class TopicAdmin(admin.ModelAdmin):
list_display = ['name', 'forum', 'created', 'head', 'post_count']
search_fields = ['name']
raw_id_fields = ['user', 'subscribers', 'last_post']
class PostAdmin(admin.ModelAdmin):
list_display = ['topic', 'user', 'created', 'updated', 'summary']
search_fields = ['body']
raw_id_fields = ['topic', 'user', 'updated_by']
class ProfileAdmin(admin.ModelAdmin):
list_display = ['user', 'status', 'time_zone', 'location', 'language']
raw_id_fields = ['user']
class ReputationAdmin(admin.ModelAdmin):
list_display = ['from_user', 'to_user', 'post', 'sign', 'time', 'reason']
raw_id_fields = ['from_user', 'to_user', 'post']
class ReportAdmin(admin.ModelAdmin):
list_display = ['reported_by', 'post', 'zapped', 'zapped_by', 'created', 'reason']
raw_id_fields = ['reported_by', 'post']
class BanAdmin(admin.ModelAdmin):
list_display = ['user', 'ban_start', 'ban_end', 'reason']
raw_id_fields = ['user']
class UserAdmin(auth_admin.UserAdmin):
list_display = ['username', 'email', 'first_name', 'last_name', 'is_staff', 'is_active']
def get_urls(self):
from django.conf.urls.defaults import patterns, url
return patterns('',
url(r'^(\d+)/password/$', self.admin_site.admin_view(self.user_change_password), name='user_change_password'),
) + super(auth_admin.UserAdmin, self).get_urls()
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Forum, ForumAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Reputation, ReputationAdmin)
admin.site.register(Report, ReportAdmin)
admin.site.register(Ban, BanAdmin)
admin.site.disable_action('delete_selected') #disabled, because delete_selected ignoring delete model method | Python | 0 |
8a6b88c38b2844fba03b6664fe828ebbd5a08a68 | use pkdlog so it passes test for pkdp | tests/pkdebug2_test.py | tests/pkdebug2_test.py | # -*- coding: utf-8 -*-
u"""pytest for `pykern.pkdebug`
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
def test_format(capsys):
from pykern import pkconfig
pkconfig.reset_state_for_testing({
'PYKERN_PKDEBUG_MAX_DEPTH': '2',
'PYKERN_PKDEBUG_MAX_ELEMENTS': '5',
'PYKERN_PKDEBUG_MAX_STRING': '5',
})
from pykern.pkdebug import pkdlog
def _e(expected, value):
pkdlog('{}', value)
out, err = capsys.readouterr()
err = ' '.join(err.split(' ')[1:])
assert expected + '\n' == err, 'expected={} actual={}'.format(expected, err)
_e(
"{'a': 'b', 'c': {'d': {<SNIP>}}, 'h': 'i'}",
{'a': 'b', 'c': {'d': {'e': {'f': 'g'}}}, 'h': 'i'},
)
_e(
'[1, 2, 3, 4, 5, 6, <SNIP>]',
[1, 2, 3, 4, 5, 6, 7, 8],
)
_e(
'(1, 2, 3, 4)',
(1, 2, 3, 4),
)
_e(
'(1, {2, 3}, {4: 5}, [6, 7])',
(1, {2, 3}, {4: 5}, [6, 7])
)
_e(
"{'Passw<SNIP>': '<REDA<SNIP>', 'c': {'botp': '<REDA<SNIP>'}, 'totp': '<REDA<SNIP>', 'q': ['pAssw<SNIP>', 1], 'x': 'y', 's': 'r', <SNIP>}",
{'Passwd': 'b', 'c': {'botp': 'a'}, 'totp': 'iiii', 'q': ['pAssword', 1], 'x': 'y', 's': 'r', 't': 'u'},
)
_e('a' * 5 + '<SNIP>', 'a' * 80)
_e('<SNIP>' + 'a' * 5, '\n File "' + 'a' * 80)
| # -*- coding: utf-8 -*-
u"""pytest for `pykern.pkdebug`
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
def test_format(capsys):
from pykern import pkconfig
pkconfig.reset_state_for_testing({
'PYKERN_PKDEBUG_MAX_DEPTH': '2',
'PYKERN_PKDEBUG_MAX_ELEMENTS': '5',
'PYKERN_PKDEBUG_MAX_STRING': '5',
})
from pykern.pkdebug import pkdp
def _e(expected, value):
pkdp('{}', value)
out, err = capsys.readouterr()
err = ' '.join(err.split(' ')[1:])
assert expected + '\n' == err, 'expected={} actual={}'.format(expected, err)
_e(
"{'a': 'b', 'c': {'d': {<SNIP>}}, 'h': 'i'}",
{'a': 'b', 'c': {'d': {'e': {'f': 'g'}}}, 'h': 'i'},
)
_e(
'[1, 2, 3, 4, 5, 6, <SNIP>]',
[1, 2, 3, 4, 5, 6, 7, 8],
)
_e(
'(1, 2, 3, 4)',
(1, 2, 3, 4),
)
_e(
'(1, {2, 3}, {4: 5}, [6, 7])',
(1, {2, 3}, {4: 5}, [6, 7])
)
_e(
"{'Passw<SNIP>': '<REDA<SNIP>', 'c': {'botp': '<REDA<SNIP>'}, 'totp': '<REDA<SNIP>', 'q': ['pAssw<SNIP>', 1], 'x': 'y', 's': 'r', <SNIP>}",
{'Passwd': 'b', 'c': {'botp': 'a'}, 'totp': 'iiii', 'q': ['pAssword', 1], 'x': 'y', 's': 'r', 't': 'u'},
)
_e('a' * 5 + '<SNIP>', 'a' * 80)
_e('<SNIP>' + 'a' * 5, '\n File "' + 'a' * 80)
| Python | 0 |
db4b63ee097116c5be711d1b6a69100065f1a885 | update format unicode | weby/utils.py | weby/utils.py | # coding=utf8
from datetime import datetime, date
import json
def format_dic(dic):
"""将 dic 格式化为 JSON,处理日期等特殊格式"""
for key, value in dic.iteritems():
dic[key] = format_value(value)
return dic
def format_value(value, include_fields=[], is_compact=True):
if isinstance(value, dict):
return format_dic(value)
elif isinstance(value, list):
return format_list(value)
elif isinstance(value, datetime):
return value.isoformat()
#elif isinstance(value, bool):
#return 1 if value else 0
elif hasattr(value, 'to_api_dic'):
return value.to_api_dic(include_fields, is_compact)
else:
try:
json.dumps(value)
return value
except:
return unicode(value)
def format_list(l):
return [format_value(x) for x in l]
| # coding=utf8
from datetime import datetime, date
def format_dic(dic):
"""将 dic 格式化为 JSON,处理日期等特殊格式"""
for key, value in dic.iteritems():
dic[key] = format_value(value)
return dic
def format_value(value):
if isinstance(value, dict):
return format_dic(value)
elif isinstance(value, list):
return format_list(value)
elif isinstance(value, datetime):
return value.isoformat()
elif isinstance(value, date):
return value.isoformat()
#elif isinstance(value, API_V1_Mixture):
#return value.to_api_dic(is_compact=True)
else:
return value
def format_list(l):
return [format_value(x) for x in l]
| Python | 0.000009 |
72df56880ffaf0aba3b6f919d5a7f2add32623dc | Update binary_clock.py | binary_clock.py | binary_clock.py | __author__ = 'tim mcguire'
import datetime
import math
import Tkinter
import sys,os
def to_binary(dec, width):
x = width - 1
answer = ""
while x >= 0:
current_power = math.pow(2, x)
# how many powers of two fit into dec?
how_many = int(dec / current_power)
answer += str(how_many)
dec -= how_many * current_power
x -= 1
return answer
def draw_vertical_line(x):
main_canvas.create_line(x+17,start_y+20,x+17,start_y - 60)
def fill_dots(times_to_use, x,length):
tup = tens_and_ones(times_to_use)
for num in tup:
binary_string = to_binary(num, length)
length =4
x += right_step
y = start_y
for bit in reversed(binary_string):
coord = x, y, x + dot_size, y + dot_size
if bit == '1':
main_canvas.create_oval(coord, fill="red")
else:
main_canvas.create_oval(coord, fill="blue")
y -= 15
return x
def tens_and_ones(num):
tens = int(num / 10)
ones = num % 10
return tens, ones
def run(master):
t = datetime.datetime.now()
time_collection = t.hour, t.minute, t.second
x = 15
length =2
for val in time_collection:
# val is the numeric value, x is horizontal offset, length is how many dots tall the stack will be
x = fill_dots(val, x,length)
draw_vertical_line(x)
length =3
main_canvas.pack()
main_canvas.after(200, run, master)
time_format = 24
if len(sys.argv) >= 2:
time_format = sys.argv[1]
start_y = 150
right_step = 20
dot_size = 15
root = Tkinter.Tk()
root.geometry('300x200')
main_canvas = Tkinter.Canvas(root, bg="blue", height=300, width=200)
run(main_canvas)
root.mainloop()
| __author__ = 'tim mcguire'
import datetime
import math
import Tkinter
import sys,os
def to_binary(dec, width):
x = width - 1
answer = ""
while x >= 0:
current_power = math.pow(2, x)
# how many powers of two fit into dec?
how_many = int(dec / current_power)
answer += str(how_many)
dec -= how_many * current_power
x -= 1
return answer
def draw_vertical_line(x):
main_canvas.create_line(x+17,start_y+20,x+17,start_y - 60)
def fill_dots(times_to_use, x,length):
tup = tens_and_ones(times_to_use)
for num in tup:
binary_string = to_binary(num, length)
length =4
x += right_step
y = start_y
for bit in reversed(binary_string):
coord = x, y, x + dot_size, y + dot_size
if bit == '1':
main_canvas.create_oval(coord, fill="red")
else:
main_canvas.create_oval(coord, fill="blue")
y -= 15
return x
def tens_and_ones(num):
tens = int(num / 10)
ones = num % 10
return tens, ones
def run(master):
t = datetime.datetime.now()
time_collection = t.hour, t.minute, t.second
x = 15
length =2
for val in time_collection:
# val is the numeric value, x is horizontal offset, length is how many dots tall the stack will be
x = fill_dots(val, x,length)
draw_vertical_line(x)
length =3
main_canvas.pack()
main_canvas.after(200, run, master)
# Read the time format from the CLI when supplied; default to 24-hour so
# the clock still starts when launched with no arguments (the previous
# unconditional sys.argv[1] raised IndexError in that case).
time_format = 24
if len(sys.argv) >= 2:
    time_format = sys.argv[1]
# Layout constants shared by the drawing helpers above.
start_y = 150
right_step = 20
dot_size = 15
# Build the Tk window/canvas and start the redraw loop.
root = Tkinter.Tk()
root.geometry('300x200')
main_canvas = Tkinter.Canvas(root, bg="blue", height=300, width=200)
run(main_canvas)
root.mainloop() | Python | 0.000002 |
36ca52e816a2938c6723e3ec2ed4a350958c78d8 | remove comments | binary_clock.py | binary_clock.py | __author__ = 'tim mcguire'
import datetime
import math
import Tkinter
def to_binary(dec, width):
x = width - 1
answer = ""
while x >= 0:
current_power = math.pow(2, x)
# how many powers of two fit into dec?
how_many = int(dec / current_power)
answer += str(how_many)
dec -= how_many * current_power
x -= 1
return answer
def fill_dots(times_to_use, x,length):
tup = tens_and_ones(times_to_use)
for num in tup:
binary_string = to_binary(num, length)
length =4
x += right_step
y = start_y
for bit in reversed(binary_string):
coord = x, y, x + dot_size, y + dot_size
if bit == '1':
main_canvas.create_oval(coord, fill="red")
else:
main_canvas.create_oval(coord, fill="blue")
y -= 15
return x
def tens_and_ones(num):
tens = int(num / 10)
ones = num % 10
return tens, ones
def run(master):
    """Draw the current time as binary dot columns, then reschedule
    itself via Tk's event loop every 200ms."""
    t = datetime.datetime.now()
    time_collection = t.hour, t.minute, t.second
    x = 15
    length =2
    for val in time_collection:
        # val is the numeric value, x is horizontal offset, length is how many dots tall the stack will be
        x = fill_dots(val, x,length)
        length =3
    main_canvas.pack()
    main_canvas.after(200, run, master)
# Layout constants shared by the drawing helpers above.
start_y = 150
right_step = 20
dot_size = 15
# Build the Tk window/canvas and start the redraw loop.
root = Tkinter.Tk()
root.geometry('300x200')
main_canvas = Tkinter.Canvas(root, bg="blue", height=300, width=200)
run(main_canvas)
root.mainloop() | __author__ = 'tim mcguire'
import datetime
import math
import Tkinter
def to_binary(dec, width):
x = width - 1
answer = ""
while x >= 0:
current_power = math.pow(2, x)
# how many powers of two fit into dec?
how_many = int(dec / current_power)
answer += str(how_many)
dec -= how_many * current_power
x -= 1
return answer
def fill_dots(times_to_use, x,length):
tup = tens_and_ones(times_to_use)
for num in tup:
#2,4,3,4,3,4
binary_string = to_binary(num, length)
length =4
x += right_step
y = start_y
for bit in reversed(binary_string):
coord = x, y, x + dot_size, y + dot_size
if bit == '1':
main_canvas.create_oval(coord, fill="red")
else:
main_canvas.create_oval(coord, fill="blue")
y -= 15
return x
def tens_and_ones(num):
tens = int(num / 10)
ones = num % 10
return tens, ones
def run(master):
t = datetime.datetime.now()
time_collection = t.hour, t.minute, t.second
x = 15
length =2
for val in time_collection:
# val is the numeric value, x is horizontal offset, length is how many dots tall the stack will be
x = fill_dots(val, x,length)
length =3
main_canvas.pack()
main_canvas.after(200, run, master)
start_y = 150
right_step = 20
dot_size = 15
root = Tkinter.Tk()
root.geometry('300x200')
main_canvas = Tkinter.Canvas(root, bg="blue", height=300, width=200)
run(main_canvas)
root.mainloop() | Python | 0 |
e59d6be5a31dbe775f6481d079f0f4e81a27a9ce | Add import of the re module to the utils module | classyfd/utils.py | classyfd/utils.py | """
Contains utility functions used within this library that are also useful
outside of it.
"""
import os
import pwd
import string
import random
import re
# Operating System Functions
def determine_if_os_is_posix_compliant():
    """Report whether the operating system is POSIX compliant.

    Return Value:
    (bool)
    """
    # os.name is "posix" on Unix-like systems, "nt" on Windows.
    is_posix = os.name == "posix"
    return bool(is_posix)
def determine_if_running_as_root_user():
    """Report whether the Python process is running as "root".

    Supported Operating Systems:
    Unix-like

    Return Value:
    (bool)
    """
    # UID 0 is root on virtually every Unix-like system; as a fallback,
    # also accept an account literally named "root".  The *effective* UID
    # is checked (os.geteuid) so privilege drops/escalations are honoured.
    euid = os.geteuid()
    if euid == 0:
        return True
    return bool(pwd.getpwuid(euid).pw_name.lower() == "root")
# File Functions
def get_random_file_name(directory):
    """Generate a random, unique file name of 32 characters.

    The name is drawn from lowercase letters and digits and is guaranteed
    not to collide with an existing entry in *directory*.

    Parameters:
    directory -- (str) the directory the file will be in.  This determines
    the uniqueness check.

    Return Value:
    (str) the bare randomly generated file name (no path component).
    """
    CHARACTER_LENGTH = 32
    alphabet = string.ascii_lowercase + string.digits
    while True:
        candidate = "".join(
            random.choice(alphabet) for _ in range(CHARACTER_LENGTH)
        )
        # Retry on the (unlikely) collision with an existing path.
        if not os.path.exists(os.path.join(directory, candidate)):
            return candidate
| """
Contains utility functions used within this library that are also useful
outside of it.
"""
import os
import pwd
import string
import random
# Operating System Functions
def determine_if_os_is_posix_compliant():
"""
Determine if the operating system is POSIX compliant or not
Return Value:
(bool)
"""
return bool(os.name == "posix")
def determine_if_running_as_root_user():
"""
Determine if the user running Python is "root" or not
Supported Operating Systems:
Unix-like
Return Value:
(bool)
"""
# 0 is the UID used for most Unix-like systems for the root user. In the
# event that it's not, another check is done to see if the username is
# "root".
#
# For an explanation on why os.geteuid was used, instead of os.getuid,
# see: http://stackoverflow.com/a/14951764
is_running_as_root = bool(
os.geteuid() == 0 or
pwd.getpwuid(os.geteuid()).pw_name.lower() == "root"
)
return is_running_as_root
# File Functions
def get_random_file_name(directory):
"""
Generate a random, unique file name of 32 characters
The generated file name may include lowercase letters and numbers.
Parameters:
directory -- (str) the directory the file will be in. This will determine
the unique name given to it.
Return Value:
random_file_name -- (str) this is just a randomly generated file name, so
the full/absolute path is not included.
"""
CHARACTER_LENGTH = 32
NUMBERS = string.digits
LETTERS = string.ascii_lowercase
VALID_CHARACTERS = tuple(LETTERS + NUMBERS)
while True:
random_file_name = ""
for i in range(CHARACTER_LENGTH):
random_file_name += random.choice(VALID_CHARACTERS)
file_path_already_exists = os.path.exists(
os.path.join(directory, random_file_name)
)
if file_path_already_exists:
# Try again
continue
else:
# Sweet, use the generated file name
break
return random_file_name | Python | 0 |
00e865178f8e1762e7cd1ec8d44713d73cc58c47 | tidy up of DynTypedNode in python | clast/__init__.py | clast/__init__.py | import _clast
from _clast import *
def __get(self, kind):
    # Resolve the generated per-kind accessor (e.g. `_get_Stmt` for
    # kind=Stmt) on this node and invoke it.
    accessor = getattr(self, '_get_' + kind.__name__)
    return accessor()
# Monkey patch an extra method onto the extension type; this Python-level
# getattr dispatch cannot be expressed on the C++ side.
_clast.DynTypedNode.get = __get
| import _clast
from _clast import *
## REPRESENTATIVE CLASSES ONLY
def cxxRecordDecl(*args):
    """Matcher for C++ record (class/struct/union) declarations; *args are submatchers."""
    return _clast._cxxRecordDecl(list(args))
def decl(*args):
    """Matcher for any declaration node; *args are submatchers."""
    return _clast._decl(list(args))
def stmt(*args):
    """Matcher for any statement node; *args are submatchers."""
    return _clast._stmt(list(args))
def forStmt(*args):
    """Matcher for `for` statements; *args are submatchers."""
    return _clast._forStmt(list(args))
def hasLoopInit(arg):
    """Narrowing matcher: the loop's init clause must match *arg*."""
    return _clast._hasLoopInit(arg)
def ifStmt(*args):
    """Matcher for `if` statements; *args are submatchers."""
    return _clast._ifStmt(list(args))
def hasCondition(expr):
    """Narrowing matcher: the statement's condition must match *expr*."""
    return _clast._hasCondition(expr)
| Python | 0.000001 |
a3ad232c3f9734e94ed09088b260ff7f6bd722d7 | Fix wordExists() | Library.py | Library.py | import dataset
import re
from Generator import generateWord
db = None
phonemes = {}
allophones = {}
declensions = {}
categories = {}
def transcribePhonemes(word):
    '''Convert an orthographic word to its phonemic form.

    Applies every phoneme substitution rule in order, then wraps the
    result in slashes (/.../) per linguistic convention.
    '''
    for pattern, replacement in phonemes.items():
        word = re.sub(pattern, replacement, word)
    return "/" + word + "/"
def transcribeAllophones(word):
    '''Convert a phonemic form (/.../) to a full phonetic form ([...])
    by applying every allophone substitution rule in order.
    '''
    inner = word[1:-1]  # strip the surrounding slashes
    for pattern, replacement in allophones.items():
        inner = re.sub(pattern, replacement, inner)
    return "[" + inner + "]"
def getStatistics():
    '''Returns number of words in database.'''
    # len() on a dataset table counts its rows.
    return len(db['words'])
def exportWords(filename):
    '''Takes filename and outputs csv.'''
    allWords = db['words'].all()
    # dataset.freeze serialises the whole result set straight to disk.
    dataset.freeze(allWords, format='csv', filename=filename)
    print("Exported all words to " + filename)
def searchWords(term):
    '''Look *term* up in both dictionary directions.

    Returns a tuple of two lists: rows matching the English column and
    rows matching the conlang column, in that order.
    '''
    english_matches = list(db['words'].find(english=term))
    conlang_matches = list(db['words'].find(word=term))
    return (english_matches, conlang_matches)
def getAvailableDeclensions():
    '''Returns declension list.'''
    # `declensions` is a dict; listing it yields the declension names.
    return list(declensions)
def declineWord(word, d):
    '''Apply declension *d* (a "pattern->replacement" regex rule) to the
    word record, mutating and returning it.
    '''
    rule = declensions[d].split("->")
    word['word'] = re.sub(rule[0], rule[1], word['word'])
    return word
def findConWord(term):
    '''Return the first row whose conlang word equals *term*, or None
    when no such row exists.
    '''
    return db['words'].find_one(word=term)
def findEnglishWord(term):
    '''Return the first row whose English word equals *term*, or None
    when no such row exists.
    '''
    return db['words'].find_one(english=term)
def wordExists(english=None, conlang=None):
    '''Return True when the given conlang and/or English word is present
    in the database, False otherwise.  Arguments left as None are not
    searched.
    '''
    if conlang is not None and findConWord(conlang) is not None:
        return True
    if english is not None and findEnglishWord(english) is not None:
        return True
    return False
def getFields():
    '''Return the words table's column names minus the bookkeeping
    columns id, english and word.
    '''
    fields = db['words'].columns
    # Same removal order as before, so a missing column raises identically.
    for builtin in ("english", "word", "id"):
        fields.remove(builtin)
    return fields
def getFieldOptions(field):
    '''Takes a field. Returns all possible options for field that
    exist within database.

    Rows where the field is unset (None) are excluded.  Previously only
    the *first* None was removed, so a column with several empty rows
    still reported None among the options.
    '''
    rows = db['words'][field]
    return [row[field] for row in rows if row[field] is not None]
def listWords(t, f=None, o=None):
    '''Takes type of list ("all" or "field") and returns the matching
    words.

    t -- "all" for every word, "field" to filter.
    f -- column name to filter on (only used when t == "field").
    o -- LIKE pattern the column must match.
    '''
    if t == "all":
        return list(db['words'])
    if t == "field":
        # Bind the pattern as a query parameter instead of splicing it
        # into the SQL string (the old concatenation was injectable).
        # The column name cannot be bound as a parameter, so callers must
        # pass a trusted field name, e.g. one returned by getFields().
        q = 'SELECT * FROM words WHERE ' + f + ' LIKE :pattern'
        return list(db.query(q, pattern=o))
    return []
def addWord(word):
    '''Takes word object and adds word to database.'''
    db['words'].insert(word)
def setPhonemes(l):
    # Replace the module-wide phoneme substitution table.
    global phonemes
    phonemes = l
def setAllophones(l):
    # Replace the module-wide allophone substitution table.
    global allophones
    allophones = l
def setCategories(l):
    # Replace the module-wide category table.
    global categories
    categories = l
def getCategories():
    # Accessor for the module-wide category table.
    return categories
def setDeclensions(l):
    # Replace the module-wide declension rule table.
    global declensions
    declensions = l
def loadDatabase(filename="words.db"):
    # Open (or create) the SQLite word database and bind it module-wide.
    global db
    location = "sqlite:///" + filename
    db = dataset.connect(location)
| import dataset
import re
from Generator import generateWord
db = None
phonemes = {}
allophones = {}
declensions = {}
categories = {}
def transcribePhonemes(word):
'''Transcribe from orthographic representation to phonetic
representation.
'''
for current, new in phonemes.items():
word = re.sub(current, new, word)
word = "/" + word + "/"
return word
def transcribeAllophones(word):
'''Transcribe from phonetic representation to full IPA
representation.
'''
word = word[1:-1]
for current, new in allophones.items():
word = re.sub(current, new, word)
word = "[" + word + "]"
return word
def getStatistics():
'''Returns number of words in database.'''
return len(db['words'])
def exportWords(filename):
'''Takes filename and outputs csv.'''
allWords = db['words'].all()
dataset.freeze(allWords, format='csv', filename=filename)
print("Exported all words to " + filename)
def searchWords(term):
'''Takes a search term. Returns tuple of two lists, the first
populated with matching English words and the second with
matching conlang words.
'''
englishresult = db['words'].find(english=term)
conlangresult = db['words'].find(word=term)
return (list(englishresult), list(conlangresult))
def getAvailableDeclensions():
'''Returns declension list.'''
return list(declensions)
def declineWord(word, d):
'''Declines word with declension d. Returns declined word.'''
dec = declensions[d].split("->")
word['word'] = re.sub(dec[0], dec[1], word['word'])
return word
def findConWord(term):
    '''Finds the first occurrence of term in conlang column of database and
    returns as a word.
    '''
    word = db['words'].find_one(word=term)
    if word is None:
        # Signal "not found" to callers such as wordExists().
        raise LookupError
    else:
        return word
def findEnglishWord(term):
    '''Finds the first occurrence of term in English column of database
    and returns as a word.
    '''
    word = db['words'].find_one(english=term)
    if word is None:
        # Signal "not found" to callers such as wordExists().
        raise LookupError
    else:
        return word
def wordExists(term):
    '''Accepts string and searches for it in conlang words list and English words
    list. If word exists in database, returns True, otherwise returns False.
    '''
    # Previously BOTH lookups had to succeed before True was returned, so a
    # word present in only one column was reported as missing.  A match in
    # either column now counts as existing, per the docstring.
    for lookup in (findConWord, findEnglishWord):
        try:
            lookup(term)
        except LookupError:
            continue
        return True
    return False
def getFields():
'''Returns list of fields, not including id, english, or word.'''
fields = db['words'].columns
fields.remove("english")
fields.remove("word")
fields.remove("id")
return fields
def getFieldOptions(field):
'''Takes a field. Returns all possible options for field that
exist within database.
'''
l = list(db['words'][field])
options = []
for item in l:
options.append(item[field])
if None in options:
options.remove(None)
return options
def listWords(t, f=None, o=None):
'''Takes type of list (full or specific form) and form. Returns list of
matching words.
'''
outList = []
if t == "all":
for word in db['words']:
outList.append(word)
elif t == "field":
q = 'SELECT * FROM words WHERE ' + f + ' LIKE "' + o + '"'
for word in db.query(q):
outList.append(word)
return outList
def addWord(word):
'''Takes word object and adds word to database.'''
db['words'].insert(word)
def setPhonemes(l):
global phonemes
phonemes = l
def setAllophones(l):
global allophones
allophones = l
def setCategories(l):
global categories
categories = l
def getCategories():
return categories
def setDeclensions(l):
global declensions
declensions = l
def loadDatabase(filename="words.db"):
global db
location = "sqlite:///" + filename
db = dataset.connect(location)
| Python | 0.001251 |
da1fc79f8eb476f7ed22d7969a1558ab6a1e3f5d | Use a name for the fabricated type that makes clearer it is fabricated | src/zeit/cms/content/add.py | src/zeit/cms/content/add.py | # Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import datetime
import grokcore.component as grok
import urllib
import zeit.cms.content.interfaces
import zeit.cms.repository.interfaces
import zope.browser.interfaces
import zope.component
import zope.interface
class ContentAdder(object):
    """Computes the URL of the add form for a content type, inside the
    target folder derived from ressort/sub-ressort and year/month."""

    zope.interface.implements(zeit.cms.content.interfaces.IContentAdder)

    def __init__(self, request,
                 type_=None, ressort=None,
                 sub_ressort=None, year=None, month=None):
        self.request = request
        self.type_ = type_
        self.ressort = ressort
        self.sub_ressort = sub_ressort
        # Default to the current year/month when not given explicitly.
        now = datetime.date.today()
        if year is None:
            year = now.year
        if month is None:
            month = now.month
        self.year = year
        self.month = month

    def __call__(self):
        """Return the URL of the type's add form, with form parameters
        for the preselected ressort/sub-ressort appended."""
        # we want to register the IAddLocation adapter for the content-type,
        # which is an *interface*. We need a representative object providing
        # that interface to be able to ask for those adapters, since
        # zope.component looks for provides when an interface is required, and
        # interfaces don't provide themselves.
        dummy = type('Provides_' + self.type_.__name__, (object,), {})()
        zope.interface.alsoProvides(dummy, self.type_)
        context = zope.component.getMultiAdapter(
            (dummy, self), zeit.cms.content.interfaces.IAddLocation)
        params = {}
        for key in ['ressort', 'sub_ressort']:
            token = self._get_token(key)
            if token is not None:
                params['form.' + key] = token
        return '%s/@@%s?%s' % (
            zope.traversing.browser.absoluteURL(context, self.request),
            self.type_.getTaggedValue('zeit.cms.addform'),
            urllib.urlencode(params))

    def _get_token(self, field,
                   interface=zeit.cms.content.interfaces.IContentAdder):
        """Return the ITerms token for this adder's value of *field*, or
        None when no value is set."""
        field = interface[field]
        # NOTE: was `callable(x) and x(self) or x`, which fell back to the
        # *callable itself* whenever the call returned a falsy source.
        source = field.source(self) if callable(field.source) else field.source
        terms = zope.component.getMultiAdapter(
            (source, self.request), zope.browser.interfaces.ITerms)
        value = field.get(self)
        if not value:
            return None
        return terms.getTerm(value).token
@grok.adapter(
    zeit.cms.interfaces.ICMSContent,
    zeit.cms.content.interfaces.IContentAdder)
@grok.implementer(zeit.cms.content.interfaces.IAddLocation)
def ressort_year_folder(type_, adder):
    """Locate (creating as needed) /<ressort>/<sub_ressort>/<YYYY-MM>."""
    ressort = adder.ressort.lower() if adder.ressort else adder.ressort
    sub_ressort = (
        adder.sub_ressort.lower() if adder.sub_ressort else adder.sub_ressort)
    month_folder = '%s-%02d' % (adder.year, int(adder.month))
    return find_or_create_folder(ressort, sub_ressort, month_folder)
def find_or_create_folder(*path_elements):
    """Walk *path_elements* below the repository root, creating each
    missing folder on the way, and return the innermost folder.

    None elements are skipped entirely.
    """
    folder = zope.component.getUtility(
        zeit.cms.repository.interfaces.IRepository)
    for name in path_elements:
        if name is None:
            continue
        if name not in folder:
            folder[name] = zeit.cms.repository.folder.Folder()
        folder = folder[name]
    return folder
| # Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import datetime
import grokcore.component as grok
import urllib
import zeit.cms.content.interfaces
import zeit.cms.repository.interfaces
import zope.browser.interfaces
import zope.component
import zope.interface
class ContentAdder(object):
zope.interface.implements(zeit.cms.content.interfaces.IContentAdder)
def __init__(self, request,
type_=None, ressort=None,
sub_ressort=None, year=None, month=None):
self.request = request
self.type_ = type_
self.ressort = ressort
self.sub_ressort = sub_ressort
now = datetime.date.today()
if year is None:
year = now.year
if month is None:
month = now.month
self.year = year
self.month = month
def __call__(self):
# we want to register the IAddLocation adapter for the content-type,
# which is an *interface*. We need a representative object providing
# that interface to be able to ask for those adapters, since
# zope.component looks for provides when an interface is required, and
# interfaces don't provide themselves.
dummy = type(self.type_.__name__, (object,), {})()
zope.interface.alsoProvides(dummy, self.type_)
context = zope.component.getMultiAdapter(
(dummy, self), zeit.cms.content.interfaces.IAddLocation)
params = {}
for key in ['ressort', 'sub_ressort']:
token = self._get_token(key)
if token is not None:
params['form.' + key] = token
return '%s/@@%s?%s' % (
zope.traversing.browser.absoluteURL(context, self.request),
self.type_.getTaggedValue('zeit.cms.addform'),
urllib.urlencode(params))
def _get_token(self, field,
interface=zeit.cms.content.interfaces.IContentAdder):
field = interface[field]
source = callable(field.source) and field.source(self) or field.source
terms = zope.component.getMultiAdapter(
(source, self.request), zope.browser.interfaces.ITerms)
value = field.get(self)
if not value:
return None
return terms.getTerm(value).token
@grok.adapter(
zeit.cms.interfaces.ICMSContent,
zeit.cms.content.interfaces.IContentAdder)
@grok.implementer(zeit.cms.content.interfaces.IAddLocation)
def ressort_year_folder(type_, adder):
ressort = adder.ressort and adder.ressort.lower()
sub_ressort = adder.sub_ressort and adder.sub_ressort.lower()
return find_or_create_folder(
ressort, sub_ressort, '%s-%02d' % (adder.year, int(adder.month)))
def find_or_create_folder(*path_elements):
repos = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
folder = repos
for elem in path_elements:
if elem is None:
continue
if elem not in folder:
folder[elem] = zeit.cms.repository.folder.Folder()
folder = folder[elem]
return folder
| Python | 0.000018 |
a4db65ff4c5b3edd4739b0864f4e1641b37b3b87 | Remove wrong comment | setuptools/tests/test_logging.py | setuptools/tests/test_logging.py | import inspect
import logging
import os
import pytest
setup_py = """\
from setuptools import setup
setup(
name="test_logging",
version="0.0"
)
"""
@pytest.mark.parametrize(
    "flag, expected_level", [("--dry-run", "INFO"), ("--verbose", "DEBUG")]
)
def test_verbosity_level(tmp_path, monkeypatch, flag, expected_level):
    """Make sure the correct verbosity level is set (issue #3038)"""
    import setuptools  # noqa: Import setuptools to monkeypatch distutils
    import distutils  # <- load distutils after all the patches take place
    # Swap in a fresh root logger so the assertion below observes only the
    # level that this command-line run sets.
    logger = logging.Logger(__name__)
    monkeypatch.setattr(logging, "root", logger)
    unset_log_level = logger.getEffectiveLevel()
    assert logging.getLevelName(unset_log_level) == "NOTSET"
    setup_script = tmp_path / "setup.py"
    setup_script.write_text(setup_py)
    dist = distutils.core.run_setup(setup_script, stop_after="init")
    dist.script_args = [flag, "sdist"]
    dist.parse_command_line()  # <- where the log level is set
    log_level = logger.getEffectiveLevel()
    log_level_name = logging.getLevelName(log_level)
    assert log_level_name == expected_level
def test_patching_does_not_cause_problems():
    # Ensure `dist.log` is only patched if necessary
    import setuptools.logging
    from distutils import dist
    setuptools.logging.configure()
    if os.getenv("SETUPTOOLS_USE_DISTUTILS", "local").lower() == "local":
        # Modern logging infra, no problematic patching.
        assert isinstance(dist.log, logging.Logger)
    else:
        # stdlib distutils: `log` should remain the legacy module, untouched.
        assert inspect.ismodule(dist.log)
| import inspect
import logging
import os
import pytest
setup_py = """\
from setuptools import setup
setup(
name="test_logging",
version="0.0"
)
"""
@pytest.mark.parametrize(
"flag, expected_level", [("--dry-run", "INFO"), ("--verbose", "DEBUG")]
)
def test_verbosity_level(tmp_path, monkeypatch, flag, expected_level):
"""Make sure the correct verbosity level is set (issue #3038)"""
import setuptools # noqa: Import setuptools to monkeypatch distutils
import distutils # <- load distutils after all the patches take place
logger = logging.Logger(__name__)
monkeypatch.setattr(logging, "root", logger)
unset_log_level = logger.getEffectiveLevel()
assert logging.getLevelName(unset_log_level) == "NOTSET"
setup_script = tmp_path / "setup.py"
setup_script.write_text(setup_py)
dist = distutils.core.run_setup(setup_script, stop_after="init")
dist.script_args = [flag, "sdist"]
dist.parse_command_line() # <- where the log level is set
log_level = logger.getEffectiveLevel()
log_level_name = logging.getLevelName(log_level)
assert log_level_name == expected_level
def test_patching_does_not_cause_problems():
# Ensure `dist.log` is only patched if necessary
import setuptools.logging
from distutils import dist # <- load distutils after all the patches take place
setuptools.logging.configure()
if os.getenv("SETUPTOOLS_USE_DISTUTILS", "local").lower() == "local":
# Modern logging infra, no problematic patching.
assert isinstance(dist.log, logging.Logger)
else:
assert inspect.ismodule(dist.log)
| Python | 0 |
ef4e84d2defbf4899f0a1745fce5162e2510c1f7 | test "merge-patches --help" | rhcephpkg/tests/test_merge_patches.py | rhcephpkg/tests/test_merge_patches.py | import pytest
import subprocess
from rhcephpkg import MergePatches
from rhcephpkg.tests.util import CallRecorder
def git(*args):
    """ shortcut for shelling out to git; raises CalledProcessError on failure """
    subprocess.check_call(['git'] + list(args))
class TestMergePatches(object):
    """Exercise `merge-patches` from both a debian branch and a
    patch-queue branch, with and without --force, by recording the git
    command it shells out to."""

    def test_help(self, capsys):
        mergep = MergePatches(['rhcephpkg', 'merge-patches', '--help'])
        with pytest.raises(SystemExit):
            mergep.main()
        out, _ = capsys.readouterr()
        assert "Fetch the latest patches branch that rdopkg uses" in out

    def test_on_debian_branch(self, testpkg, monkeypatch):
        # set our current branch to be a debian branch:
        git('checkout', 'ceph-2-ubuntu')
        recorder = CallRecorder()
        monkeypatch.setattr('subprocess.check_call', recorder)
        localbuild = MergePatches([])
        localbuild._run()
        # Verify that we run the "git fetch" command here.
        expected = ['git', 'fetch', '.',
                    'patches/ceph-2-rhel-patches:patch-queue/ceph-2-ubuntu']
        assert recorder.args == expected

    def test_on_patch_queue_branch(self, testpkg, monkeypatch):
        # set our current branch to be a patch-queue branch:
        git('checkout', 'patch-queue/ceph-2-ubuntu')
        recorder = CallRecorder()
        monkeypatch.setattr('subprocess.check_call', recorder)
        localbuild = MergePatches([])
        localbuild._run()
        # Verify that we run the "git merge" command here.
        expected = ['git', 'pull', '--ff-only', 'patches/ceph-2-rhel-patches']
        assert recorder.args == expected

    def test_force_on_debian_branch(self, testpkg, monkeypatch):
        # set current_branch() to a debian branch:
        git('checkout', 'ceph-2-ubuntu')
        recorder = CallRecorder()
        monkeypatch.setattr('subprocess.check_call', recorder)
        localbuild = MergePatches([])
        localbuild._run(force=True)
        # Verify that we run the "git push" command here.
        expected = ['git', 'push', '.',
                    '+patches/ceph-2-rhel-patches:patch-queue/ceph-2-ubuntu']
        assert recorder.args == expected

    def test_force_on_patch_queue_branch(self, testpkg, monkeypatch):
        # set current_branch() to a patch-queue branch:
        git('checkout', 'patch-queue/ceph-2-ubuntu')
        recorder = CallRecorder()
        monkeypatch.setattr('subprocess.check_call', recorder)
        localbuild = MergePatches([])
        localbuild._run(force=True)
        # Verify that we run the "git reset" command here.
        expected = ['git', 'reset', '--hard', 'patches/ceph-2-rhel-patches']
        assert recorder.args == expected
class TestMergePatchesRhelPatchesBranch(object):
    """Mapping from debian branch names to their rhel-patches branch."""

    @pytest.mark.parametrize('debian_branch,expected', [
        ('ceph-1.3-ubuntu', 'ceph-1.3-rhel-patches'),
        ('ceph-2-ubuntu', 'ceph-2-rhel-patches'),
        ('ceph-2-trusty', 'ceph-2-rhel-patches'),
        ('ceph-2-xenial', 'ceph-2-rhel-patches'),
        ('someotherproduct-2-ubuntu', 'someotherproduct-2-rhel-patches'),
        ('ceph-2-ubuntu-hotfix-bz123', 'ceph-2-rhel-patches-hotfix-bz123'),
        ('ceph-2-ubuntu-test-bz456', 'ceph-2-rhel-patches-test-bz456'),
    ])
    def test_get_rhel_patches_branch(self, debian_branch, expected):
        m = MergePatches([])
        assert m.get_rhel_patches_branch(debian_branch) == expected
| import pytest
import subprocess
from rhcephpkg import MergePatches
from rhcephpkg.tests.util import CallRecorder
def git(*args):
""" shortcut for shelling out to git """
cmd = ['git'] + list(args)
subprocess.check_call(cmd)
class TestMergePatches(object):
def test_on_debian_branch(self, testpkg, monkeypatch):
# set our current branch to be a debian branch:
git('checkout', 'ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run()
# Verify that we run the "git fetch" command here.
expected = ['git', 'fetch', '.',
'patches/ceph-2-rhel-patches:patch-queue/ceph-2-ubuntu']
assert recorder.args == expected
def test_on_patch_queue_branch(self, testpkg, monkeypatch):
# set our current branch to be a patch-queue branch:
git('checkout', 'patch-queue/ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run()
# Verify that we run the "git merge" command here.
expected = ['git', 'pull', '--ff-only', 'patches/ceph-2-rhel-patches']
assert recorder.args == expected
def test_force_on_debian_branch(self, testpkg, monkeypatch):
# set current_branch() to a debian branch:
git('checkout', 'ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run(force=True)
# Verify that we run the "git push" command here.
expected = ['git', 'push', '.',
'+patches/ceph-2-rhel-patches:patch-queue/ceph-2-ubuntu']
assert recorder.args == expected
def test_force_on_patch_queue_branch(self, testpkg, monkeypatch):
# set current_branch() to a patch-queue branch:
git('checkout', 'patch-queue/ceph-2-ubuntu')
recorder = CallRecorder()
monkeypatch.setattr('subprocess.check_call', recorder)
localbuild = MergePatches([])
localbuild._run(force=True)
# Verify that we run the "git reset" command here.
expected = ['git', 'reset', '--hard', 'patches/ceph-2-rhel-patches']
assert recorder.args == expected
class TestMergePatchesRhelPatchesBranch(object):
@pytest.mark.parametrize('debian_branch,expected', [
('ceph-1.3-ubuntu', 'ceph-1.3-rhel-patches'),
('ceph-2-ubuntu', 'ceph-2-rhel-patches'),
('ceph-2-trusty', 'ceph-2-rhel-patches'),
('ceph-2-xenial', 'ceph-2-rhel-patches'),
('someotherproduct-2-ubuntu', 'someotherproduct-2-rhel-patches'),
('ceph-2-ubuntu-hotfix-bz123', 'ceph-2-rhel-patches-hotfix-bz123'),
('ceph-2-ubuntu-test-bz456', 'ceph-2-rhel-patches-test-bz456'),
])
def test_get_rhel_patches_branch(self, debian_branch, expected):
m = MergePatches([])
assert m.get_rhel_patches_branch(debian_branch) == expected
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.