commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
198d4944e961fd998d6e896b3e75ca2e815ffaa5 | Add log to file function for vimapt package | src/vimapt/library/vimapt/__init__.py | src/vimapt/library/vimapt/__init__.py | import logging
logging.basicConfig(filename='/var/log/vimapt.log', level=logging.INFO)
logger = logging.getLogger(__name__)
| Python | 0 | |
a84dde598297495fe6f0f8b233b3a3761b0df7d4 | Update test to check newer logic | tests/functional/test_warning.py | tests/functional/test_warning.py | import textwrap
def test_environ(script, tmpdir):
"""$PYTHONWARNINGS was added in python2.7"""
demo = tmpdir.join('warnings_demo.py')
demo.write(textwrap.dedent('''
from logging import basicConfig
from pip._internal.utils import deprecation
deprecation.install_warning_logger()
basicConfig()
deprecation.deprecated("deprecated!", replacement=None, gone_in=None)
'''))
result = script.run('python', demo, expect_stderr=True)
expected = 'WARNING:pip._internal.deprecations:DEPRECATION: deprecated!\n'
assert result.stderr == expected
script.environ['PYTHONWARNINGS'] = 'ignore'
result = script.run('python', demo)
assert result.stderr == ''
|
def test_environ(script, tmpdir):
"""$PYTHONWARNINGS was added in python2.7"""
demo = tmpdir.join('warnings_demo.py')
demo.write('''
from pip._internal.utils import deprecation
deprecation.install_warning_logger()
from logging import basicConfig
basicConfig()
from warnings import warn
warn("deprecated!", deprecation.PipDeprecationWarning)
''')
result = script.run('python', demo, expect_stderr=True)
assert result.stderr == \
'ERROR:pip._internal.deprecations:DEPRECATION: deprecated!\n'
script.environ['PYTHONWARNINGS'] = 'ignore'
result = script.run('python', demo)
assert result.stderr == ''
| Python | 0 |
d49668dfb76e148fab6e878b2d1944a5e70a3c38 | fix test_cookie test on windows | tests/integration/test_cookie.py | tests/integration/test_cookie.py | # vim:ts=4:sw=4:et:
# Copyright 2018-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import socket
import pywatchman
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestCookie(WatchmanTestCase.WatchmanTestCase):
def test_delete_cookie_dir(self):
root = self.mkdtemp()
cookie_dir = os.path.join(root, ".hg")
os.mkdir(cookie_dir)
self.touchRelative(root, "foo")
self.watchmanCommand("watch-project", root)
self.assertFileList(root, files=["foo", ".hg"])
os.rmdir(cookie_dir)
self.assertFileList(root, files=["foo"])
os.unlink(os.path.join(root, "foo"))
self.assertFileList(root, files=[])
os.rmdir(root)
with self.assertRaises(pywatchman.WatchmanError) as ctx:
result = self.assertFileList(root, files=[])
print("Should not have gotten here, but the result was:", result)
reason = str(ctx.exception)
self.assertTrue(
("No such file" in reason)
or ("root dir was removed" in reason)
or ("The system cannot find the file specified" in reason)
or ("unable to resolve root" in reason),
msg=reason,
)
def test_other_cookies(self):
root = self.mkdtemp()
cookie_dir = os.path.join(root, ".git")
os.mkdir(cookie_dir)
self.watchmanCommand("watch", root)
host = socket.gethostname()
pid = self.watchmanCommand("get-pid")["pid"]
self.assertFileList(root, files=[".git"])
os.mkdir(os.path.join(root, "foo"))
# Same process, same watch
self.touchRelative(root, ".git/.watchman-cookie-%s-%d-1000000" % (host, pid))
cookies = [
# Same process, different watch root
"foo/.watchman-cookie-%s-%d-100000" % (host, pid),
# Same process, root dir instead of VCS dir
".watchman-cookie-%s-%d-100000" % (host, pid),
# Different process, same watch root
".git/.watchman-cookie-%s-1-100000" % host,
# Different process, root dir instead of VCS dir
".watchman-cookie-%s-1-100000" % host,
# Different process, different watch root
"foo/.watchman-cookie-%s-1-100000" % host,
]
for cookie in cookies:
self.touchRelative(root, cookie)
self.assertFileList(root, files=["foo", ".git"] + cookies)
| # vim:ts=4:sw=4:et:
# Copyright 2018-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import socket
import pywatchman
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestCookie(WatchmanTestCase.WatchmanTestCase):
def test_delete_cookie_dir(self):
root = self.mkdtemp()
cookie_dir = os.path.join(root, ".hg")
os.mkdir(cookie_dir)
self.touchRelative(root, "foo")
self.watchmanCommand("watch-project", root)
self.assertFileList(root, files=["foo", ".hg"])
os.rmdir(cookie_dir)
self.assertFileList(root, files=["foo"])
os.unlink(os.path.join(root, "foo"))
self.assertFileList(root, files=[])
os.rmdir(root)
with self.assertRaises(pywatchman.WatchmanError) as ctx:
result = self.assertFileList(root, files=[])
print("Should not have gotten here, but the result was:", result)
reason = str(ctx.exception)
self.assertTrue(
("No such file" in reason)
or ("root dir was removed" in reason)
or ("unable to resolve root" in reason),
msg=reason,
)
def test_other_cookies(self):
root = self.mkdtemp()
cookie_dir = os.path.join(root, ".git")
os.mkdir(cookie_dir)
self.watchmanCommand("watch", root)
host = socket.gethostname()
pid = self.watchmanCommand("get-pid")["pid"]
self.assertFileList(root, files=[".git"])
os.mkdir(os.path.join(root, "foo"))
# Same process, same watch
self.touchRelative(root, ".git/.watchman-cookie-%s-%d-1000000" % (host, pid))
cookies = [
# Same process, different watch root
"foo/.watchman-cookie-%s-%d-100000" % (host, pid),
# Same process, root dir instead of VCS dir
".watchman-cookie-%s-%d-100000" % (host, pid),
# Different process, same watch root
".git/.watchman-cookie-%s-1-100000" % host,
# Different process, root dir instead of VCS dir
".watchman-cookie-%s-1-100000" % host,
# Different process, different watch root
"foo/.watchman-cookie-%s-1-100000" % host,
]
for cookie in cookies:
self.touchRelative(root, cookie)
self.assertFileList(root, files=["foo", ".git"] + cookies)
| Python | 0.000001 |
a08c54d524e166d913c7e395e6a36cca76243df4 | add sqlite no-op tests | tests/integration/test_sqlite.py | tests/integration/test_sqlite.py | import os
import unittest
from threading import Thread
from unittest.mock import patch
from requests_cache.backends.sqlite import DbDict, DbPickleDict
from tests.integration.test_backends import BaseStorageTestCase
class SQLiteTestCase(BaseStorageTestCase):
def tearDown(self):
try:
os.unlink(self.NAMESPACE)
except Exception:
pass
def test_bulk_commit(self):
d = self.storage_class(self.NAMESPACE, self.TABLES[0])
with d.bulk_commit():
pass
d.clear()
n = 1000
with d.bulk_commit():
for i in range(n):
d[i] = i
assert list(d.keys()) == list(range(n))
def test_switch_commit(self):
d = self.storage_class(self.NAMESPACE)
d.clear()
d[1] = 1
d = self.storage_class(self.NAMESPACE)
assert 1 in d
d._can_commit = False
d[2] = 2
d = self.storage_class(self.NAMESPACE)
assert 2 not in d
assert d._can_commit is True
def test_fast_save(self):
d1 = self.storage_class(self.NAMESPACE, fast_save=True)
d2 = self.storage_class(self.NAMESPACE, self.TABLES[1], fast_save=True)
d1.clear()
n = 1000
for i in range(n):
d1[i] = i
d2[i * 2] = i
# HACK if we will not sort, fast save can produce different order of records
assert sorted(d1.keys()) == list(range(n))
assert sorted(d2.values()) == list(range(n))
def test_usage_with_threads(self):
def do_test_for(d, n_threads=5):
d.clear()
def do_inserts(values):
for v in values:
d[v] = v
def values(x, n):
return [i * x for i in range(n)]
threads = [Thread(target=do_inserts, args=(values(i, n_threads),)) for i in range(n_threads)]
for t in threads:
t.start()
for t in threads:
t.join()
for i in range(n_threads):
for x in values(i, n_threads):
assert d[x] == x
do_test_for(self.storage_class(self.NAMESPACE))
do_test_for(self.storage_class(self.NAMESPACE, fast_save=True), 20)
do_test_for(self.storage_class(self.NAMESPACE, fast_save=True))
do_test_for(self.storage_class(self.NAMESPACE, self.TABLES[1], fast_save=True))
def test_noop(self):
def do_noop_bulk(d):
with d.bulk_commit():
pass
del d
d = self.storage_class(self.NAMESPACE)
t = Thread(target=do_noop_bulk, args=(d,))
t.start()
t.join()
# make sure connection is not closed by the thread
d[0] = 0
assert str(d) == "{0: 0}"
class DbDictTestCase(SQLiteTestCase, unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, storage_class=DbDict, **kwargs)
class DbPickleDictTestCase(SQLiteTestCase, unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, storage_class=DbPickleDict, picklable=True, **kwargs)
@patch('requests_cache.backends.sqlite.sqlite3')
def test_connection_kwargs(mock_sqlite):
"""A spot check to make sure optional connection kwargs gets passed to connection"""
DbDict('test', timeout=0.5, invalid_kwarg='???')
mock_sqlite.connect.assert_called_with('test', timeout=0.5)
| import os
import unittest
from threading import Thread
from unittest.mock import patch
from requests_cache.backends.sqlite import DbDict, DbPickleDict
from tests.integration.test_backends import BaseStorageTestCase
class SQLiteTestCase(BaseStorageTestCase):
def tearDown(self):
try:
os.unlink(self.NAMESPACE)
except Exception:
pass
def test_bulk_commit(self):
d = self.storage_class(self.NAMESPACE, self.TABLES[0])
with d.bulk_commit():
pass
d.clear()
n = 1000
with d.bulk_commit():
for i in range(n):
d[i] = i
assert list(d.keys()) == list(range(n))
def test_switch_commit(self):
d = self.storage_class(self.NAMESPACE)
d.clear()
d[1] = 1
d = self.storage_class(self.NAMESPACE)
assert 1 in d
d._can_commit = False
d[2] = 2
d = self.storage_class(self.NAMESPACE)
assert 2 not in d
assert d._can_commit is True
def test_fast_save(self):
d1 = self.storage_class(self.NAMESPACE, fast_save=True)
d2 = self.storage_class(self.NAMESPACE, self.TABLES[1], fast_save=True)
d1.clear()
n = 1000
for i in range(n):
d1[i] = i
d2[i * 2] = i
# HACK if we will not sort, fast save can produce different order of records
assert sorted(d1.keys()) == list(range(n))
assert sorted(d2.values()) == list(range(n))
def test_usage_with_threads(self):
def do_test_for(d, n_threads=5):
d.clear()
def do_inserts(values):
for v in values:
d[v] = v
def values(x, n):
return [i * x for i in range(n)]
threads = [Thread(target=do_inserts, args=(values(i, n_threads),)) for i in range(n_threads)]
for t in threads:
t.start()
for t in threads:
t.join()
for i in range(n_threads):
for x in values(i, n_threads):
assert d[x] == x
do_test_for(self.storage_class(self.NAMESPACE))
do_test_for(self.storage_class(self.NAMESPACE, fast_save=True), 20)
do_test_for(self.storage_class(self.NAMESPACE, fast_save=True))
do_test_for(self.storage_class(self.NAMESPACE, self.TABLES[1], fast_save=True))
class DbDictTestCase(SQLiteTestCase, unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, storage_class=DbDict, **kwargs)
class DbPickleDictTestCase(SQLiteTestCase, unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, storage_class=DbPickleDict, picklable=True, **kwargs)
@patch('requests_cache.backends.sqlite.sqlite3')
def test_connection_kwargs(mock_sqlite):
"""A spot check to make sure optional connection kwargs gets passed to connection"""
DbDict('test', timeout=0.5, invalid_kwarg='???')
mock_sqlite.connect.assert_called_with('test', timeout=0.5)
| Python | 0.000002 |
21e95ff23a4ceca06d4bfd291f0e2b29b896af2f | Add tests for timeout and listen stop | tests/test_listener.py | tests/test_listener.py | #!/usr/bin/env python
import argparse
import os
import pytest
import pg_bawler.core
import pg_bawler.listener
class NotificationListener(
pg_bawler.core.BawlerBase,
pg_bawler.core.ListenerMixin
):
pass
class NotificationSender(
pg_bawler.core.BawlerBase,
pg_bawler.core.SenderMixin
):
pass
# TODO: Maybe as a pytest fixtures?
connection_params = dict(
dbname=os.environ.get('POSTGRES_DB', 'bawler_test'),
user=os.environ.get('POSTGRES_USER', 'postgres'),
host=os.environ.get('POSTGRES_HOST'),
password=os.environ.get('POSTGRES_PASSWORD', ''))
def test_register_handlers():
listener = pg_bawler.core.ListenerMixin()
assert listener.register_handler('channel', 'handler') is None
assert listener.registered_channels['channel'] == ['handler']
listener.unregister_handler('channel', 'handler')
assert listener.registered_channels['channel'] == []
listener.unregister_handler('channel', 'handler')
def test_default_cli_parser():
parser = pg_bawler.listener.get_default_cli_args_parser()
assert isinstance(parser, argparse.ArgumentParser)
def test_resolve_handler():
handler = pg_bawler.listener.resolve_handler(
'pg_bawler.listener:default_handler')
assert handler is pg_bawler.listener.default_handler
@pytest.mark.asyncio
async def test_simple_listen():
nl = NotificationListener(connection_params=connection_params)
ns = NotificationSender(connection_params=connection_params)
payload = 'aaa'
channel_name = 'pg_bawler_test'
await nl.register_channel(channel='pg_bawler_test')
await ns.send(channel=channel_name, payload=payload)
notification = await nl.get_notification()
assert notification.channel == channel_name
assert notification.payload == payload
@pytest.mark.asyncio
async def test_get_notification_timeout():
nl = NotificationListener(connection_params=connection_params)
nl.listen_timeout = 0
await nl.register_channel(channel='pg_bawler_test')
notification = await nl.get_notification()
assert notification is None
@pytest.mark.asyncio
async def test_stop_on_timeout():
nl = NotificationListener(connection_params=connection_params)
nl.listen_timeout = 0
nl.stop_on_timeout = True
await nl.register_channel(channel='pg_bawler_test')
notification = await nl.get_notification()
assert notification is None
assert nl.is_stopped
@pytest.mark.asyncio
async def test_stop_listener():
nl = NotificationListener(connection_params=connection_params)
await nl.stop()
await nl.listen()
# @pytest.mark.asyncio
# async def test_listener_main():
# ns = NotificationSender(connection_params=connection_params)
# payload = 'pg_bawler_test'
#
# async def handler(notification, listener):
# assert notification.payload == payload
# listener.stop()
#
# pg_bawler.listener._main(
# connection_params=connection_params,
# channel='pg_bawler_test',
# handler=handler)
# @pytest.mark.asyncio
# async def test_listener_main(event_loop):
# ns = NotificationSender(connection_params=connection_params)
# nl = NotificationListener(connection_params=connection_params)
# payload = 'pg_bawler_test'
#
# async def handler(notification, listener):
# assert notification.payload == payload
# await listener.stop()
#
# nl.timeout = 5
# nl.register_handler('channel', handler)
# await nl.register_channel('channel')
# event_loop.create_task(ns.send(channel='channel', payload=payload))
# await nl.listen()
| #!/usr/bin/env python
import argparse
import os
import pytest
import pg_bawler.core
import pg_bawler.listener
class NotificationListener(
pg_bawler.core.BawlerBase,
pg_bawler.core.ListenerMixin
):
pass
class NotificationSender(
pg_bawler.core.BawlerBase,
pg_bawler.core.SenderMixin
):
pass
# TODO: Maybe as a pytest fixtures?
connection_params = dict(
dbname=os.environ.get('POSTGRES_DB', 'bawler_test'),
user=os.environ.get('POSTGRES_USER', 'postgres'),
host=os.environ.get('POSTGRES_HOST'),
password=os.environ.get('POSTGRES_PASSWORD', ''))
def test_register_handlers():
listener = pg_bawler.core.ListenerMixin()
assert listener.register_handler(None) == 0
assert listener.register_handler(True) == 1
assert listener.unregister_handler(None)
assert not listener.unregister_handler(None)
def test_default_cli_parser():
parser = pg_bawler.listener.get_default_cli_args_parser()
assert isinstance(parser, argparse.ArgumentParser)
def test_resolve_handler():
handler = pg_bawler.listener.resolve_handler(
'pg_bawler.listener:default_handler')
assert handler is pg_bawler.listener.default_handler
@pytest.mark.asyncio
async def test_simple_listen():
connection_params = dict(
dbname=os.environ.get('POSTGRES_DB', 'bawler_test'),
user=os.environ.get('POSTGRES_USER', 'postgres'),
host=os.environ.get('POSTGRES_HOST'),
password=os.environ.get('POSTGRES_PASSWORD', ''))
nl = NotificationListener(connection_params=connection_params)
ns = NotificationSender(connection_params=connection_params)
payload = 'aaa'
channel_name = 'pg_bawler_test'
await nl.register_channel(channel='pg_bawler_test')
await ns.send(channel=channel_name, payload=payload)
notification = await nl.get_notification()
assert notification.channel == channel_name
assert notification.payload == payload
@pytest.mark.asyncio
async def test_get_notification_timeout():
nl = NotificationListener(connection_params=connection_params)
nl.listen_timeout = 0
await nl.register_channel(channel='pg_bawler_test')
notification = await nl.get_notification()
assert notification is None
| Python | 0 |
9c92cf39a69bbc6a078a8ffd7fcd8ea8f95b2678 | fix tests | tests/test_payments.py | tests/test_payments.py | # Test cases can be run with either of the following:
# python -m unittest discover
# nosetests -v --rednose --nologcapture
import unittest
from app import payments
from db import app_db, models
class TestModels(unittest.TestCase):
def setUp(self):
payments.app.debug = True
payments.app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://payments:payments@localhost:5432/test'
app_db.drop_all() # clean up the last tests
app_db.create_all() # make our sqlalchemy tables
data = {'nickname' : 'my credit', 'user_id' : 1, 'payment_type' : 'credit',
'details' : {'user_name' : 'Jimmy Jones', 'card_number' : '1111222233334444',
'expires' : '01/2019', 'card_type' : 'Mastercard'}}
payment = models.Payment()
payment.deserialize(data)
app_db.session.add(payment)
app_db.session.commit()
self.app = payments.app.test_client()
def tearDown(self):
app_db.session.remove()
app_db.drop_all()
def test_db_has_one_item(self):
p1 = app_db.session.query(models.Payment).get(1)
self.assertNotEqual(p1, None)
p2 = app_db.session.query(models.Payment).get(2)
self.assertEqual(p2, None)
def test_credit_has_no_paypal_fields(self):
payment = db.session.query(models.Payment).get(1)
self.assertEqual(payment.nickname, 'my credit')
detail = payment.details
self.assertEqual(detail.is_linked, None)
self.assertEqual(detail.user_email, None)
| # Test cases can be run with either of the following:
# python -m unittest discover
# nosetests -v --rednose --nologcapture
import unittest
import db
from app import payments
from db import db, models
class TestModels(unittest.TestCase):
def setUp(self):
payments.app.debug = True
payments.app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://payments:payments@localhost:5432/test'
db.drop_all() # clean up the last tests
db.create_all() # make our sqlalchemy tables
data = {'nickname' : 'my credit', 'user_id' : 1, 'payment_type' : 'credit',
'details' : {'user_name' : 'Jimmy Jones', 'card_number' : '1111222233334444',
'expires' : '01/2019', 'card_type' : 'Mastercard'}}
payment = models.Payment()
payment.deserialize(data)
db.session.add(payment)
db.session.commit()
self.app = payments.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_db_has_one_item(self):
p1 = db.session.query(models.Payment).get(1)
self.assertNotEqual(p1, None)
p2 = db.session.query(models.Payment).get(2)
self.assertEqual(p2, None)
def test_credit_has_no_paypal_fields(self):
payment = db.session.query(models.Payment).get(1)
self.assertEqual(payment.nickname, 'my credit')
detail = payment.details
self.assertEqual(detail.is_linked, None)
self.assertEqual(detail.user_email, None)
| Python | 0.000001 |
00f15f47f8eeabf336e0e2a71cda48aaef270f85 | Comment out apparently-unused code. | build/getversion.py | build/getversion.py | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# getversion.py - Parse version numbers from C header files.
#
import os
import re
import sys
__all__ = ['Parser', 'Result']
class Result:
pass
class Parser:
def __init__(self):
self.patterns = {}
def search(self, define_name, value_name):
'Add the name of a define to the list of search pattenrs.'
self.patterns[define_name] = value_name
def parse(self, file):
'Parse the file, extracting defines into a Result object.'
stream = open(file, 'rt')
result = Result()
regex = re.compile(r'^\s*#\s*define\s+(\w+)\s+(\d+)')
for line in stream.readlines():
match = regex.match(line)
if match:
try:
name = self.patterns[match.group(1)]
except:
continue
setattr(result, name, int(match.group(2)))
stream.close()
return result
def svn_extractor(parser, include_file):
'''Pull values from svn.version.h'''
p.search('SVN_VER_MAJOR', 'major')
p.search('SVN_VER_MINOR', 'minor')
p.search('SVN_VER_PATCH', 'patch')
try:
r = p.parse(include_file)
except IOError, e:
usage_and_exit(str(e))
sys.stdout.write("%d.%d.%d" % (r.major, r.minor, r.patch))
def sqlite_extractor(parser, include_file):
'''Pull values from sqlite3.h'''
p.search('SQLITE_VERSION_NUMBER', 'version')
try:
r = p.parse(include_file)
except IOError, e:
usage_and_exit(str(e))
major = r.version / 1000000
minor = (r.version - (major * 1000000)) / 1000
micro = (r.version - (major * 1000000) - (minor * 1000))
sys.stdout.write("%d.%d.%d" % (major, minor, micro))
extractors = {
'SVN' : svn_extractor,
# 'SQLITE' : sqlite_extractor, # not used
}
def usage_and_exit(msg):
if msg:
sys.stderr.write("%s\n\n" % msg)
sys.stderr.write("usage: %s [SVN|SQLITE] [header_file]\n" % \
os.path.basename(sys.argv[0]))
sys.stderr.flush()
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) == 3:
extractor = extractors[sys.argv[1]]
include_file = sys.argv[2]
else:
usage_and_exit("Incorrect number of arguments")
# Extract and print the version number
p = Parser()
extractor(p, include_file)
| #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# getversion.py - Parse version numbers from C header files.
#
import os
import re
import sys
__all__ = ['Parser', 'Result']
class Result:
pass
class Parser:
def __init__(self):
self.patterns = {}
def search(self, define_name, value_name):
'Add the name of a define to the list of search pattenrs.'
self.patterns[define_name] = value_name
def parse(self, file):
'Parse the file, extracting defines into a Result object.'
stream = open(file, 'rt')
result = Result()
regex = re.compile(r'^\s*#\s*define\s+(\w+)\s+(\d+)')
for line in stream.readlines():
match = regex.match(line)
if match:
try:
name = self.patterns[match.group(1)]
except:
continue
setattr(result, name, int(match.group(2)))
stream.close()
return result
def svn_extractor(parser, include_file):
'''Pull values from svn.version.h'''
p.search('SVN_VER_MAJOR', 'major')
p.search('SVN_VER_MINOR', 'minor')
p.search('SVN_VER_PATCH', 'patch')
try:
r = p.parse(include_file)
except IOError, e:
usage_and_exit(str(e))
sys.stdout.write("%d.%d.%d" % (r.major, r.minor, r.patch))
def sqlite_extractor(parser, include_file):
'''Pull values from sqlite3.h'''
p.search('SQLITE_VERSION_NUMBER', 'version')
try:
r = p.parse(include_file)
except IOError, e:
usage_and_exit(str(e))
major = r.version / 1000000
minor = (r.version - (major * 1000000)) / 1000
micro = (r.version - (major * 1000000) - (minor * 1000))
sys.stdout.write("%d.%d.%d" % (major, minor, micro))
extractors = {
'SVN' : svn_extractor,
'SQLITE' : sqlite_extractor,
}
def usage_and_exit(msg):
if msg:
sys.stderr.write("%s\n\n" % msg)
sys.stderr.write("usage: %s [SVN|SQLITE] [header_file]\n" % \
os.path.basename(sys.argv[0]))
sys.stderr.flush()
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) == 3:
extractor = extractors[sys.argv[1]]
include_file = sys.argv[2]
else:
usage_and_exit("Incorrect number of arguments")
# Extract and print the version number
p = Parser()
extractor(p, include_file)
| Python | 0 |
ee0f31857028a68116f2912054877f37bd64683a | fix vdsClient connections | ovirt_hosted_engine_ha/broker/submonitor_util.py | ovirt_hosted_engine_ha/broker/submonitor_util.py | #
# ovirt-hosted-engine-ha -- ovirt hosted engine high availability
# Copyright (C) 2013 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
import logging
import socket
import time
from otopi import util
from vdsm import vdscli
from . import constants
def run_vds_client_cmd(address, use_ssl, command):
"""
Run the passed in command name from the vdsClient library and either
throw an exception with the error message or return the results.
"""
# FIXME pass context to allow for shared or persistent vdsm connection
log = logging.getLogger('SubmonitorUtil')
log.debug("Connecting to vdsClient at %s with ssl=%r", address, use_ssl)
vdsClient = util.loadModule(
path=constants.VDS_CLIENT_DIR,
name='vdsClient'
)
if vdsClient._glusterEnabled:
serv = vdsClient.ge.GlusterService()
else:
serv = vdsClient.service()
serv.use_ssl = use_ssl
if hasattr(vdscli, 'cannonizeAddrPort'):
server, server_port = vdscli.cannonizeAddrPort(
address
).split(':', 1)
serv.do_connect(server, server_port)
else:
host_port = vdscli.cannonizeHostPort(address)
serv.do_connect(host_port)
log.debug("Connected")
method = getattr(serv.s, command)
retry = 0
while retry < constants.VDS_CLIENT_MAX_RETRY:
try:
response = method()
break
except socket.error:
log.debug("Error", exc_info=True)
retry += 1
time.sleep(1)
if retry >= constants.VDS_CLIENT_MAX_RETRY:
raise Exception("VDSM initialization timeout")
if response['status']['code'] != 0:
raise Exception("Error {0} from {1}: {2}",
response['status']['code'], command,
response['status']['message'])
return response
| #
# ovirt-hosted-engine-ha -- ovirt hosted engine high availability
# Copyright (C) 2013 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
import logging
import socket
import time
from otopi import util
from vdsm import vdscli
from . import constants
def run_vds_client_cmd(address, use_ssl, command):
"""
Run the passed in command name from the vdsClient library and either
throw an exception with the error message or return the results.
"""
# FIXME pass context to allow for shared or persistent vdsm connection
log = logging.getLogger('SubmonitorUtil')
log.debug("Connecting to vdsClient at %s with ssl=%r", address, use_ssl)
vdsClient = util.loadModule(
path=constants.VDS_CLIENT_DIR,
name='vdsClient'
)
if vdsClient._glusterEnabled:
serv = vdsClient.ge.GlusterService()
else:
serv = vdsClient.service()
serv.use_ssl = use_ssl
if hasattr(vdscli, 'cannonizeAddrPort'):
server, server_port = vdscli.cannonizeAddrPort(
address
).split(':', 1)
serv.do_connect(server, server_port)
else:
host_port = vdscli.cannonizeHostPort(address)
serv.do_connect(host_port)
serv.do_connect(server, server_port)
log.debug("Connected")
method = getattr(serv.s, command)
retry = 0
while retry < constants.VDS_CLIENT_MAX_RETRY:
try:
response = method()
break
except socket.error:
log.debug("Error", exc_info=True)
retry += 1
time.sleep(1)
if retry >= constants.VDS_CLIENT_MAX_RETRY:
raise Exception("VDSM initialization timeout")
if response['status']['code'] != 0:
raise Exception("Error {0} from {1}: {2}",
response['status']['code'], command,
response['status']['message'])
return response
| Python | 0 |
727b42a1cdec461d715b845872c321326ce18554 | Load aliases on module load | Modules/Alias.py | Modules/Alias.py | from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import GlobalVars
class Alias(ModuleInterface):
triggers = ["alias"]
help = 'alias <alias> <command> <params> - aliases <alias> to the specified command and parameters\n' \
'you can specify where parameters given to the alias should be inserted with $1, $2, $n. ' \
'you can use $1+, $2+ for all parameters after the first, second one' \
'The whole parameter string is $0. $sender and $channel can also be used.'
def onLoad(self):
self.bot.moduleHandler.commandAliases = self.bot.moduleHandler.loadAliases()
def onTrigger(self, message):
if message.User.Name not in GlobalVars.admins:
return IRCResponse(ResponseType.Say, "Only my admins may create new aliases!", message.ReplyTo)
if len(message.ParameterList) <= 1:
return IRCResponse(ResponseType.Say, "Alias what?", message.ReplyTo)
triggerFound = False
for (name, module) in self.bot.moduleHandler.modules.items():
if message.ParameterList[0] in module.triggers:
return IRCResponse(ResponseType.Say, "'{}' is already a command!".format(message.ParameterList[0]), message.ReplyTo)
if message.ParameterList[1] in module.triggers:
triggerFound = True
if not triggerFound:
return IRCResponse(ResponseType.Say, "'{}' is not a valid command!".format(message.ParameterList[1]), message.ReplyTo)
if message.ParameterList[0] in self.bot.moduleHandler.commandAliases.keys():
return IRCResponse(ResponseType.Say, "'{}' is already an alias!".format(message.ParameterList[0]), message.ReplyTo)
newAlias = []
for word in message.ParameterList[1:]:
newAlias.append(word.lower())
self.bot.moduleHandler.commandAliases[message.ParameterList[0]] = newAlias
self.bot.moduleHandler.newAlias(message.ParameterList[0], newAlias)
return IRCResponse(ResponseType.Say, "Created a new alias '{}' for '{}'.".format(message.ParameterList[0], " ".join(message.ParameterList[1:])), message.ReplyTo) | from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import GlobalVars
class Alias(ModuleInterface):
triggers = ["alias"]
help = 'alias <alias> <command> <params> - aliases <alias> to the specified command and parameters\n' \
'you can specify where parameters given to the alias should be inserted with $1, $2, $n. ' \
'you can use $1+, $2+ for all parameters after the first, second one' \
'The whole parameter string is $0. $sender and $channel can also be used.'
def onTrigger(self, message):
if message.User.Name not in GlobalVars.admins:
return IRCResponse(ResponseType.Say, "Only my admins may create new aliases!", message.ReplyTo)
if len(message.ParameterList) <= 1:
return IRCResponse(ResponseType.Say, "Alias what?", message.ReplyTo)
triggerFound = False
for (name, module) in self.bot.moduleHandler.modules.items():
if message.ParameterList[0] in module.triggers:
return IRCResponse(ResponseType.Say, "'{}' is already a command!".format(message.ParameterList[0]), message.ReplyTo)
if message.ParameterList[1] in module.triggers:
triggerFound = True
if not triggerFound:
return IRCResponse(ResponseType.Say, "'{}' is not a valid command!".format(message.ParameterList[1]), message.ReplyTo)
if message.ParameterList[0] in self.bot.moduleHandler.commandAliases.keys():
return IRCResponse(ResponseType.Say, "'{}' is already an alias!".format(message.ParameterList[0]), message.ReplyTo)
newAlias = []
for word in message.ParameterList[1:]:
newAlias.append(word.lower())
self.bot.moduleHandler.commandAliases[message.ParameterList[0]] = newAlias
self.bot.moduleHandler.newAlias(message.ParameterList[0], newAlias)
return IRCResponse(ResponseType.Say, "Created a new alias '{}' for '{}'.".format(message.ParameterList[0], " ".join(message.ParameterList[1:])), message.ReplyTo) | Python | 0 |
52cf1efd8b1f721d65732d16b171040d83d02b21 | fix test_workflow | tests/test_workflow.py | tests/test_workflow.py | from unittest import TestCase
from dvc.graph.workflow import Workflow
from dvc.graph.commit import Commit
class TestWorkflow(TestCase):
def setUp(self):
self._commit4 = Commit('4', '3', 'name1', 'today', 'comment4')
self._commit3 = Commit('3', '2', 'name1', 'today', 'DVC repro-run ...')
self._commit2 = Commit('2', '1', 'name1', 'today', 'DVC repro-run ...')
self._commit1 = Commit('1', '', 'name1', 'today', 'comment1')
def commits_basic_test(self):
self.assertFalse(self._commit1.is_repro)
self.assertTrue(self._commit2.is_repro)
self.assertTrue(self._commit3.is_repro)
self.assertFalse(self._commit4.is_repro)
pass
def workflow_basic_test(self):
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(self._commit2)
wf.add_commit(self._commit3)
wf.add_commit(self._commit4)
self.assertEqual(len(wf._commits), 4)
self.assertEqual(wf._commits['1'].text,
self._commit1._comment + '\n' + self._commit1._text_hash())
self.assertEqual(wf._commits['2'].text,
self._commit2._comment + '\n' + self._commit2._text_hash())
self.assertEqual(wf._commits['3'].text,
self._commit3._comment + '\n' + self._commit3._text_hash())
self.assertEqual(wf._commits['4'].text,
self._commit4._comment + '\n' + self._commit4._text_hash())
pass
def collapse_test(self):
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(self._commit2)
wf.add_commit(self._commit3)
wf.add_commit(self._commit4)
wf.collapse_repro_commits()
self.assertEqual(len(wf._commits), 3)
self.assertEqual(wf._commits[self._commit1.hash].text,
self._commit1._comment + '\n' + self._commit1._text_hash())
self.assertEqual(wf._commits[self._commit3.hash].text, Commit.COLLAPSED_TEXT)
self.assertTrue('2' not in wf._commits)
self.assertFalse('2' in wf._edges)
self.assertFalse('2' in wf._back_edges)
pass
def collapse_at_dead_end_test(self):
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(self._commit2)
wf.add_commit(self._commit3) # Dead end which cannot be collapsed
self.assertEqual(len(wf._commits), 3)
wf.collapse_repro_commits()
self.assertEqual(len(wf._commits), 2)
self.assertEqual(wf._commits[self._commit1.hash].text,
self._commit1._comment + '\n' + self._commit1._text_hash())
self.assertEqual(wf._commits[self._commit3.hash].text, Commit.COLLAPSED_TEXT)
self.assertTrue('2' not in wf._commits)
pass
def collapse_metric_commit_test(self):
value = 0.812345
branches = ['master', 'try_smth']
metric_commit3 = Commit('2', '1', 'name1', 'today', 'DVC repro-run ...',
True, value, branch_tips=branches)
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(metric_commit3)
wf.add_commit(self._commit3)
self.assertEqual(len(wf._commits), 3)
wf.collapse_repro_commits()
self.assertEqual(len(wf._commits), 2)
self.assertEqual(wf._commits['3']._target_metric, value)
self.assertEqual(wf._commits['3'].branch_tips, branches)
pass
| from unittest import TestCase
from dvc.graph.workflow import Workflow
from dvc.graph.commit import Commit
class TestWorkflow(TestCase):
def setUp(self):
self._commit4 = Commit('4', '3', 'name1', 'today', 'comment4')
self._commit3 = Commit('3', '2', 'name1', 'today', 'DVC repro-run ...')
self._commit2 = Commit('2', '1', 'name1', 'today', 'DVC repro-run ...')
self._commit1 = Commit('1', '', 'name1', 'today', 'comment1')
def commits_basic_test(self):
self.assertFalse(self._commit1.is_repro)
self.assertTrue(self._commit2.is_repro)
self.assertTrue(self._commit3.is_repro)
self.assertFalse(self._commit4.is_repro)
pass
def workflow_basic_test(self):
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(self._commit2)
wf.add_commit(self._commit3)
wf.add_commit(self._commit4)
self.assertEqual(len(wf._commits), 4)
self.assertEqual(wf._commits['1'].text, self._commit1._comment + '\n' + self._commit1.hash)
self.assertEqual(wf._commits['2'].text, self._commit2._comment + '\n' + self._commit2.hash)
self.assertEqual(wf._commits['3'].text, self._commit3._comment + '\n' + self._commit3.hash)
self.assertEqual(wf._commits['4'].text, self._commit4._comment + '\n' + self._commit4.hash)
pass
def collapse_test(self):
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(self._commit2)
wf.add_commit(self._commit3)
wf.add_commit(self._commit4)
wf.collapse_repro_commits()
self.assertEqual(len(wf._commits), 3)
self.assertEqual(wf._commits[self._commit1.hash].text, self._commit1._comment + '\n' + self._commit1.hash)
self.assertEqual(wf._commits[self._commit3.hash].text, Commit.COLLAPSED_TEXT)
self.assertTrue('2' not in wf._commits)
self.assertFalse('2' in wf._edges)
self.assertFalse('2' in wf._back_edges)
pass
def collapse_at_dead_end_test(self):
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(self._commit2)
wf.add_commit(self._commit3) # Dead end which cannot be collapsed
self.assertEqual(len(wf._commits), 3)
wf.collapse_repro_commits()
self.assertEqual(len(wf._commits), 2)
self.assertEqual(wf._commits[self._commit1.hash].text, self._commit1._comment + '\n' + self._commit1.hash)
self.assertEqual(wf._commits[self._commit3.hash].text, Commit.COLLAPSED_TEXT)
self.assertTrue('2' not in wf._commits)
pass
def collapse_metric_commit_test(self):
value = 0.812345
branches = ['master', 'try_smth']
metric_commit3 = Commit('2', '1', 'name1', 'today', 'DVC repro-run ...',
True, value, branch_tips=branches)
wf = Workflow('', '')
wf.add_commit(self._commit1)
wf.add_commit(metric_commit3)
wf.add_commit(self._commit3)
self.assertEqual(len(wf._commits), 3)
wf.collapse_repro_commits()
self.assertEqual(len(wf._commits), 2)
self.assertEqual(wf._commits['3']._target_metric, value)
self.assertEqual(wf._commits['3'].branch_tips, branches)
pass | Python | 0 |
ede603fd2b63f101174d4312ed77f710aaaeec3a | comment out test for `test_data_split_nlu` | tests/cli/test_rasa_data.py | tests/cli/test_rasa_data.py | import argparse
import os
from unittest.mock import Mock
import pytest
from collections import namedtuple
from typing import Callable, Text
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import RunResult
from rasa.cli import data
from rasa.importers.importer import TrainingDataImporter
from rasa.validator import Validator
def test_data_split_nlu(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"data", "split", "nlu", "-u", "data/nlu.yml", "--training-fraction", "0.75"
)
assert os.path.exists("train_test_split")
# TODO: Comment back in as soon as NLU YAML writer is merged
# https://github.com/RasaHQ/rasa/issues/6363
# assert os.path.exists(os.path.join("train_test_split", "test_data.md"))
# assert os.path.exists(os.path.join("train_test_split", "training_data.md"))
def test_data_convert_nlu(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"data",
"convert",
"nlu",
"--data",
"data/nlu.yml",
"--out",
"out_nlu_data.json",
"-f",
"json",
)
assert os.path.exists("out_nlu_data.json")
def test_data_split_help(run: Callable[..., RunResult]):
output = run("data", "split", "nlu", "--help")
help_text = """usage: rasa data split nlu [-h] [-v] [-vv] [--quiet] [-u NLU]
[--training-fraction TRAINING_FRACTION]
[--random-seed RANDOM_SEED] [--out OUT]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_data_convert_help(run: Callable[..., RunResult]):
output = run("data", "convert", "nlu", "--help")
help_text = """usage: rasa data convert nlu [-h] [-v] [-vv] [--quiet] --data DATA --out OUT
[-l LANGUAGE] -f {json,md}"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_data_validate_help(run: Callable[..., RunResult]):
output = run("data", "validate", "--help")
help_text = """usage: rasa data validate [-h] [-v] [-vv] [--quiet]
[--max-history MAX_HISTORY] [--fail-on-warnings]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def _text_is_part_of_output_error(text: Text, output: RunResult) -> bool:
found_info_string = False
for line in output.errlines:
if text in line:
found_info_string = True
return found_info_string
def test_data_validate_stories_with_max_history_zero(monkeypatch: MonkeyPatch):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help="Rasa commands")
data.add_subparser(subparsers, parents=[])
args = parser.parse_args(["data", "validate", "stories", "--max-history", 0])
async def mock_from_importer(importer: TrainingDataImporter) -> Validator:
return Mock()
monkeypatch.setattr("rasa.validator.Validator.from_importer", mock_from_importer)
with pytest.raises(argparse.ArgumentTypeError):
data.validate_files(args)
def test_validate_files_exit_early():
with pytest.raises(SystemExit) as pytest_e:
args = {
"domain": "data/test_domains/duplicate_intents.yml",
"data": None,
"max_history": None,
}
data.validate_files(namedtuple("Args", args.keys())(*args.values()))
assert pytest_e.type == SystemExit
assert pytest_e.value.code == 1
| import argparse
import os
from unittest.mock import Mock
import pytest
from collections import namedtuple
from typing import Callable, Text
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import RunResult
from rasa.cli import data
from rasa.importers.importer import TrainingDataImporter
from rasa.validator import Validator
def test_data_split_nlu(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"data", "split", "nlu", "-u", "data/nlu.yml", "--training-fraction", "0.75"
)
assert os.path.exists("train_test_split")
assert os.path.exists(os.path.join("train_test_split", "test_data.md"))
assert os.path.exists(os.path.join("train_test_split", "training_data.md"))
def test_data_convert_nlu(run_in_simple_project: Callable[..., RunResult]):
run_in_simple_project(
"data",
"convert",
"nlu",
"--data",
"data/nlu.yml",
"--out",
"out_nlu_data.json",
"-f",
"json",
)
assert os.path.exists("out_nlu_data.json")
def test_data_split_help(run: Callable[..., RunResult]):
output = run("data", "split", "nlu", "--help")
help_text = """usage: rasa data split nlu [-h] [-v] [-vv] [--quiet] [-u NLU]
[--training-fraction TRAINING_FRACTION]
[--random-seed RANDOM_SEED] [--out OUT]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_data_convert_help(run: Callable[..., RunResult]):
output = run("data", "convert", "nlu", "--help")
help_text = """usage: rasa data convert nlu [-h] [-v] [-vv] [--quiet] --data DATA --out OUT
[-l LANGUAGE] -f {json,md}"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_data_validate_help(run: Callable[..., RunResult]):
output = run("data", "validate", "--help")
help_text = """usage: rasa data validate [-h] [-v] [-vv] [--quiet]
[--max-history MAX_HISTORY] [--fail-on-warnings]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def _text_is_part_of_output_error(text: Text, output: RunResult) -> bool:
found_info_string = False
for line in output.errlines:
if text in line:
found_info_string = True
return found_info_string
def test_data_validate_stories_with_max_history_zero(monkeypatch: MonkeyPatch):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help="Rasa commands")
data.add_subparser(subparsers, parents=[])
args = parser.parse_args(["data", "validate", "stories", "--max-history", 0])
async def mock_from_importer(importer: TrainingDataImporter) -> Validator:
return Mock()
monkeypatch.setattr("rasa.validator.Validator.from_importer", mock_from_importer)
with pytest.raises(argparse.ArgumentTypeError):
data.validate_files(args)
def test_validate_files_exit_early():
with pytest.raises(SystemExit) as pytest_e:
args = {
"domain": "data/test_domains/duplicate_intents.yml",
"data": None,
"max_history": None,
}
data.validate_files(namedtuple("Args", args.keys())(*args.values()))
assert pytest_e.type == SystemExit
assert pytest_e.value.code == 1
| Python | 0 |
a69a346e2fd35e531c72b06a2c895d928340c110 | Fix `includes_today` trait fo `MembershipFactory` | tests/factories/property.py | tests/factories/property.py | from datetime import datetime, timedelta, timezone
from functools import partial
from itertools import chain
import factory
from pycroft.model.user import Membership, PropertyGroup
from pycroft.helpers import interval
from .base import BaseFactory
from .user import UserFactory
class MembershipFactory(BaseFactory):
class Meta:
model = Membership
exclude = ('begins_at', 'ends_at')
begins_at = datetime.now(timezone.utc)
ends_at = None
active_during = interval.closedopen(begins_at, ends_at)
user = factory.SubFactory(UserFactory)
# note: group is non-nullable!
group = None
class Params:
includes_today = factory.Trait(
active_during=interval.closedopen(
datetime.now(timezone.utc) - timedelta(1),
datetime.now(timezone.utc) + timedelta(1),
),
)
def _maybe_append_seq(n, prefix):
"""Append a sequence value to a prefix if non-zero"""
if not n:
return prefix
return "{} {}".format(prefix, n)
class PropertyGroupFactory(BaseFactory):
class Meta:
model = PropertyGroup
exclude = ('granted', 'denied')
granted = frozenset()
denied = frozenset()
name = factory.Sequence(lambda n: "Property group %s" % n)
permission_level = factory.LazyAttribute(lambda _: 0)
@factory.lazy_attribute
def property_grants(self):
return dict(chain(((k, True) for k in self.granted),
((k, False) for k in self.denied)))
class AdminPropertyGroupFactory(PropertyGroupFactory):
name = factory.Sequence(partial(_maybe_append_seq, prefix="Admin-Gruppe"))
granted = frozenset((
'user_show', 'user_change', 'user_mac_change',
'infrastructure_show', 'infrastructure_change',
'facilities_show', 'facilities_change',
'groups_show', 'groups_change_membership', 'groups_change',
))
permission_level = 10
class FinancePropertyGroupFactory(PropertyGroupFactory):
name = factory.Sequence(partial(_maybe_append_seq, prefix="Finanzer-Gruppe"))
granted = frozenset(('finance_show', 'finance_change'))
permission_level = 80
class MemberPropertyGroupFactory(PropertyGroupFactory):
name = factory.Sequence(partial(_maybe_append_seq, prefix="Mitglied-Gruppe"))
granted = frozenset((
'ldap', 'ldap_login_enabled', 'mail', 'member', 'membership_fee',
'network_access', 'userdb', 'userwww'
))
| from datetime import datetime, timedelta, timezone
from functools import partial
from itertools import chain
import factory
from pycroft.model.user import Membership, PropertyGroup
from pycroft.helpers import interval
from .base import BaseFactory
from .user import UserFactory
class MembershipFactory(BaseFactory):
class Meta:
model = Membership
exclude = ('begins_at', 'ends_at')
begins_at = datetime.now(timezone.utc)
ends_at = None
active_during = interval.closedopen(begins_at, ends_at)
user = factory.SubFactory(UserFactory)
# note: group is non-nullable!
group = None
class Params:
includes_today = factory.Trait(
begins_at=datetime.now(timezone.utc) - timedelta(1),
ends_at=datetime.now(timezone.utc) + timedelta(1),
)
def _maybe_append_seq(n, prefix):
"""Append a sequence value to a prefix if non-zero"""
if not n:
return prefix
return "{} {}".format(prefix, n)
class PropertyGroupFactory(BaseFactory):
class Meta:
model = PropertyGroup
exclude = ('granted', 'denied')
granted = frozenset()
denied = frozenset()
name = factory.Sequence(lambda n: "Property group %s" % n)
permission_level = factory.LazyAttribute(lambda _: 0)
@factory.lazy_attribute
def property_grants(self):
return dict(chain(((k, True) for k in self.granted),
((k, False) for k in self.denied)))
class AdminPropertyGroupFactory(PropertyGroupFactory):
name = factory.Sequence(partial(_maybe_append_seq, prefix="Admin-Gruppe"))
granted = frozenset((
'user_show', 'user_change', 'user_mac_change',
'infrastructure_show', 'infrastructure_change',
'facilities_show', 'facilities_change',
'groups_show', 'groups_change_membership', 'groups_change',
))
permission_level = 10
class FinancePropertyGroupFactory(PropertyGroupFactory):
name = factory.Sequence(partial(_maybe_append_seq, prefix="Finanzer-Gruppe"))
granted = frozenset(('finance_show', 'finance_change'))
permission_level = 80
class MemberPropertyGroupFactory(PropertyGroupFactory):
name = factory.Sequence(partial(_maybe_append_seq, prefix="Mitglied-Gruppe"))
granted = frozenset((
'ldap', 'ldap_login_enabled', 'mail', 'member', 'membership_fee',
'network_access', 'userdb', 'userwww'
))
| Python | 0 |
acce8817eae67dc605ffe628d0d536511d3ea915 | remove dead code | corehq/apps/ota/forms.py | corehq/apps/ota/forms.py | from django import forms
from django.utils.translation import gettext
from crispy_forms import layout as crispy
# todo proper B3 Handle
from crispy_forms.bootstrap import StrictButton
from crispy_forms.helper import FormHelper
from corehq.apps.hqwebapp import crispy as hqcrispy
class PrimeRestoreCacheForm(forms.Form):
info_text = gettext(
"For projects where mobile users manage a lot of cases (e.g. more than 10,000), "
"this tool can be used to temporarily speed up phone sync times. Once activated, "
"it will ensure that the 'Sync with Server' functionality runs faster on the phone for 24 hours.")
def __init__(self, *args, **kwargs):
super(PrimeRestoreCacheForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_action = '.'
self.helper.layout = crispy.Layout(
crispy.HTML("<p>" + self.info_text + "</p>"),
hqcrispy.FormActions(
StrictButton(
"Click here to speed up 'Sync with Server'",
css_class="btn-primary",
type="submit",
),
),
)
| from django import forms
from django.utils.translation import gettext
from crispy_forms import layout as crispy
# todo proper B3 Handle
from crispy_forms.bootstrap import StrictButton
from crispy_forms.helper import FormHelper
from corehq.apps.hqwebapp import crispy as hqcrispy
class PrimeRestoreCacheForm(forms.Form):
info_text = gettext(
"For projects where mobile users manage a lot of cases (e.g. more than 10,000), "
"this tool can be used to temporarily speed up phone sync times. Once activated, "
"it will ensure that the 'Sync with Server' functionality runs faster on the phone for 24 hours.")
def __init__(self, *args, **kwargs):
super(PrimeRestoreCacheForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_action = '.'
self.helper.layout = crispy.Layout(
crispy.HTML("<p>" + self.info_text + "</p>"),
hqcrispy.FormActions(
StrictButton(
"Click here to speed up 'Sync with Server'",
css_class="btn-primary",
type="submit",
),
),
)
class AdvancedPrimeRestoreCacheForm(forms.Form):
check_cache_only = forms.BooleanField(
label='Check cache only',
help_text="Just check the cache, don't actually generate the restore response.",
required=False
)
overwrite_cache = forms.BooleanField(
label='Overwrite existing cache',
help_text=('This will ignore any existing cache and '
're-calculate the restore response for each user'),
required=False
)
all_users = forms.BooleanField(
label='Include all users',
required=False
)
users = forms.CharField(
label='User list',
help_text=('One username or user_id per line '
'(username e.g. mobile_worker_1)'),
widget=forms.Textarea(attrs={'rows': '5', 'cols': '50'}),
required=False
)
def __init__(self, *args, **kwargs):
super(AdvancedPrimeRestoreCacheForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.form_method = 'post'
self.helper.form_action = '.'
self.helper.layout = crispy.Layout(
crispy.Field('check_cache_only', data_ng_model='check_cache_only'),
crispy.Div(
'version',
'cache_timeout',
'overwrite_cache',
data_ng_hide='check_cache_only'
),
crispy.Field('all_users', data_ng_model='all_users'),
'domain',
crispy.Div('users', data_ng_hide='all_users'),
hqcrispy.FormActions(
StrictButton(
"Submit",
css_class="btn-primary",
type="submit",
),
),
)
def clean_users(self):
user_ids = self.cleaned_data['users'].splitlines()
self.user_ids = [_f for _f in user_ids if _f]
return self.cleaned_data['users']
def clean(self):
cleaned_data = super(AdvancedPrimeRestoreCacheForm, self).clean()
if not self.user_ids and not cleaned_data['all_users']:
raise forms.ValidationError("Please supply user IDs or select the 'All Users' option")
return cleaned_data
| Python | 0.999454 |
24439d318668897d8d1aff99df1606e80d45b875 | add watchdog test | tests/test_bmc.py | tests/test_bmc.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from nose.tools import eq_, raises
from pyipmi.bmc import *
import pyipmi.msgs.bmc
from pyipmi.msgs import encode_message
from pyipmi.msgs import decode_message
def test_watchdog_object():
m = pyipmi.msgs.bmc.GetWatchdogTimerRsp()
decode_message(m, '\x00\x41\x42\x33\x44\x55\x66\x77\x88')
w = Watchdog(m)
eq_(w.timer_use, 1)
eq_(w.is_running, 1)
eq_(w.dont_log, 0)
eq_(w.timeout_action, 2)
eq_(w.pre_timeout_interrupt, 4)
eq_(w.pre_timeout_interval, 0x33)
eq_(w.timer_use_expiration_flags, 0x44)
eq_(w.initial_countdown, 0x6655)
eq_(w.present_countdown, 0x8877)
def test_deviceid_object():
m = pyipmi.msgs.bmc.GetDeviceIdRsp()
decode_message(m, '\x00\x12\x84\x05\x67\x51\x55\x12\x34\x56\x44\x55')
d = DeviceId(m)
eq_(d.device_id, 18)
eq_(d.revision, 4)
eq_(d.provides_sdrs, True)
eq_(str(d.fw_revision), '5.67')
eq_(str(d.ipmi_version), '1.5')
eq_(d.manufacturer_id, 5649426)
eq_(d.product_id, 21828)
eq_(d.aux, None)
| #!/usr/bin/env python
#-*- coding: utf-8 -*-
from nose.tools import eq_, raises
from pyipmi.bmc import *
import pyipmi.msgs.bmc
from pyipmi.msgs import encode_message
from pyipmi.msgs import decode_message
def test_deviceid_object():
m = pyipmi.msgs.bmc.GetDeviceIdRsp()
decode_message(m, '\x00\x12\x84\x05\x67\x51\x55\x12\x34\x56\x44\x55')
d = DeviceId(m)
eq_(d.device_id, 18)
eq_(d.revision, 4)
eq_(d.provides_sdrs, True)
eq_(str(d.fw_revision), '5.67')
eq_(str(d.ipmi_version), '1.5')
eq_(d.manufacturer_id, 5649426)
eq_(d.product_id, 21828)
eq_(d.aux, None)
| Python | 0 |
5ae1d9ebcc34d47c858ba63e26121be92771d812 | temporary fix test_bot login | tests/test_bot.py | tests/test_bot.py | import json
import requests
from instabot import Bot
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
class TestBot:
def setup(self):
self.USER_ID = 1234567
self.USERNAME = "test_username"
self.PASSWORD = "test_password"
self.FULLNAME = "test_full_name"
self.TOKEN = "abcdef123456"
self.bot = Bot()
self.prepare_api(self.bot)
def prepare_api(self, bot):
bot.api.is_logged_in = True
bot.api.session = requests.Session()
cookies = Mock()
cookies.return_value = {"csrftoken": self.TOKEN, "ds_user_id": self.USER_ID}
bot.api.session.cookies.get_dict = cookies
bot.api.set_user(self.USERNAME, self.PASSWORD)
class TestBotAPI(TestBot):
@patch("instabot.API.load_uuid_and_cookie")
def test_login(self, load_cookie_mock):
self.bot = Bot()
load_cookie_mock.side_effect = Exception()
def mockreturn(*args, **kwargs):
r = Mock()
r.status_code = 200
r.text = '{"status": "ok"}'
return r
def mockreturn_login(*args, **kwargs):
r = Mock()
r.status_code = 200
r.text = json.dumps(
{
"logged_in_user": {
"pk": self.USER_ID,
"username": self.USERNAME,
"full_name": self.FULLNAME,
},
"status": "ok",
}
)
return r
with patch("requests.Session") as Session:
instance = Session.return_value
instance.get.return_value = mockreturn()
instance.post.return_value = mockreturn_login()
instance.cookies = requests.cookies.RequestsCookieJar()
instance.cookies.update(
{"csrftoken": self.TOKEN, "ds_user_id": self.USER_ID}
)
assert self.bot.api.login(
username=self.USERNAME,
password=self.PASSWORD,
use_cookie=False
)
assert self.bot.api.username == self.USERNAME
assert self.bot.user_id == self.USER_ID
assert self.bot.api.is_logged_in
assert self.bot.api.uuid
assert self.bot.api.token
def test_generate_uuid(self):
from uuid import UUID
generated_uuid = self.bot.api.generate_UUID(True)
assert isinstance(UUID(generated_uuid), UUID)
assert UUID(generated_uuid).hex == generated_uuid.replace("-", "")
def test_set_user(self):
test_username = "abcdef"
test_password = "passwordabc"
self.bot.api.set_user(test_username, test_password)
assert self.bot.api.username == test_username
assert self.bot.api.password == test_password
assert hasattr(self.bot.api, "uuid")
def test_reset_counters(self):
keys = [
"liked",
"unliked",
"followed",
"messages",
"unfollowed",
"commented",
"blocked",
"unblocked",
]
for key in keys:
self.bot.total[key] = 1
assert self.bot.total[key] == 1
self.bot.reset_counters()
for key in keys:
assert self.bot.total[key] == 0
| import json
import requests
from instabot import Bot
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
class TestBot:
def setup(self):
self.USER_ID = 1234567
self.USERNAME = "test_username"
self.PASSWORD = "test_password"
self.FULLNAME = "test_full_name"
self.TOKEN = "abcdef123456"
self.bot = Bot()
self.prepare_api(self.bot)
def prepare_api(self, bot):
bot.api.is_logged_in = True
bot.api.session = requests.Session()
cookies = Mock()
cookies.return_value = {"csrftoken": self.TOKEN, "ds_user_id": self.USER_ID}
bot.api.session.cookies.get_dict = cookies
bot.api.set_user(self.USERNAME, self.PASSWORD)
class TestBotAPI(TestBot):
@patch("instabot.API.load_uuid_and_cookie")
def test_login(self, load_cookie_mock):
self.bot = Bot()
load_cookie_mock.side_effect = Exception()
def mockreturn(*args, **kwargs):
r = Mock()
r.status_code = 200
r.text = '{"status": "ok"}'
return r
def mockreturn_login(*args, **kwargs):
r = Mock()
r.status_code = 200
r.text = json.dumps(
{
"logged_in_user": {
"pk": self.USER_ID,
"username": self.USERNAME,
"full_name": self.FULLNAME,
},
"status": "ok",
}
)
return r
with patch("requests.Session") as Session:
instance = Session.return_value
instance.get.return_value = mockreturn()
instance.post.return_value = mockreturn_login()
instance.cookies = requests.cookies.RequestsCookieJar()
instance.cookies.update(
{"csrftoken": self.TOKEN, "ds_user_id": self.USER_ID}
)
assert self.bot.api.login(username=self.USERNAME, password=self.PASSWORD)
assert self.bot.api.username == self.USERNAME
assert self.bot.user_id == self.USER_ID
assert self.bot.api.is_logged_in
assert self.bot.api.uuid
assert self.bot.api.token
def test_generate_uuid(self):
from uuid import UUID
generated_uuid = self.bot.api.generate_UUID(True)
assert isinstance(UUID(generated_uuid), UUID)
assert UUID(generated_uuid).hex == generated_uuid.replace("-", "")
def test_set_user(self):
test_username = "abcdef"
test_password = "passwordabc"
self.bot.api.set_user(test_username, test_password)
assert self.bot.api.username == test_username
assert self.bot.api.password == test_password
assert hasattr(self.bot.api, "uuid")
def test_reset_counters(self):
keys = [
"liked",
"unliked",
"followed",
"messages",
"unfollowed",
"commented",
"blocked",
"unblocked",
]
for key in keys:
self.bot.total[key] = 1
assert self.bot.total[key] == 1
self.bot.reset_counters()
for key in keys:
assert self.bot.total[key] == 0
| Python | 0.997568 |
e6519d121ab80467fafdab6a2183964d97ef60e8 | Add test for set_meta command. | tests/test_cli.py | tests/test_cli.py | # -*- coding: utf-8 -*-
import os
from click.testing import CliRunner
from sigal import init
from sigal import serve
from sigal import set_meta
def test_init(tmpdir):
config_file = str(tmpdir.join('sigal.conf.py'))
runner = CliRunner()
result = runner.invoke(init, [config_file])
assert result.exit_code == 0
assert result.output.startswith('Sample config file created:')
assert os.path.isfile(config_file)
result = runner.invoke(init, [config_file])
assert result.exit_code == 1
assert result.output == ("Found an existing config file, will abort to "
"keep it safe.\n")
def test_serve(tmpdir):
config_file = str(tmpdir.join('sigal.conf.py'))
runner = CliRunner()
result = runner.invoke(init, [config_file])
assert result.exit_code == 0
result = runner.invoke(serve)
assert result.exit_code == 2
result = runner.invoke(serve, ['-c', config_file])
assert result.exit_code == 1
def test_set_meta(tmpdir):
testdir = tmpdir.mkdir("test")
testfile = tmpdir.join("test.jpg")
testfile.write("")
runner = CliRunner()
result = runner.invoke(set_meta, [str(testdir), "title", "testing"])
assert result.exit_code == 0
assert result.output.startswith("1 metadata key(s) written to")
assert os.path.isfile(str(testdir.join("index.md")))
assert testdir.join("index.md").read() == "Title: testing\n"
# Run again, should give file exists error
result = runner.invoke(set_meta, [str(testdir), "title", "testing"])
assert result.exit_code == 2
result = runner.invoke(set_meta, [str(testdir.join("non-existant.jpg")), "title", "testing"])
assert result.exit_code == 1
result = runner.invoke(set_meta, [str(testfile), "title", "testing"])
assert result.exit_code == 0
assert result.output.startswith("1 metadata key(s) written to")
assert os.path.isfile(str(tmpdir.join("test.md")))
assert tmpdir.join("test.md").read() == "Title: testing\n"
| # -*- coding: utf-8 -*-
import os
from click.testing import CliRunner
from sigal import init
from sigal import serve
def test_init(tmpdir):
config_file = str(tmpdir.join('sigal.conf.py'))
runner = CliRunner()
result = runner.invoke(init, [config_file])
assert result.exit_code == 0
assert result.output.startswith('Sample config file created:')
assert os.path.isfile(config_file)
result = runner.invoke(init, [config_file])
assert result.exit_code == 1
assert result.output == ("Found an existing config file, will abort to "
"keep it safe.\n")
def test_serve(tmpdir):
config_file = str(tmpdir.join('sigal.conf.py'))
runner = CliRunner()
result = runner.invoke(init, [config_file])
assert result.exit_code == 0
result = runner.invoke(serve)
assert result.exit_code == 2
result = runner.invoke(serve, ['-c', config_file])
assert result.exit_code == 1
| Python | 0 |
f5652e96edf871ca88e80a920cfc97876e7531a3 | modify tests (add example for string => int) | tests/test_fst.py | tests/test_fst.py | import os, sys
# TODO: better way to find package...
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
import fst
from fst import Matcher
import unittest
from struct import pack, unpack
class TestFST(unittest.TestCase):
def test_create_minimum_transducer1(self):
dict_file = '/tmp/dict1.dat'
inputs = [
('apr'.encode('utf8'), pack('i', 30)),
('aug'.encode('utf8'), pack('i', 31)),
('dec'.encode('utf8'), pack('i', 31)),
('feb'.encode('utf8'), pack('i', 28)),
('feb'.encode('utf8'), pack('i', 29)),
('jan'.encode('utf8'), pack('i', 31)),
('jul'.encode('utf8'), pack('i', 31)),
('jun'.encode('utf8'), pack('i', 30)),
('may'.encode('utf8'), pack('i', 31))
]
dictionary = fst.create_minimum_transducer(inputs)
data = fst.compileFST(dictionary)
m = Matcher(data)
# accepted strings
self.assertEqual((True, set([pack('i', 30)])), m.run('apr'.encode('utf8')))
self.assertEqual((True, set([pack('i', 31)])), m.run('aug'.encode('utf8')))
self.assertEqual((True, set([pack('i', 31)])), m.run('dec'.encode('utf8')))
self.assertEqual((True, set([pack('i', 28), pack('i', 29)])), m.run('feb'.encode('utf8')))
self.assertEqual((True, set([pack('i', 31)])), m.run('jan'.encode('utf8')))
self.assertEqual((True, set([pack('i', 31)])), m.run('jul'.encode('utf8')))
self.assertEqual((True, set([pack('i', 30)])), m.run('jun'.encode('utf8')))
self.assertEqual((True, set([pack('i', 31)])), m.run('may'.encode('utf8')))
# not accepted string
self.assertEqual((False, set()), m.run('mar'))
def test_create_minimum_transducer2(self):
dict_file = '/tmp/dict2.dat'
inputs = [
('さくら'.encode('utf8'), '白'.encode('utf8')),
('さくらんぼ'.encode('utf8'), '赤'.encode('utf8')),
('すもも'.encode('utf8'), '赤'.encode('utf8')),
('なし'.encode('utf8'), '茶'.encode('utf8')),
('もも'.encode('utf8'), '桃'.encode('utf8')),
]
dictionary = fst.create_minimum_transducer(inputs)
data = fst.compileFST(dictionary)
fst.save(dict_file, data)
self.assertGreater(os.path.getsize(dict_file), 0)
m = Matcher(file=dict_file)
# accepted strings
self.assertEqual((True, set(['白'.encode('utf8')])), m.run('さくら'.encode('utf8')))
self.assertEqual((True, set(['白'.encode('utf8'), '赤'.encode('utf8')])), m.run('さくらんぼ'.encode('utf8')))
self.assertEqual((True, set(['赤'.encode('utf8')])), m.run('すもも'.encode('utf8')))
self.assertEqual((True, set(['茶'.encode('utf8')])), m.run('なし'.encode('utf8')))
self.assertEqual((True, set(['桃'.encode('utf8')])), m.run('もも'.encode('utf8')))
# not accepted string
self.assertEqual((False, set()), m.run('りんご'.encode('utf8')))
if __name__ == '__main__':
unittest.main()
| import os, sys
# TODO: better way to find package...
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
import fst
from fst import Matcher
import unittest
class TestFST(unittest.TestCase):
def test_create_minimum_transducer1(self):
dict_file = '/tmp/dict1.dat'
inputs = [
('apr'.encode('utf8'), '30'.encode('utf8')),
('aug'.encode('utf8'), '31'.encode('utf8')),
('dec'.encode('utf8'), '31'.encode('utf8')),
('feb'.encode('utf8'), '28'.encode('utf8')),
('feb'.encode('utf8'), '29'.encode('utf8')),
('jan'.encode('utf8'), '31'.encode('utf8')),
('jul'.encode('utf8'), '31'.encode('utf8')),
('jun'.encode('utf8'), '30'.encode('utf8')),
('may'.encode('utf8'), '31'.encode('utf8'))
]
dictionary = fst.create_minimum_transducer(inputs)
data = fst.compileFST(dictionary)
m = Matcher(data)
# accepted strings
self.assertEqual((True, set(['30'.encode('utf-8')])), m.run('apr'.encode('utf8')))
self.assertEqual((True, set(['31'.encode('utf-8')])), m.run('aug'.encode('utf8')))
self.assertEqual((True, set(['31'.encode('utf-8')])), m.run('dec'.encode('utf8')))
self.assertEqual((True, set(['28'.encode('utf-8'), '29'.encode('utf8')])), m.run('feb'.encode('utf8')))
self.assertEqual((True, set(['31'.encode('utf-8')])), m.run('jan'.encode('utf8')))
self.assertEqual((True, set(['31'.encode('utf-8')])), m.run('jul'.encode('utf8')))
self.assertEqual((True, set(['30'.encode('utf-8')])), m.run('jun'.encode('utf8')))
self.assertEqual((True, set(['31'.encode('utf-8')])), m.run('may'.encode('utf8')))
# not accepted string
self.assertEqual((False, set()), m.run('mar'))
def test_create_minimum_transducer2(self):
dict_file = '/tmp/dict2.dat'
inputs = [
('さくら'.encode('utf8'), '10'.encode('utf8')),
('さくらんぼ'.encode('utf8'), '11'.encode('utf8')),
('すもも'.encode('utf8'), '20'.encode('utf8')),
('なし'.encode('utf8'), '10'.encode('utf8')),
('もも'.encode('utf8'), '20'.encode('utf8')),
]
dictionary = fst.create_minimum_transducer(inputs)
data = fst.compileFST(dictionary)
fst.save(dict_file, data)
self.assertGreater(os.path.getsize(dict_file), 0)
m = Matcher(file=dict_file)
# accepted strings
self.assertEqual((True, set(['10'.encode('utf8')])), m.run('さくら'.encode('utf8')))
self.assertEqual((True, set(['10'.encode('utf8'), '11'.encode('utf8')])), m.run('さくらんぼ'.encode('utf8')))
self.assertEqual((True, set(['20'.encode('utf8')])), m.run('すもも'.encode('utf8')))
self.assertEqual((True, set(['10'.encode('utf8')])), m.run('なし'.encode('utf8')))
self.assertEqual((True, set(['20'.encode('utf8')])), m.run('もも'.encode('utf8')))
# not accepted string
self.assertEqual((False, set()), m.run('りんご'.encode('utf8')))
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
d2de2d44a46ff521ab8c1d8bbc57d4eeb8d5dc53 | Fix an error | taiga/users/services.py | taiga/users/services.py | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This model contains a domain logic for users application.
"""
from django.db.models.loading import get_model
from django.db.models import Q
from easy_thumbnails.files import get_thumbnailer
from taiga.base import exceptions as exc
from taiga.base.utils.urls import get_absolute_url
from .gravatar import get_gravatar_url
def get_and_validate_user(*, username:str, password:str) -> bool:
"""
Check if user with username/email exists and specified
password matchs well with existing user password.
if user is valid, user is returned else, corresponding
exception is raised.
"""
user_model = get_model("users", "User")
qs = user_model.objects.filter(Q(username=username) |
Q(email=username))
if len(qs) == 0:
raise exc.WrongArguments("Username or password does not matches user.")
user = qs[0]
if not user.check_password(password):
raise exc.WrongArguments("Username or password does not matches user.")
return user
def get_photo_url(photo):
"""Get a photo absolute url and the photo automatically cropped."""
url = get_thumbnailer(photo)['avatar'].url
return get_absolute_url(url)
def get_photo_or_gravatar_url(user):
"""Get the user's photo/gravatar url."""
if user:
return get_photo_url(user.photo) if user.photo else get_gravatar_url(user.email)
return ""
| # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This model contains a domain logic for users application.
"""
from django.db.models.loading import get_model
from django.db.models import Q
from easy_thumbnails.files import get_thumbnailer
from taiga.base import exceptions as exc
from taiga.base.utils.urls import get_absolute_url
from .gravatar import get_gravatar_url
def get_and_validate_user(*, username:str, password:str) -> bool:
"""
Check if user with username/email exists and specified
password matchs well with existing user password.
if user is valid, user is returned else, corresponding
exception is raised.
"""
user_model = get_model("users", "User")
qs = user_model.objects.filter(Q(username=username) |
Q(email=username))
if len(qs) == 0:
raise exc.WrongArguments("Username or password does not matches user.")
user = qs[0]
if not user.check_password(password):
raise exc.WrongArguments("Username or password does not matches user.")
return user
def get_photo_url(photo):
"""Get a photo absolute url and the photo automatically cropped."""
url = get_thumbnailer(photo)['avatar'].url
return get_absolute_url(url)
def get_photo_or_gravatar_url(user):
"""Get the user's photo/gravatar url."""
return get_photo_url(user.photo) if user.photo else get_gravatar_url(user.email)
| Python | 0.998142 |
0f5d0353f9faad9bb34432cd047540b81c6ea643 | add exception test for invalid authentication | tests/test_tpm.py | tests/test_tpm.py | import requests
import requests_mock
import unittest
import os.path
import tpm
import json
import logging
log = logging.getLogger(__name__)
api_url = 'https://tpm.example.com/index.php/api/v4/'
local_path = 'tests/resources/'
item_limit = 20
def fake_data(url, m):
"""
A stub urlopen() implementation that load json responses from
the filesystem.
"""
# Map path from url to a file
path_parts = url.split('/')[6:]
path = '/'.join(path_parts)
resource_file = os.path.normpath('tests/resources/{}'.format(path))
data_file = open(resource_file)
data = json.load(data_file)
# Must return a json-like object
count = 0
header = {}
while True:
count += 1
if len(data) > item_limit:
returndata = data[:item_limit]
data = data[item_limit:]
pageingurl = url.replace('.json', '/page/{}.json'.format(count))
log.debug("Registering URL: {}".format(pageingurl))
log.debug("Registering data: {}".format(returndata))
log.debug("Data length: {}".format(len(returndata)))
log.debug("Registering header: {}".format(header))
m.get(pageingurl, json=returndata, headers=header.copy())
header = { 'link': '{}; rel="next"'.format(pageingurl)}
else:
log.debug("Registering URL: {}".format(url))
log.debug("Registering data: {}".format(data))
log.debug("Registering header: {}".format(header))
log.debug("Data length: {}".format(len(data)))
m.get(url, json=data, headers=header.copy())
header.clear()
break
class ClientTestCase(unittest.TestCase):
"""Test case for the client methods."""
def setUp(self):
self.client = tpm.TpmApiv4('https://tpm.example.com', username='USER', password='PASS')
def test_user_auth_method(self):
"""Test user based authentication method."""
pass
def test_paging(self):
"""Test paging, if number of items is same as from original data source."""
path_to_mock = 'passwords.json'
request_url = api_url + path_to_mock
request_path = local_path + path_to_mock
resource_file = os.path.normpath(request_path)
data_file = open(resource_file)
data = json.load(data_file)
with requests_mock.Mocker() as m:
fake_data(request_url, m)
response = self.client.list_passwords()
# number of passwords as from original json file.
source_items = len(data)
response_items = len(response)
log.debug("Source Items: {}; Response Items: {}".format(source_items, response_items))
self.assertEqual(source_items, response_items)
def test_logging(self):
"""Test Logging."""
pass
class ExceptionTestCase(unittest.TestCase):
"""Test case for all kind of Exceptions."""
def test_wrong_auth_exception(self):
"""Exception if wrong authentication mehtod."""
with self.assertRaises(tpm.TpmApi.ConfigError) as context:
tpm.TpmApiv4('https://tpm.example.com', username='USER', private_key='PASS')
log.debug("context exception: {}".format(context.exception))
self.assertEqual("'No authentication specified (user/password or private/public key)'", str(context.exception))
def test_wrong_url_exception(self):
"""Exception if URL does not match REGEXurl."""
wrong_url = 'ftp://tpm.example.com'
with self.assertRaises(tpm.TpmApiv4.ConfigError) as context:
tpm.TpmApiv4(wrong_url, username='USER', password='PASS')
log.debug("context exception: {}".format(context.exception))
self.assertEqual("'Invalid URL: {}'".format(wrong_url), str(context.exception))
| import requests
import requests_mock
import unittest
import os.path
import tpm
import json
import logging
log = logging.getLogger(__name__)
api_url = 'https://tpm.example.com/index.php/api/v4/'
local_path = 'tests/resources/'
item_limit = 20
def fake_data(url, m):
"""
A stub urlopen() implementation that load json responses from
the filesystem.
"""
# Map path from url to a file
path_parts = url.split('/')[6:]
path = '/'.join(path_parts)
resource_file = os.path.normpath('tests/resources/{}'.format(path))
data_file = open(resource_file)
data = json.load(data_file)
# Must return a json-like object
count = 0
header = {}
while True:
count += 1
if len(data) > item_limit:
returndata = data[:item_limit]
data = data[item_limit:]
pageingurl = url.replace('.json', '/page/{}.json'.format(count))
log.debug("Registering URL: {}".format(pageingurl))
log.debug("Registering data: {}".format(returndata))
log.debug("Data length: {}".format(len(returndata)))
log.debug("Registering header: {}".format(header))
m.get(pageingurl, json=returndata, headers=header.copy())
header = { 'link': '{}; rel="next"'.format(pageingurl)}
else:
log.debug("Registering URL: {}".format(url))
log.debug("Registering data: {}".format(data))
log.debug("Registering header: {}".format(header))
log.debug("Data length: {}".format(len(data)))
m.get(url, json=data, headers=header.copy())
header.clear()
break
class ClientTestCase(unittest.TestCase):
"""Test case for the client methods."""
def setUp(self):
self.client = tpm.TpmApiv4('https://tpm.example.com', username='USER', password='PASS')
def test_user_auth_method(self):
"""Test user based authentication method."""
pass
def test_paging(self):
"""Test paging, if number of items is same as from original data source."""
path_to_mock = 'passwords.json'
request_url = api_url + path_to_mock
request_path = local_path + path_to_mock
resource_file = os.path.normpath(request_path)
data_file = open(resource_file)
data = json.load(data_file)
with requests_mock.Mocker() as m:
fake_data(request_url, m)
response = self.client.list_passwords()
# number of passwords as from original json file.
source_items = len(data)
response_items = len(response)
log.debug("Source Items: {}; Response Items: {}".format(source_items, response_items))
self.assertEqual(source_items, response_items)
def test_logging(self):
"""Test Logging."""
pass
class ExceptionTestCase(unittest.TestCase):
"""Test case for all kind of Exceptions."""
def test_wrong_url_exception(self):
"""Exception if URL does not match REGEXurl."""
wrong_url = 'ftp://tpm.example.com'
with self.assertRaises(tpm.TpmApiv4.ConfigError) as context:
tpm.TpmApiv4(wrong_url, username='USER', password='PASS')
log.debug("context exception: {}".format(context.exception))
self.assertEqual("'Invalid URL: {}'".format(wrong_url), str(context.exception))
| Python | 0.000001 |
d43d4638eefe6d08dcb9ad739753bc4c43647c2a | fix another lazy test | tests/legacy/test_xmlrpc.py | tests/legacy/test_xmlrpc.py | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
import pytest
from werkzeug.exceptions import BadRequest
from warehouse.packaging.models import Project
from warehouse.legacy import xmlrpc
def test_xmlrpc_handler(monkeypatch):
Response = pretend.call_recorder(lambda *a, **k: 'response')
monkeypatch.setattr(xmlrpc, "Response", Response)
interface = pretend.stub(
list_packages=pretend.call_recorder(lambda *a, **k: 'one two'.split())
)
Interface = lambda a, r: interface
monkeypatch.setattr(xmlrpc, "Interface", Interface)
app = pretend.stub()
xml_request = '''<?xml version="1.0"?><methodCall>
<methodName>list_packages</methodName></methodCall>'''
request = pretend.stub(
headers={
'Content-Type': 'text/xml',
'Content-Length': str(len(xml_request)),
},
get_data=lambda **k: xml_request,
)
assert xmlrpc.handle_request(app, request) == 'response'
assert interface.list_packages.calls == [pretend.call()]
response_xml = Response.calls[0].args[0]
assert response_xml == u'''<?xml version='1.0'?>
<methodResponse>
<params>
<param>
<value><array><data>
<value><string>one</string></value>
<value><string>two</string></value>
</data></array></value>
</param>
</params>
</methodResponse>
'''
assert Response.calls[0].kwargs == dict(mimetype='text/xml')
def test_xmlrpc_list_packages():
all_projects = [Project("bar"), Project("foo")]
app = pretend.stub(
models=pretend.stub(
packaging=pretend.stub(
all_projects=pretend.call_recorder(lambda: all_projects),
),
),
)
request = pretend.stub(
headers={'Content-Type': 'text/xml'}
)
interface = xmlrpc.Interface(app, request)
result = interface.list_packages()
assert app.models.packaging.all_projects.calls == [pretend.call()]
assert result == ['bar', 'foo']
def test_xmlrpc_size(monkeypatch):
app = pretend.stub()
request = pretend.stub(
headers={
'Content-Type': 'text/xml',
'Content-Length': str(10 * 1024 * 1024 + 1)
},
)
with pytest.raises(BadRequest):
xmlrpc.handle_request(app, request)
| # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
import pytest
from werkzeug.exceptions import BadRequest
from warehouse.packaging.models import Project
from warehouse.legacy import xmlrpc
def test_xmlrpc_handler(monkeypatch):
Response = pretend.call_recorder(lambda *a, **k: 'response')
monkeypatch.setattr(xmlrpc, "Response", Response)
interface = pretend.stub(
list_packages=pretend.call_recorder(lambda *a, **k: 'one two'.split())
)
Interface = lambda a, r: interface
monkeypatch.setattr(xmlrpc, "Interface", Interface)
app = pretend.stub()
xml_request = '''<?xml version="1.0"?><methodCall>
<methodName>list_packages</methodName></methodCall>'''
request = pretend.stub(
headers={
'Content-Type': 'text/xml',
'Content-Length': str(len(xml_request)),
},
get_data=lambda **k: xml_request,
)
assert xmlrpc.handle_request(app, request) == 'response'
assert interface.list_packages.calls
response_xml = Response.calls[0].args[0]
assert response_xml == u'''<?xml version='1.0'?>
<methodResponse>
<params>
<param>
<value><array><data>
<value><string>one</string></value>
<value><string>two</string></value>
</data></array></value>
</param>
</params>
</methodResponse>
'''
assert Response.calls[0].kwargs == dict(mimetype='text/xml')
def test_xmlrpc_list_packages():
all_projects = [Project("bar"), Project("foo")]
app = pretend.stub(
models=pretend.stub(
packaging=pretend.stub(
all_projects=pretend.call_recorder(lambda: all_projects),
),
),
)
request = pretend.stub(
headers={'Content-Type': 'text/xml'}
)
interface = xmlrpc.Interface(app, request)
result = interface.list_packages()
assert app.models.packaging.all_projects.calls == [pretend.call()]
assert result == ['bar', 'foo']
def test_xmlrpc_size(monkeypatch):
app = pretend.stub()
request = pretend.stub(
headers={
'Content-Type': 'text/xml',
'Content-Length': str(10 * 1024 * 1024 + 1)
},
)
with pytest.raises(BadRequest):
xmlrpc.handle_request(app, request)
| Python | 0.000034 |
87ff78dfe54795f9067fa45f832e8bc84b16c894 | Fix integer division | tf_rl/simulate.py | tf_rl/simulate.py | from __future__ import division
import math
import time
from IPython.display import clear_output, display, HTML
from itertools import count
from os.path import join, exists
from os import makedirs
def simulate(simulation,
controller= None,
fps=60,
visualize_every=1,
action_every=1,
simulation_resolution=None,
wait=False,
disable_training=False,
save_path=None):
"""Start the simulation. Performs three tasks
- visualizes simulation in iPython notebook
- advances simulator state
- reports state to controller and chooses actions
to be performed.
Parameters
-------
simulation: tr_lr.simulation
simulation that will be simulated ;-)
controller: tr_lr.controller
controller used
fps: int
frames per seconds
visualize_every: int
visualize every `visualize_every`-th frame.
action_every: int
take action every `action_every`-th frame
simulation_resolution: float
simulate at most 'simulation_resolution' seconds at a time.
If None, the it is set to 1/FPS (default).
wait: boolean
whether to intentionally slow down the simulation
to appear real time.
disable_training: bool
if true training_step is never called.
save_path: str
save svg visualization (only tl_rl.utils.svg
supported for the moment)
"""
# prepare path to save simulation images
if save_path is not None:
if not exists(save_path):
makedirs(save_path)
last_image = 0
# calculate simulation times
chunks_per_frame = 1
chunk_length_s = 1.0 / fps
if simulation_resolution is not None:
frame_length_s = 1.0 / fps
chunks_per_frame = int(math.ceil(frame_length_s / simulation_resolution))
chunks_per_frame = max(chunks_per_frame, 1)
chunk_length_s = frame_length_s / chunks_per_frame
# state transition bookkeeping
last_observation = None
last_action = None
simulation_started_time = time.time()
for frame_no in count():
for _ in range(chunks_per_frame):
simulation.step(chunk_length_s)
if frame_no % action_every == 0:
new_observation = simulation.observe()
reward = simulation.collect_reward()
# store last transition
if last_observation is not None:
controller.store(last_observation, last_action, reward, new_observation)
# act
new_action = controller.action(new_observation)
simulation.perform_action(new_action)
#train
if not disable_training:
controller.training_step()
# update current state as last state.
last_action = new_action
last_observation = new_observation
# adding 1 to make it less likely to happen at the same time as
# action taking.
if (frame_no + 1) % visualize_every == 0:
fps_estimate = frame_no / (time.time() - simulation_started_time)
clear_output(wait=True)
svg_html = simulation.to_html(["fps = %.1f" % (fps_estimate,)])
display(svg_html)
if save_path is not None:
img_path = join(save_path, "%d.svg" % (last_image,))
with open(img_path, "w") as f:
svg_html.write_svg(f)
last_image += 1
time_should_have_passed = frame_no / fps
time_passed = (time.time() - simulation_started_time)
if wait and (time_should_have_passed > time_passed):
time.sleep(time_should_have_passed - time_passed)
| import math
import time
from IPython.display import clear_output, display, HTML
from itertools import count
from os.path import join, exists
from os import makedirs
def simulate(simulation,
controller= None,
fps=60,
visualize_every=1,
action_every=1,
simulation_resolution=None,
wait=False,
disable_training=False,
save_path=None):
"""Start the simulation. Performs three tasks
- visualizes simulation in iPython notebook
- advances simulator state
- reports state to controller and chooses actions
to be performed.
Parameters
-------
simulation: tr_lr.simulation
simulation that will be simulated ;-)
controller: tr_lr.controller
controller used
fps: int
frames per seconds
visualize_every: int
visualize every `visualize_every`-th frame.
action_every: int
take action every `action_every`-th frame
simulation_resolution: float
simulate at most 'simulation_resolution' seconds at a time.
If None, the it is set to 1/FPS (default).
wait: boolean
whether to intentionally slow down the simulation
to appear real time.
disable_training: bool
if true training_step is never called.
save_path: str
save svg visualization (only tl_rl.utils.svg
supported for the moment)
"""
# prepare path to save simulation images
if save_path is not None:
if not exists(save_path):
makedirs(save_path)
last_image = 0
# calculate simulation times
chunks_per_frame = 1
chunk_length_s = 1.0 / fps
if simulation_resolution is not None:
frame_length_s = 1.0 / fps
chunks_per_frame = int(math.ceil(frame_length_s / simulation_resolution))
chunks_per_frame = max(chunks_per_frame, 1)
chunk_length_s = frame_length_s / chunks_per_frame
# state transition bookkeeping
last_observation = None
last_action = None
simulation_started_time = time.time()
for frame_no in count():
for _ in range(chunks_per_frame):
simulation.step(chunk_length_s)
if frame_no % action_every == 0:
new_observation = simulation.observe()
reward = simulation.collect_reward()
# store last transition
if last_observation is not None:
controller.store(last_observation, last_action, reward, new_observation)
# act
new_action = controller.action(new_observation)
simulation.perform_action(new_action)
#train
if not disable_training:
controller.training_step()
# update current state as last state.
last_action = new_action
last_observation = new_observation
# adding 1 to make it less likely to happen at the same time as
# action taking.
if (frame_no + 1) % visualize_every == 0:
fps_estimate = frame_no / (time.time() - simulation_started_time)
clear_output(wait=True)
svg_html = simulation.to_html(["fps = %.1f" % (fps_estimate,)])
display(svg_html)
if save_path is not None:
img_path = join(save_path, "%d.svg" % (last_image,))
with open(img_path, "w") as f:
svg_html.write_svg(f)
last_image += 1
time_should_have_passed = frame_no / fps
time_passed = (time.time() - simulation_started_time)
if wait and (time_should_have_passed > time_passed):
time.sleep(time_should_have_passed - time_passed)
| Python | 0.999999 |
31a2439c1137068d8532c5f85cc1c8fb913d7ee8 | Add reconnect to clamscan | modules/Antivirus/ClamAVScan.py | modules/Antivirus/ClamAVScan.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
try:
import pyclamd
except:
print("pyclamd module not installed...")
pyclamd = None
__author__ = 'Mike Long'
__license__ = "MPL 2.0"
DEFAULTCONF ={
"ENABLED": True,
}
def check(conf=DEFAULTCONF):
if not conf['ENABLED']:
return False
if not pyclamd:
return False
return True
def _connect_clam():
try:
clamScanner = pyclamd.ClamdUnixSocket()
clamScanner.ping()
except:
clamScanner = pyclamd.ClamdNetworkSocket()
try:
clamScanner.ping()
except:
raise ValueError("Unable to connect to clamd")
return clamScanner
def scan(filelist, conf=DEFAULTCONF):
results = []
clamScanner = _connect_clam()
# Scan each file from filelist for virus
for f in filelist:
output = clamScanner.scan_file(f)
if output is None:
continue
if list(output.values())[0][0] == 'ERROR':
with open(f, 'rb') as file_handle:
try:
output = clamScanner.scan_stream(file_handle.read())
except pyclamd.BufferTooLongError:
continue
except Exception as e:
print(e)
clamScanner = _connect_clam()
output = clamScanner.scan_stream(file_handle.read())
if output is None:
continue
if list(output.values())[0][0] == 'FOUND':
results.append((f, list(output.values())[0][1]))
elif list(output.values())[0][0] == 'ERROR':
print('ClamAV: ERROR:', list(output.values())[0][1])
# Set metadata tags
metadata = {
'Name': "ClamAV",
'Type': "Antivirus",
'Version': clamScanner.version()
}
return (results, metadata)
| # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
try:
import pyclamd
except:
print("pyclamd module not installed...")
pyclamd = None
__author__ = 'Mike Long'
__license__ = "MPL 2.0"
DEFAULTCONF ={
"ENABLED": True,
}
def check(conf=DEFAULTCONF):
if not conf['ENABLED']:
return False
if not pyclamd:
return False
return True
def scan(filelist, conf=DEFAULTCONF):
results = []
try:
clamScanner = pyclamd.ClamdUnixSocket()
clamScanner.ping()
except:
clamScanner = pyclamd.ClamdNetworkSocket()
try:
clamScanner.ping()
except:
raise ValueError("Unable to connect to clamd")
# Scan each file from filelist for virus
for f in filelist:
output = clamScanner.scan_file(f)
if output is None:
continue
if list(output.values())[0][0] == 'ERROR':
with open(f, 'rb') as file_handle:
try:
output = clamScanner.scan_stream(file_handle.read())
except pyclamd.BufferTooLongError:
continue
if output is None:
continue
if list(output.values())[0][0] == 'FOUND':
results.append((f, list(output.values())[0][1]))
elif list(output.values())[0][0] == 'ERROR':
print('ClamAV: ERROR:', list(output.values())[0][1])
# Set metadata tags
metadata = {
'Name': "ClamAV",
'Type': "Antivirus",
'Version': clamScanner.version()
}
return (results, metadata)
| Python | 0.000001 |
821e191e05269b9c1cc5f58b3d4cecf5bd20e896 | Correct Range sample | samples/python/com.ibm.streamsx.topology.pysamples/opt/python/streams/spl_sources.py | samples/python/com.ibm.streamsx.topology.pysamples/opt/python/streams/spl_sources.py | # Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015, 2016
from __future__ import absolute_import, division, print_function
# Simple inclusion of Python logic within an SPL application
# as a SPL "Function" operator. A "Function" operator has
# a single input port and single output port, a function
# is called for every input tuple, and results in
# no submission or a single tuple being submitted.
# Import the SPL decorators
from streamsx.spl import spl
# Any function in a Python module (.py file) within the
# toolkit's opt/python/streams directory is converted to a primitive operator
# with a single input and output port. The primitive operator
# is a C++ primitive that embeds the Python runtime.
#
# The function must be decorated with one of these
#
# @spl.pipe - Function is a pipe operator
# @spl.sink - Function is a sink operator
# @spl.ignore - Function is ignored
# Attributes of the input SPL tuple are passed
# as a Python Tuple and thus are available as positional arguments.
# (see examples below)
# Any returned value from a function must be a Tuple.
#
# If nothing is returned then no tuple is submitted
# by the operator for the input tuple.
#
# When a Tuple is returned, its values are assigned
# to the first N attributes of the output tuple,
# that is by position.
# The returned values in the Tuple must be assignable
# to the output tuple attribute types.
#
# If the output port has more than N attributes
# then any remaining attributes are set from the
# input tuple if there is a matching input attribute by
# name and type, otherwise the attribute remains at
# its default value.
#
# If the output port has fewer attributes than N
# then any additional values are ignored.
# Any function whose name starts with spl is not created
# as an operator, such functions are reserved as a mechanism
# to pass information back to the primitive operator generator.
# The description of the function becomes the description
# of the primitive operator model in its operator model.
#------------------------------------------------------------------
# Example functions
#------------------------------------------------------------------
# Defines the SPL namespace for any functions in this module
# Multiple modules can map to the same namespace
def splNamespace():
return "com.ibm.streamsx.topology.pysamples.sources"
@spl.source()
class Range:
def __init__(self, count):
self.count = count
def __iter__(self):
# Use zip to convert the single returned value
# into a tuple to allow it to be returned to SPL
return zip(range(self.count))
| # Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015, 2016
from __future__ import absolute_import, division, print_function
# Simple inclusion of Python logic within an SPL application
# as a SPL "Function" operator. A "Function" operator has
# a single input port and single output port, a function
# is called for every input tuple, and results in
# no submission or a single tuple being submitted.
# Import the SPL decorators
from streamsx.spl import spl
# Any function in a Python module (.py file) within the
# toolkit's opt/python/streams directory is converted to a primitive operator
# with a single input and output port. The primitive operator
# is a C++ primitive that embeds the Python runtime.
#
# The function must be decorated with one of these
#
# @spl.pipe - Function is a pipe operator
# @spl.sink - Function is a sink operator
# @spl.ignore - Function is ignored
# Attributes of the input SPL tuple are passed
# as a Python Tuple and thus are available as positional arguments.
# (see examples below)
# Any returned value from a function must be a Tuple.
#
# If nothing is returned then no tuple is submitted
# by the operator for the input tuple.
#
# When a Tuple is returned, its values are assigned
# to the first N attributes of the output tuple,
# that is by position.
# The returned values in the Tuple must be assignable
# to the output tuple attribute types.
#
# If the output port has more than N attributes
# then any remaining attributes are set from the
# input tuple if there is a matching input attribute by
# name and type, otherwise the attribute remains at
# its default value.
#
# If the output port has fewer attributes than N
# then any additional values are ignored.
# Any function whose name starts with spl is not created
# as an operator, such functions are reserved as a mechanism
# to pass information back to the primitive operator generator.
# The description of the function becomes the description
# of the primitive operator model in its operator model.
#------------------------------------------------------------------
# Example functions
#------------------------------------------------------------------
# Defines the SPL namespace for any functions in this module
# Multiple modules can map to the same namespace
def splNamespace():
return "com.ibm.streamsx.topology.pysamples.sources"
@spl.source()
class Range:
def __init__(self, count):
self.count = count
def __iter__(self):
return map(tuple, iter(range(self.count)))
| Python | 0.000001 |
b23a887edd6b55f2386c45c9b93c04431bceba5e | remove all__vary_rounds setting (deprecated in Passlib 1.7) | coremods/login.py | coremods/login.py | """
login.py - Implement core login abstraction.
"""
from pylinkirc import conf, utils, world
from pylinkirc.log import log
try:
from passlib.context import CryptContext
except ImportError:
CryptContext = None
log.warning("Hashed passwords are disabled because passlib is not installed. Please install "
"it (pip3 install passlib) and restart for this feature to work.")
pwd_context = None
if CryptContext:
pwd_context = CryptContext(["sha512_crypt", "sha256_crypt"],
sha256_crypt__default_rounds=180000,
sha512_crypt__default_rounds=90000)
def checkLogin(user, password):
"""Checks whether the given user and password is a valid combination."""
accounts = conf.conf['login'].get('accounts')
if not accounts:
# No accounts specified, return.
return False
# Lowercase account names to make them case insensitive. TODO: check for
# duplicates.
user = user.lower()
accounts = {k.lower(): v for k, v in accounts.items()}
try:
account = accounts[user]
except KeyError: # Invalid combination
return False
else:
passhash = account.get('password')
if not passhash:
# No password given, return. XXX: we should allow plugins to override
# this in the future.
return False
# Encryption in account passwords is optional (to not break backwards
# compatibility).
if account.get('encrypted', False):
return verifyHash(password, passhash)
else:
return password == passhash
def verifyHash(password, passhash):
"""Checks whether the password given matches the hash."""
if password:
if not pwd_context:
raise utils.NotAuthorizedError("Cannot log in to an account with a hashed password "
"because passlib is not installed.")
return pwd_context.verify(password, passhash)
return False # No password given!
| """
login.py - Implement core login abstraction.
"""
from pylinkirc import conf, utils, world
from pylinkirc.log import log
try:
from passlib.context import CryptContext
except ImportError:
CryptContext = None
log.warning("Hashed passwords are disabled because passlib is not installed. Please install "
"it (pip3 install passlib) and restart for this feature to work.")
pwd_context = None
if CryptContext:
pwd_context = CryptContext(["sha512_crypt", "sha256_crypt"],
all__vary_rounds=0.1,
sha256_crypt__default_rounds=180000,
sha512_crypt__default_rounds=90000)
def checkLogin(user, password):
"""Checks whether the given user and password is a valid combination."""
accounts = conf.conf['login'].get('accounts')
if not accounts:
# No accounts specified, return.
return False
# Lowercase account names to make them case insensitive. TODO: check for
# duplicates.
user = user.lower()
accounts = {k.lower(): v for k, v in accounts.items()}
try:
account = accounts[user]
except KeyError: # Invalid combination
return False
else:
passhash = account.get('password')
if not passhash:
# No password given, return. XXX: we should allow plugins to override
# this in the future.
return False
# Encryption in account passwords is optional (to not break backwards
# compatibility).
if account.get('encrypted', False):
return verifyHash(password, passhash)
else:
return password == passhash
def verifyHash(password, passhash):
"""Checks whether the password given matches the hash."""
if password:
if not pwd_context:
raise utils.NotAuthorizedError("Cannot log in to an account with a hashed password "
"because passlib is not installed.")
return pwd_context.verify(password, passhash)
return False # No password given!
| Python | 0 |
b79a80d894bdc39c8fa6f76fe50e222567f00df1 | Update cofnig_default: add elastic search config | config_default.py | config_default.py | # -*- coding: utf-8 -*-
"""
Created on 2015-10-23 08:06:00
@author: Tran Huu Cuong <tranhuucuong91@gmail.com>
"""
import os
# Blog configuration values.
# You may consider using a one-way hash to generate the password, and then
# use the hash again in the login view to perform the comparison. This is just
# for simplicity.
ADMIN_PASSWORD = 'admin@secret'
APP_DIR = os.path.dirname(os.path.realpath(__file__))
PATH_SQLITE_DB=os.path.join(APP_DIR, 'blog.db')
# The playhouse.flask_utils.FlaskDB object accepts database URL configuration.
DATABASE = 'sqliteext:///{}'.format(PATH_SQLITE_DB)
DEBUG = False
# The secret key is used internally by Flask to encrypt session data stored
# in cookies. Make this unique for your app.
SECRET_KEY = 'shhh, secret!'
# This is used by micawber, which will attempt to generate rich media
# embedded objects with maxwidth=800.
SITE_WIDTH = 800
APP_HOST='127.0.0.1'
APP_PORT=5000
ES_HOST = {
"host": "172.17.42.1",
"port": 9200
}
ES_INDEX_NAME = 'notebooks'
ES_TYPE_NAME = 'notebooks'
| # -*- coding: utf-8 -*-
"""
Created on 2015-10-23 08:06:00
@author: Tran Huu Cuong <tranhuucuong91@gmail.com>
"""
import os
# Blog configuration values.
# You may consider using a one-way hash to generate the password, and then
# use the hash again in the login view to perform the comparison. This is just
# for simplicity.
ADMIN_PASSWORD = 'admin@secret'
APP_DIR = os.path.dirname(os.path.realpath(__file__))
PATH_SQLITE_DB=os.path.join(APP_DIR, 'blog.db')
# The playhouse.flask_utils.FlaskDB object accepts database URL configuration.
DATABASE = 'sqliteext:///{}'.format(PATH_SQLITE_DB)
DEBUG = False
# The secret key is used internally by Flask to encrypt session data stored
# in cookies. Make this unique for your app.
SECRET_KEY = 'shhh, secret!'
# This is used by micawber, which will attempt to generate rich media
# embedded objects with maxwidth=800.
SITE_WIDTH = 800
APP_HOST='127.0.0.1'
APP_PORT=5000
| Python | 0 |
a7c084b4ff3d5529ca54209283d0e1a5984ebea2 | Fix lint error | tldextract/cli.py | tldextract/cli.py | '''tldextract CLI'''
import logging
import sys
from .tldextract import TLDExtract
from ._version import version as __version__
def main():
'''tldextract CLI main command.'''
import argparse
logging.basicConfig()
parser = argparse.ArgumentParser(
prog='tldextract',
description='Parse hostname from a url or fqdn')
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('input', metavar='fqdn|url',
type=str, nargs='*', help='fqdn or url')
parser.add_argument('-u', '--update', default=False, action='store_true',
help='force fetch the latest TLD definitions')
parser.add_argument('-c', '--cache_dir',
help='use an alternate TLD definition caching folder')
parser.add_argument('-p', '--private_domains', default=False, action='store_true',
help='Include private domains')
args = parser.parse_args()
tld_extract = TLDExtract(include_psl_private_domains=args.private_domains)
if args.cache_dir:
tld_extract.cache_file = args.cache_file
if args.update:
tld_extract.update(True)
elif not args.input:
parser.print_usage()
sys.exit(1)
return
for i in args.input:
print(' '.join(tld_extract(i))) # pylint: disable=superfluous-parens
| '''tldextract CLI'''
import logging
import sys
from .tldextract import TLDExtract
from ._version import version as __version__
def main():
'''tldextract CLI main command.'''
import argparse
logging.basicConfig()
parser = argparse.ArgumentParser(
prog='tldextract',
description='Parse hostname from a url or fqdn')
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('input', metavar='fqdn|url',
type=str, nargs='*', help='fqdn or url')
parser.add_argument('-u', '--update', default=False, action='store_true',
help='force fetch the latest TLD definitions')
parser.add_argument('-c', '--cache_dir',
help='use an alternate TLD definition caching folder')
parser.add_argument('-p', '--private_domains', default=False, action='store_true',
help='Include private domains')
args = parser.parse_args()
tld_extract = TLDExtract(include_psl_private_domains=args.private_domains)
if args.cache_dir:
tld_extract.cache_file = args.cache_file
if args.update:
tld_extract.update(True)
elif not args.input:
parser.print_usage()
sys.exit(1)
return
for i in args.input:
print(' '.join(tld_extract(i))) # pylint: disable=superfluous-parens
| Python | 0.000035 |
3f0930f4c7758bc690f01d09f743e24068db05c1 | extend benchmark to run both upload and download tests | tools/run_benchmark.py | tools/run_benchmark.py | import os
import time
import shutil
import subprocess
import sys
toolset = ''
if len(sys.argv) > 1:
toolset = sys.argv[1]
ret = os.system('cd ../examples && bjam boost=source profile statistics=on -j3 %s stage_client_test' % toolset)
ret = os.system('cd ../examples && bjam boost=source release -j3 %s stage_connection_tester' % toolset)
if ret != 0:
print 'ERROR: build failed: %d' % ret
sys.exit(1)
if not os.path.exists('cpu_benchmark.torrent'):
ret = os.system('../examples/connection_tester gen-torrent -s 10000 -n 15 -t cpu_benchmark.torrent')
if ret != 0:
print 'ERROR: connection_tester failed: %d' % ret
sys.exit(1)
try: shutil.rmtree('t')
except: pass
def run_test(name, test_cmd, client_arg, num_peers):
output_dir = 'logs_%s' % name
try: os.mkdir(output_dir)
except: pass
port = (int(time.time()) % 50000) + 2000
try: shutil.rmtree('session_stats')
except: pass
try: shutil.rmtree('session_stats_report')
except: pass
start = time.time();
client_cmd = '../examples/client_test -p %d cpu_benchmark.torrent -k -z -H -X -q 120 %s -h -c %d -T %d' \
% (port, client_arg, num_peers *2, num_peers*2)
test_cmd = '../examples/connection_tester %s -c %d -d 127.0.0.1 -p %d -t cpu_benchmark.torrent' % (test_cmd, num_peers, port)
client_out = open('%s/client.out' % output_dir, 'w+')
test_out = open('%s/test.out' % output_dir, 'w+')
print client_cmd
c = subprocess.Popen(client_cmd.split(' '), stdout=client_out, stderr=client_out, stdin=subprocess.PIPE)
time.sleep(2)
print test_cmd
t = subprocess.Popen(test_cmd.split(' '), stdout=test_out, stderr=test_out)
t.wait()
c.communicate('q')
c.wait()
end = time.time();
print 'runtime %d seconds' % (end - start)
print 'analyzing proile...'
os.system('gprof ../examples/client_test >%s/gprof.out' % output_dir)
print 'generating profile graph...'
os.system('python gprof2dot.py <%s/gprof.out | dot -Tps -o %s/cpu_profile.ps' % (output_dir, output_dir))
os.system('python parse_session_stats.py session_stats/*.log')
try: shutil.move('session_stats_report', '%s/session_stats_report' % output_dir)
except: pass
try: shutil.move('session_stats', '%s/session_stats' % output_dir)
except: pass
run_test('download', 'upload', '', 50)
run_test('upload', 'download', '-G', 5)
| import os
import time
import shutil
import subprocess
import sys
port = (int(time.time()) % 50000) + 2000
toolset = ''
if len(sys.argv) > 1:
toolset = sys.argv[1]
ret = os.system('cd ../examples && bjam boost=source profile statistics=on -j3 %s stage_client_test' % toolset)
ret = os.system('cd ../examples && bjam boost=source release -j3 %s stage_connection_tester' % toolset)
if ret != 0:
print 'ERROR: build failed: %d' % ret
sys.exit(1)
if not os.path.exists('cpu_benchmark.torrent'):
ret = os.system('../examples/connection_tester gen-torrent -s 10000 -n 15 -t cpu_benchmark.torrent')
if ret != 0:
print 'ERROR: connection_tester failed: %d' % ret
sys.exit(1)
try: shutil.rmtree('torrent_storage')
except: pass
try: shutil.rmtree('session_stats')
except: pass
try: os.mkdir('logs')
except: pass
start = time.time();
client_cmd = '../examples/client_test -p %d cpu_benchmark.torrent -k -0 -z -H -X -q 120' % port
test_cmd = '../examples/connection_tester upload -c 50 -d 127.0.0.1 -p %d -t cpu_benchmark.torrent' % port
client_out = open('logs/client.out', 'w+')
test_out = open('logs/test.out', 'w+')
print client_cmd
c = subprocess.Popen(client_cmd.split(' '), stdout=client_out, stderr=client_out, stdin=subprocess.PIPE)
time.sleep(2)
print test_cmd
t = subprocess.Popen(test_cmd.split(' '), stdout=test_out, stderr=test_out)
t.wait()
c.communicate('q')
c.wait()
end = time.time();
print 'runtime %d seconds' % (end - start)
print 'analyzing proile...'
os.system('gprof ../examples/client_test >logs/gprof.out')
print 'generating profile graph...'
os.system('python gprof2dot.py <logs/gprof.out | dot -Tpng -o logs/cpu_profile.png')
os.system('python parse_session_stats.py session_stats/*.log')
try: os.rename('session_stats_report', 'logs/session_stats_report')
except: pass
try: os.rename('session_stats', 'logs/session_stats')
except: pass
| Python | 0 |
c22ffd3c2c8feb0dfba2eb6df6fb8cbb49475cee | Remove un-used `message` arg, Fixes #4824 | salt/returners/sentry_return.py | salt/returners/sentry_return.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Salt returner that report error back to sentry
Pillar need something like::
raven:
servers:
- http://192.168.1.1
- https://sentry.example.com
public_key: deadbeefdeadbeefdeadbeefdeadbeef
secret_key: beefdeadbeefdeadbeefdeadbeefdead
project: 1
and http://pypi.python.org/pypi/raven installed
'''
import logging
try:
from raven import Client
has_raven = True
except ImportError:
has_raven = False
logger = logging.getLogger(__name__)
def __virtual__():
if not has_raven:
return False
return 'sentry'
def returner(ret):
'''
If an error occurs, log it to sentry
'''
def connect_sentry(result):
pillar_data = __salt__['pillar.raw']()
sentry_data = {
'result': result,
'returned': ret,
'pillar': pillar_data,
'grains': __salt__['grains.items']()
}
servers = []
try:
for server in pillar_data['raven']['servers']:
servers.append(server + '/api/store/')
client = Client(
servers=servers,
public_key=pillar_data['raven']['public_key'],
secret_key=pillar_data['raven']['secret_key'],
project=pillar_data['raven']['project'],
)
except KeyError as missing_key:
logger.error("Sentry returner need config '%s' in pillar",
missing_key)
else:
try:
client.captureMessage(ret['comment'], extra=sentry_data)
except Exception as err:
logger.error("Can't send message to sentry: %s", err,
exc_info=True)
requisite_error = 'One or more requisite failed'
try:
if 'success' not in ret:
logger.debug('no success data, report')
connect_sentry(ret['return'], ret)
else:
if not ret['success']:
logger.debug('not a success, report')
connect_sentry(ret['return'], ret)
else:
for state in ret['return']:
if not ret['return'][state]['result'] and \
ret['return'][state]['comment'] != requisite_error:
connect_sentry(state, ret['return'][state])
except Exception as err:
logger.error("Can't run connect_sentry: %s", err, exc_info=True)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Salt returner that report error back to sentry
Pillar need something like::
raven:
servers:
- http://192.168.1.1
- https://sentry.example.com
public_key: deadbeefdeadbeefdeadbeefdeadbeef
secret_key: beefdeadbeefdeadbeefdeadbeefdead
project: 1
and http://pypi.python.org/pypi/raven installed
'''
import logging
try:
from raven import Client
has_raven = True
except ImportError:
has_raven = False
logger = logging.getLogger(__name__)
def __virtual__():
if not has_raven:
return False
return 'sentry'
def returner(ret):
'''
If an error occurs, log it to sentry
'''
def connect_sentry(message, result):
pillar_data = __salt__['pillar.raw']()
sentry_data = {
'result': result,
'returned': ret,
'pillar': pillar_data,
'grains': __salt__['grains.items']()
}
servers = []
try:
for server in pillar_data['raven']['servers']:
servers.append(server + '/api/store/')
client = Client(
servers=servers,
public_key=pillar_data['raven']['public_key'],
secret_key=pillar_data['raven']['secret_key'],
project=pillar_data['raven']['project'],
)
except KeyError as missing_key:
logger.error("Sentry returner need config '%s' in pillar",
missing_key)
else:
try:
client.captureMessage(ret['comment'], extra=sentry_data)
except Exception as err:
logger.error("Can't send message to sentry: %s", err,
exc_info=True)
requisite_error = 'One or more requisite failed'
try:
if 'success' not in ret:
logger.debug('no success data, report')
connect_sentry(ret['return'], ret)
else:
if not ret['success']:
logger.debug('not a success, report')
connect_sentry(ret['return'], ret)
else:
for state in ret['return']:
if not ret['return'][state]['result'] and \
ret['return'][state]['comment'] != requisite_error:
connect_sentry(state, ret['return'][state])
except Exception as err:
logger.error("Can't run connect_sentry: %s", err, exc_info=True)
| Python | 0 |
e7c462af8382a5eb7f5fee2abfc04f002e36b193 | Add varint and varlong tests | tests/mcp/test_datautils.py | tests/mcp/test_datautils.py | from spock.mcp import datautils
from spock.utils import BoundBuffer
def test_unpack_varint():
largebuff = BoundBuffer(b'\x80\x94\xeb\xdc\x03')
smallbuff = BoundBuffer(b'\x14')
assert datautils.unpack_varint(smallbuff) == 20
assert datautils.unpack_varint(largebuff) == 1000000000
def test_pack_varint():
assert datautils.pack_varint(20) == b'\x14'
assert datautils.pack_varint(1000000000) == b'\x80\x94\xeb\xdc\x03'
assert datautils.pack_varint(-10000000000) is None
assert datautils.pack_varint(10000000000) is None
def test_unpack_varlong():
largebuff = BoundBuffer(b'\x80\xc8\xaf\xa0%')
smallbuff = BoundBuffer(b'\x14')
assert datautils.unpack_varlong(smallbuff) == 20
assert datautils.unpack_varlong(largebuff) == 10000000000
pass
def test_pack_varlong():
assert datautils.pack_varlong(20) == b'\x14'
assert datautils.pack_varlong(10000000000) == b'\x80\xc8\xaf\xa0%'
assert datautils.pack_varlong(10000000000000000000) is None
assert datautils.pack_varlong(-10000000000000000000) is None
| Python | 0 | |
4e887718e44453f0f0cd65addc0284668b31bbd2 | Disable session cache | src/clarityv2/conf/production.py | src/clarityv2/conf/production.py | from .base import *
import raven
#
# Standard Django settings.
#
DEBUG = False
ENVIRONMENT = 'production'
ADMINS = (
'Alex', 'khomenkodev17@gmail.com'
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.getenv('SECRET_KEY')
ALLOWED_HOSTS = ['claritydev.net', '188.166.1.116', '0.0.0.0']
# # Redis cache backend
# # NOTE: If you do not use a cache backend, do not use a session backend or
# # cached template loaders that rely on a backend.
# CACHES = {
# "default": {
# "BACKEND": "django_redis.cache.RedisCache",
# "LOCATION": "redis://127.0.0.1:6379/2", # NOTE: watch out for multiple projects using the same cache!
# "OPTIONS": {
# "CLIENT_CLASS": "django_redis.client.DefaultClient",
# "IGNORE_EXCEPTIONS": True,
# }
# }
# }
#
# # Caching sessions.
# SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# SESSION_CACHE_ALIAS = "default"
# Caching templates.
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', RAW_TEMPLATE_LOADERS),
]
# The file storage engine to use when collecting static files with the
# collectstatic management command.
# STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# Production logging facility.
LOGGING['loggers'].update({
'': {
'handlers': ['sentry'],
'level': 'ERROR',
'propagate': False,
},
'clarityv2': {
'handlers': ['project'],
'level': 'WARNING',
'propagate': True,
},
'django': {
'handlers': ['django'],
'level': 'WARNING',
'propagate': True,
},
'django.security.DisallowedHost': {
'handlers': ['django'],
'level': 'CRITICAL',
'propagate': False,
},
})
#
# Custom settings
#
# Show active environment in admin.
SHOW_ALERT = False
# We will assume we're running under https
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = False # TODO enable after SSL is setup
X_FRAME_OPTIONS = 'DENY'
# Only set this when we're behind Nginx as configured in our example-deployment
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#
# Library settings
#
# Raven
INSTALLED_APPS = INSTALLED_APPS + [
'raven.contrib.django.raven_compat',
]
# RAVEN_CONFIG = {
# 'dsn': 'https://',
# 'release': raven.fetch_git_sha(os.path.dirname(os.pardir)),
# }
LOGGING['handlers'].update({
'sentry': {
'level': 'WARNING',
'class': 'raven.handlers.logging.SentryHandler',
# 'dsn': RAVEN_CONFIG['dsn']
},
})
| from .base import *
import raven
#
# Standard Django settings.
#
DEBUG = False
ENVIRONMENT = 'production'
ADMINS = (
'Alex', 'khomenkodev17@gmail.com'
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.getenv('SECRET_KEY')
ALLOWED_HOSTS = ['claritydev.net', '188.166.1.116', '0.0.0.0']
# Redis cache backend
# NOTE: If you do not use a cache backend, do not use a session backend or
# cached template loaders that rely on a backend.
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/2", # NOTE: watch out for multiple projects using the same cache!
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True,
}
}
}
# Caching sessions.
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_CACHE_ALIAS = "default"
# Caching templates.
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', RAW_TEMPLATE_LOADERS),
]
# The file storage engine to use when collecting static files with the
# collectstatic management command.
# STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# Production logging facility.
LOGGING['loggers'].update({
'': {
'handlers': ['sentry'],
'level': 'ERROR',
'propagate': False,
},
'clarityv2': {
'handlers': ['project'],
'level': 'WARNING',
'propagate': True,
},
'django': {
'handlers': ['django'],
'level': 'WARNING',
'propagate': True,
},
'django.security.DisallowedHost': {
'handlers': ['django'],
'level': 'CRITICAL',
'propagate': False,
},
})
#
# Custom settings
#
# Show active environment in admin.
SHOW_ALERT = False
# We will assume we're running under https
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = False # TODO enable after SSL is setup
X_FRAME_OPTIONS = 'DENY'
# Only set this when we're behind Nginx as configured in our example-deployment
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#
# Library settings
#
# Raven
INSTALLED_APPS = INSTALLED_APPS + [
'raven.contrib.django.raven_compat',
]
# RAVEN_CONFIG = {
# 'dsn': 'https://',
# 'release': raven.fetch_git_sha(os.path.dirname(os.pardir)),
# }
LOGGING['handlers'].update({
'sentry': {
'level': 'WARNING',
'class': 'raven.handlers.logging.SentryHandler',
# 'dsn': RAVEN_CONFIG['dsn']
},
})
| Python | 0.000001 |
ca885203ab82026ca21a200c1bee5ad3c0a82cb5 | Change default interval | src/cmsplugin_carousel/models.py | src/cmsplugin_carousel/models.py | from adminsortable.models import SortableMixin
from cms.models import CMSPlugin
from cms.models.fields import PageField
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.fields.image import FilerImageField
class CarouselPlugin(CMSPlugin):
interval = models.PositiveIntegerField(_('Interval'), default=5)
title = models.CharField(_('Title'), max_length=255, default='', blank=True)
def __str__(self):
return self.title or str(self.pk)
def copy_relations(self, oldinstance):
super(CarouselPlugin, self).copy_relations(oldinstance)
for picture in oldinstance.pictures.all().iterator():
picture.pk = None
picture.plugin = self
picture.save()
class CarouselPicture(SortableMixin):
plugin = models.ForeignKey(CarouselPlugin, related_name='pictures')
image = FilerImageField(verbose_name=_('Image'), related_name='+')
alt_tag = models.CharField(_('Alt tag'), max_length=255, blank=True)
text = models.TextField(verbose_name=_('Text over image'), blank=True)
url = models.CharField(verbose_name=_('URL'), blank=True, null=True, max_length=500)
page = PageField(verbose_name=_("Page"), blank=True, null=True)
open_in_tab = models.BooleanField(verbose_name=_('Open in new window'))
ordering = models.PositiveIntegerField(default=0, editable=False, db_index=True)
class Meta:
ordering = ['ordering', ]
def link(self):
if self.page is not None:
return self.page
else:
return self.url
def __str__(self):
return self.alt_tag
| from adminsortable.models import SortableMixin
from cms.models import CMSPlugin
from cms.models.fields import PageField
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.fields.image import FilerImageField
class CarouselPlugin(CMSPlugin):
interval = models.PositiveIntegerField(_('Interval'), default=1)
title = models.CharField(_('Title'), max_length=255, default='', blank=True)
def __str__(self):
return self.title or str(self.pk)
def copy_relations(self, oldinstance):
super(CarouselPlugin, self).copy_relations(oldinstance)
for picture in oldinstance.pictures.all().iterator():
picture.pk = None
picture.plugin = self
picture.save()
class CarouselPicture(SortableMixin):
plugin = models.ForeignKey(CarouselPlugin, related_name='pictures')
image = FilerImageField(verbose_name=_('Image'), related_name='+')
alt_tag = models.CharField(_('Alt tag'), max_length=255, blank=True)
text = models.TextField(verbose_name=_('Text over image'), blank=True)
url = models.CharField(verbose_name=_('URL'), blank=True, null=True, max_length=500)
page = PageField(verbose_name=_("Page"), blank=True, null=True)
open_in_tab = models.BooleanField(verbose_name=_('Open in new window'))
ordering = models.PositiveIntegerField(default=0, editable=False, db_index=True)
class Meta:
ordering = ['ordering', ]
def link(self):
if self.page is not None:
return self.page
else:
return self.url
def __str__(self):
return self.alt_tag
| Python | 0.000001 |
25508fef8d2632835bf29e22a39ef1d70b615f62 | make PooledConnection more robust to other kinds of exceptions | connection.py | connection.py | import threading
from Queue import Queue
from thrift import Thrift
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from cassandra import Cassandra
__all__ = ['connect', 'connect_thread_local', 'connect_pooled']
DEFAULT_SERVER = 'localhost:9160'
def create_client_transport(server):
host, port = server.split(":")
socket = TSocket.TSocket(host, int(port))
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Cassandra.Client(protocol)
transport.open()
return client, transport
def connect(server=DEFAULT_SERVER):
"""
Construct a Cassandra connection
Parameters
----------
server : str
Cassandra server with format: "hostname:port"
Default: 'localhost:9160'
Returns
-------
Cassandra client
"""
return create_client_transport(server)[0]
def connect_pooled(servers=None):
"""
Construct a pooled queue of Cassandra connections, given by the servers list
Parameters
----------
servers: [server]
List of Cassandra servers with format: "hostname:port"
Create duplicate server entries if you want multiple connections
to the same server.
Default: 5 * ['localhost:9160']
(5 connections to the server at localhost)
Returns
-------
Cassandra client
"""
if servers is None:
servers = 5 * [DEFAULT_SERVER]
return PooledConnection(servers)
class PooledConnection(object):
def __init__(self, servers):
self.queue = Queue()
for server in servers:
self.queue.put((server, None, None))
def __getattr__(self, attr):
def client_call(*args, **kwargs):
server, client, transport = self.queue.get()
try:
if client is None:
client, transport = create_client_transport(server)
return getattr(client, attr)(*args, **kwargs)
except Thrift.TException, exc:
# Connection error, try a new server next time
transport.close()
client, transport = None, None
raise exc
finally:
self.queue.put((server, client, transport))
setattr(self, attr, client_call)
return getattr(self, attr)
def connect_thread_local(servers=None):
"""
Construct a Cassandra connection for each thread
Parameters
----------
servers: [server]
List of Cassandra servers with format: "hostname:port"
Default: ['localhost:9160']
Returns
-------
Cassandra client
"""
if servers is None:
servers = [DEFAULT_SERVER]
return ThreadLocalConnection(servers)
class ThreadLocalConnection(object):
def __init__(self, servers):
self.queue = Queue()
for server in servers:
self.queue.put(server)
self.local = threading.local()
def __getattr__(self, attr):
def client_call(*args, **kwargs):
if getattr(self.local, 'client', None) is None:
server = self.queue.get()
self.queue.put(server)
self.local.client, self.local.transport = create_client_transport(server)
try:
return getattr(self.local.client, attr)(*args, **kwargs)
except Thrift.TException, exc:
# Connection error, try a new server next time
self.local.transport.close()
self.local.client = None
raise exc
setattr(self, attr, client_call)
return getattr(self, attr)
| import threading
from Queue import Queue
from thrift import Thrift
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from cassandra import Cassandra
__all__ = ['connect', 'connect_thread_local', 'connect_pooled']
DEFAULT_SERVER = 'localhost:9160'
def create_client_transport(server):
host, port = server.split(":")
socket = TSocket.TSocket(host, int(port))
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Cassandra.Client(protocol)
transport.open()
return client, transport
def connect(server=DEFAULT_SERVER):
"""
Construct a Cassandra connection
Parameters
----------
server : str
Cassandra server with format: "hostname:port"
Default: 'localhost:9160'
Returns
-------
Cassandra client
"""
return create_client_transport(server)[0]
def connect_pooled(servers=None):
"""
Construct a pooled queue of Cassandra connections, given by the servers list
Parameters
----------
servers: [server]
List of Cassandra servers with format: "hostname:port"
Create duplicate server entries if you want multiple connections
to the same server.
Default: 5 * ['localhost:9160']
(5 connections to the server at localhost)
Returns
-------
Cassandra client
"""
if servers is None:
servers = 5 * [DEFAULT_SERVER]
return PooledConnection(servers)
class PooledConnection(object):
def __init__(self, servers):
self.queue = Queue()
for server in servers:
self.queue.put((server, None, None))
def __getattr__(self, attr):
def client_call(*args, **kwargs):
server, client, transport = self.queue.get()
try:
if client is None:
client, transport = create_client_transport(server)
ret = getattr(client, attr)(*args, **kwargs)
self.queue.put((server, client, transport))
return ret
except Thrift.TException, exc:
# Connection error, try a new server next time
transport.close()
self.queue.put((server, None, None))
raise exc
setattr(self, attr, client_call)
return getattr(self, attr)
def connect_thread_local(servers=None):
"""
Construct a Cassandra connection for each thread
Parameters
----------
servers: [server]
List of Cassandra servers with format: "hostname:port"
Default: ['localhost:9160']
Returns
-------
Cassandra client
"""
if servers is None:
servers = [DEFAULT_SERVER]
return ThreadLocalConnection(servers)
class ThreadLocalConnection(object):
def __init__(self, servers):
self.queue = Queue()
for server in servers:
self.queue.put(server)
self.local = threading.local()
def __getattr__(self, attr):
def client_call(*args, **kwargs):
if getattr(self.local, 'client', None) is None:
server = self.queue.get()
self.queue.put(server)
self.local.client, self.local.transport = create_client_transport(server)
try:
return getattr(self.local.client, attr)(*args, **kwargs)
except Thrift.TException, exc:
# Connection error, try a new server next time
self.local.transport.close()
self.local.client = None
raise exc
setattr(self, attr, client_call)
return getattr(self, attr)
| Python | 0 |
d8077e7de68d2059ba338b650cfd1904686af754 | fix problem in thread-local connections where it was reconnecting every function call | connection.py | connection.py | from exceptions import Exception
import threading
from Queue import Queue
from thrift import Thrift
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from cassandra import Cassandra
__all__ = ['connect', 'connect_thread_local', 'NoServerAvailable']
DEFAULT_SERVER = 'localhost:9160'
class NoServerAvailable(Exception):
pass
def create_client_transport(server):
host, port = server.split(":")
socket = TSocket.TSocket(host, int(port))
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Cassandra.Client(protocol)
transport.open()
return client, transport
def connect(servers=None):
"""
Constructs a single Cassandra connection. Initially connects to the first
server on the list.
If the connection fails, it will attempt to connect to each server on the
list in turn until one succeeds. If it is unable to find an active server,
it will throw a NoServerAvailable exception.
Parameters
----------
servers : [server]
List of Cassandra servers with format: "hostname:port"
Default: ['localhost:9160']
Returns
-------
Cassandra client
"""
if servers is None:
servers = [DEFAULT_SERVER]
return SingleConnection(servers)
def connect_thread_local(servers=None, round_robin=True):
"""
Constructs a Cassandra connection for each thread. By default, it attempts
to connect in a round_robin (load-balancing) fashion. Turn it off by
setting round_robin=False
If the connection fails, it will attempt to connect to each server on the
list in turn until one succeeds. If it is unable to find an active server,
it will throw a NoServerAvailable exception.
Parameters
----------
servers : [server]
List of Cassandra servers with format: "hostname:port"
Default: ['localhost:9160']
round_robin : bool
Balance the connections. Set to False to connect to each server
in turn.
Returns
-------
Cassandra client
"""
if servers is None:
servers = [DEFAULT_SERVER]
return ThreadLocalConnection(servers, round_robin)
class SingleConnection(object):
def __init__(self, servers):
self._servers = servers
self._client = None
def __getattr__(self, attr):
def client_call(*args, **kwargs):
if self._client is None:
self._find_server()
try:
return getattr(self._client, attr)(*args, **kwargs)
except Thrift.TException as exc:
# Connection error, try to connect to all the servers
self._transport.close()
self._client = None
self._find_server()
setattr(self, attr, client_call)
return getattr(self, attr)
def _find_server(self):
for server in self._servers:
try:
self._client, self._transport = create_client_transport(server)
return
except Thrift.TException as exc:
continue
raise NoServerAvailable()
class ThreadLocalConnection(object):
def __init__(self, servers, round_robin):
self._servers = servers
self._queue = Queue()
for i in xrange(len(servers)):
self._queue.put(i)
self._local = threading.local()
self._round_robin = round_robin
def __getattr__(self, attr):
def client_call(*args, **kwargs):
if getattr(self._local, 'client', None) is None:
self._find_server()
try:
return getattr(self._local.client, attr)(*args, **kwargs)
except Thrift.TException as exc:
# Connection error, try to connect to all the servers
self._local.transport.close()
self._local.client = None
self._find_server()
setattr(self, attr, client_call)
return getattr(self, attr)
def _find_server(self):
servers = self._servers
if self._round_robin:
i = self._queue.get()
self._queue.put(i)
servers = servers[i:]+servers[:i]
for server in servers:
try:
self._local.client, self._local.transport = create_client_transport(server)
return
except Thrift.TException as exc:
continue
raise NoServerAvailable()
| from exceptions import Exception
import threading
from Queue import Queue
from thrift import Thrift
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from cassandra import Cassandra
__all__ = ['connect', 'connect_thread_local', 'NoServerAvailable']
DEFAULT_SERVER = 'localhost:9160'
class NoServerAvailable(Exception):
pass
def create_client_transport(server):
host, port = server.split(":")
socket = TSocket.TSocket(host, int(port))
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Cassandra.Client(protocol)
transport.open()
return client, transport
def connect(servers=None):
"""
Constructs a single Cassandra connection. Initially connects to the first
server on the list.
If the connection fails, it will attempt to connect to each server on the
list in turn until one succeeds. If it is unable to find an active server,
it will throw a NoServerAvailable exception.
Parameters
----------
servers : [server]
List of Cassandra servers with format: "hostname:port"
Default: ['localhost:9160']
Returns
-------
Cassandra client
"""
if servers is None:
servers = [DEFAULT_SERVER]
return SingleConnection(servers)
def connect_thread_local(servers=None, round_robin=True):
"""
Constructs a Cassandra connection for each thread. By default, it attempts
to connect in a round_robin (load-balancing) fashion. Turn it off by
setting round_robin=False
If the connection fails, it will attempt to connect to each server on the
list in turn until one succeeds. If it is unable to find an active server,
it will throw a NoServerAvailable exception.
Parameters
----------
servers : [server]
List of Cassandra servers with format: "hostname:port"
Default: ['localhost:9160']
round_robin : bool
Balance the connections. Set to False to connect to each server
in turn.
Returns
-------
Cassandra client
"""
if servers is None:
servers = [DEFAULT_SERVER]
return ThreadLocalConnection(servers, round_robin)
class SingleConnection(object):
def __init__(self, servers):
self._servers = servers
self._client = None
def __getattr__(self, attr):
def client_call(*args, **kwargs):
if self._client is None:
self._find_server()
try:
return getattr(self._client, attr)(*args, **kwargs)
except Thrift.TException as exc:
# Connection error, try to connect to all the servers
self._transport.close()
self._client = None
self._find_server()
setattr(self, attr, client_call)
return getattr(self, attr)
def _find_server(self):
for server in self._servers:
try:
self._client, self._transport = create_client_transport(server)
return
except Thrift.TException as exc:
continue
raise NoServerAvailable()
class ThreadLocalConnection(object):
def __init__(self, servers, round_robin):
self._servers = servers
self._queue = Queue()
for i in xrange(len(servers)):
self._queue.put(i)
self._local = threading.local()
self._round_robin = round_robin
def __getattr__(self, attr):
def client_call(*args, **kwargs):
if getattr(self.local, 'client', None) is None:
self._find_server()
try:
return getattr(self._local.client, attr)(*args, **kwargs)
except Thrift.TException as exc:
# Connection error, try to connect to all the servers
self._local.transport.close()
self._local.client = None
self._find_server()
setattr(self, attr, client_call)
return getattr(self, attr)
def _find_server(self):
servers = self._servers
if self._round_robin:
i = self._queue.get()
self._queue.put(i)
servers = servers[i:]+servers[:i]
for server in servers:
try:
self._local.client, self._local.transport = create_client_transport(server)
return
except Thrift.TException as exc:
continue
raise NoServerAvailable()
| Python | 0.000001 |
64e3f7c56d8c395aebf5bc15fb03264fb9b390bb | Update Admin.py | Plugins/Admin.py | Plugins/Admin.py | import discord
from discord.ext import commands
import random
import asyncio
import Dependencies
from datetime import datetime
class Admin():
def __init__(self, bot):
self.bot = bot
# strike command
@commands.has_role("Mods")
@commands.command(pass_context=True)
async def strike(self, ctx, member : str=None, *, reason : str=None):
'''Gives a strike to a specified person.'''
if member is None:
await self.bot.say('Please input a user.')
elif member is not None and reason is None:
await self.bot.say('Please input a reason')
elif member is not None and reason is not None:
member = ctx.message.mentions[0]
strike_embed = discord.Embed(title="Strike", description= 'User: **{0}** \nReason: {1}'.format(member, reason), color=discord.Color.red())
strike_embed.set_footer(text='Strike')
await self.bot.send_message(discord.utils.get(ctx.message.server.channels, name="strikes"), '<@&332973960318943233>', embed=strike_embed)
strike_embed = discord.Embed(title="Strike", description= 'You have been given a strike in {0}. \nReason: {1}'.format(ctx.message.server, reason), color=discord.Color.red())
strike_embed.set_footer(text='Strike')
await self.bot.send_message(member, embed=strike_embed)
logMsg = "{0} has been striked on the {1} server. Reason: {2}".format(member, ctx.message.server, reason)
log(logMsg)
@commands.has_permissions(kick_members=True)
@commands.command(pass_context=True)
async def kick(self, ctx, member : str=None, *, reason : str=None):
'''Kicks a specified person.'''
if member is None:
await self.bot.say('Please input a user.')
elif member is not None and reason is None:
await self.bot.say('Please input a reason')
elif member is not None and reason is not None:
member = ctx.message.mentions[0]
await self.bot.kick(member)
await self.bot.say('Kicked {0}. Reason: {1}'.format(member, reason))
log('{0} has been kicked from {1}. Reason: {2}'.format(member, ctx.message.server, reason))
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True)
async def ban(self, ctx, member : str=None, *, reason : str=None):
'''Bans a specified person.'''
if member is None:
await self.bot.say('Please input a user.')
elif member is not None and reason is None:
await self.bot.say('Please input a reason')
elif member is not None and reason is not None:
member = ctx.message.mentions[0]
await self.bot.ban(member)
await self.bot.say('Kicked {0}. Reason: {1}'.format(member, reason))
log('{0} has been kicked from {1}. Reason: {2}'.format(member, ctx.message.server, reason))
def log(message):
print(datetime.now(), message)
def setup(bot):
bot.add_cog(Admin(bot))
| import discord
from discord.ext import commands
import random
import asyncio
import Dependencies
from datetime import datetime
class Admin():
def __init__(self, bot):
self.bot = bot
# strike command
@commands.has_role("Mods")
@commands.command(pass_context=True)
async def strike(self, ctx, member : str=None, *, reason : str=None):
'''Gives a strike to a specified person.'''
if member is None:
await self.bot.say('Please input a user.')
elif member is not None and reason is None:
await self.bot.say('Please input a reason')
elif member is not None and reason is not None:
member = ctx.message.mentions[0]
strike_embed = discord.Embed(title="Strike", description= 'User: **{0}** \nReason: {1}'.format(member, reason), color=discord.Color.red())
strike_embed.set_footer(text='Strike')
await self.bot.send_message(discord.utils.get(ctx.message.server.channels, name="strikes"), '<@&332973960318943233>', embed=strike_embed)
strike_embed = discord.Embed(title="Strike", description= 'You have been given a strike on the {0} server. \nReason: {1}'.format(ctx.message.server, reason), color=discord.Color.red())
strike_embed.set_footer(text='Strike')
await self.bot.send_message(member, embed=strike_embed)
logMsg = "{0} has been striked on the {1} server. Reason: {2}".format(member, ctx.message.server, reason)
log(logMsg)
@commands.has_permissions(kick_members=True)
@commands.command(pass_context=True)
async def kick(self, ctx, member : str=None, *, reason : str=None):
'''Kicks a specified person.'''
if member is None:
await self.bot.say('Please input a user.')
elif member is not None and reason is None:
await self.bot.say('Please input a reason')
elif member is not None and reason is not None:
member = ctx.message.mentions[0]
await self.bot.kick(member)
await self.bot.say('Kicked {0}. Reason: {1}'.format(member, reason))
log('{0} has been kicked from {1}. Reason: {2}'.format(member, ctx.message.server, reason))
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True)
async def ban(self, ctx, member : str=None, *, reason : str=None):
'''Bans a specified person.'''
if member is None:
await self.bot.say('Please input a user.')
elif member is not None and reason is None:
await self.bot.say('Please input a reason')
elif member is not None and reason is not None:
member = ctx.message.mentions[0]
await self.bot.ban(member)
await self.bot.say('Kicked {0}. Reason: {1}'.format(member, reason))
log('{0} has been kicked from {1}. Reason: {2}'.format(member, ctx.message.server, reason))
def log(message):
print(datetime.now(), message)
def setup(bot):
bot.add_cog(Admin(bot))
| Python | 0.000001 |
8bcd0063ce0ede395172409c5bcbe778a54cf92c | Fix bug in api related to querying mapobject types | tmaps/mapobject/api.py | tmaps/mapobject/api.py | import os.path as p
import json
from flask.ext.jwt import jwt_required
from flask.ext.jwt import current_identity
from flask.ext.jwt import jwt_required
from flask import jsonify, request
from sqlalchemy.sql import text
from tmaps.api import api
from tmaps.extensions import db
from tmaps.mapobject import MapobjectOutline, MapobjectType
from tmaps.experiment import Experiment
from tmaps.response import (
MALFORMED_REQUEST_RESPONSE,
RESOURCE_NOT_FOUND_RESPONSE,
NOT_AUTHORIZED_RESPONSE
)
@api.route('/experiments/<experiment_id>/mapobjects/<object_name>', methods=['GET'])
def get_mapobjects_tile(experiment_id, object_name):
ex = db.session.query(Experiment).get_with_hash(experiment_id)
if not ex:
return RESOURCE_NOT_FOUND_RESPONSE
# TODO: Requests should have a auth token
# if not ex.belongs_to(current_identity):
# return NOT_AUTHORIZED_RESPONSE
# The coordinates of the requested tile
x = request.args.get('x')
y = request.args.get('y')
z = request.args.get('z')
zlevel = request.args.get('zlevel')
t = request.args.get('t')
# Check arguments for validity and convert to integers
if any([var is None for var in [x, y, z, zlevel, t]]):
return MALFORMED_REQUEST_RESPONSE
else:
x, y, z, zlevel, t = map(int, [x, y, z, zlevel, t])
if object_name == 'DEBUG_TILE':
maxzoom = ex.channels[0].layers[0].maxzoom_level_index
minx, miny, maxx, maxy = MapobjectOutline.create_tile(x, y, z, maxzoom)
return jsonify({
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': [[
[maxx, maxy], [minx, maxy], [minx, miny], [maxx, miny],
[maxx, maxy]
]]
},
'properties': {
'x': x,
'y': y,
'z': z,
'type': 'DEBUG_TILE'
}
})
mapobject_type = \
db.session.query(MapobjectType).\
filter_by(name=object_name, experiment_id=ex.id).one()
query_res = mapobject_type.get_mapobject_outlines_within_tile(
x, y, z, t, zlevel)
features = []
if len(query_res) > 0:
# Try to estimate how many points there are in total within
# the polygons of this tile.
for mapobject_id, geom_geojson_str in query_res:
feature = {
"type": "Feature",
"id": mapobject_id,
"geometry": json.loads(geom_geojson_str),
"properties": {
"type": object_name
}
}
features.append(feature)
return jsonify({
"type": "FeatureCollection",
"features": features
})
| import os.path as p
import json
from flask.ext.jwt import jwt_required
from flask.ext.jwt import current_identity
from flask.ext.jwt import jwt_required
from flask import jsonify, request
from sqlalchemy.sql import text
from tmaps.api import api
from tmaps.extensions import db
from tmaps.mapobject import MapobjectOutline, MapobjectType
from tmaps.experiment import Experiment
from tmaps.response import (
MALFORMED_REQUEST_RESPONSE,
RESOURCE_NOT_FOUND_RESPONSE,
NOT_AUTHORIZED_RESPONSE
)
@api.route('/experiments/<experiment_id>/mapobjects/<object_name>', methods=['GET'])
def get_mapobjects_tile(experiment_id, object_name):
ex = db.session.query(Experiment).get_with_hash(experiment_id)
if not ex:
return RESOURCE_NOT_FOUND_RESPONSE
# TODO: Requests should have a auth token
# if not ex.belongs_to(current_identity):
# return NOT_AUTHORIZED_RESPONSE
# The coordinates of the requested tile
x = request.args.get('x')
y = request.args.get('y')
z = request.args.get('z')
zlevel = request.args.get('zlevel')
t = request.args.get('t')
# Check arguments for validity and convert to integers
if any([var is None for var in [x, y, z, zlevel, t]]):
return MALFORMED_REQUEST_RESPONSE
else:
x, y, z, zlevel, t = map(int, [x, y, z, zlevel, t])
if object_name == 'DEBUG_TILE':
maxzoom = ex.channels[0].layers[0].maxzoom_level_index
minx, miny, maxx, maxy = MapobjectOutline.create_tile(x, y, z, maxzoom)
return jsonify({
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': [[
[maxx, maxy], [minx, maxy], [minx, miny], [maxx, miny],
[maxx, maxy]
]]
},
'properties': {
'x': x,
'y': y,
'z': z,
'type': 'DEBUG_TILE'
}
})
mapobject_type = \
db.session.query(MapobjectType).\
filter_by(name=object_name).one()
query_res = mapobject_type.get_mapobject_outlines_within_tile(
x, y, z, t, zlevel)
features = []
if len(query_res) > 0:
# Try to estimate how many points there are in total within
# the polygons of this tile.
for mapobject_id, geom_geojson_str in query_res:
feature = {
"type": "Feature",
"id": mapobject_id,
"geometry": json.loads(geom_geojson_str),
"properties": {
"type": object_name
}
}
features.append(feature)
return jsonify({
"type": "FeatureCollection",
"features": features
})
| Python | 0 |
f96f3f6ac5ca5f9301c2c463b0a3f4f710187f21 | Use utf-8 | constantes.py | constantes.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from BeautifulSoup import BeautifulSoup
import requests
def get_profs():
r = requests.get("http://www.heb.be/esi/personnel_fr.htm")
soup = BeautifulSoup(r.text)
soup = soup.findAll('ul')[2]
profs = {}
for line in soup:
line = str(line)
if "profs" in line:
abbr = line.split("(")[1].split(")")[0]
prof = line.split(">")[2].split("<")[0]
profs[abbr] = prof.decode('utf-8')
HOURS = [
'08:15',
'09:15',
'10:30',
'11:30',
'12:30',
'13:45',
'14:45',
'16:00',
'17:00',
]
DAYS = {
0: 'Lundi',
1: 'Mardi',
2: 'Mercredi',
3: 'Jeudi',
4: 'Vendredi',
}
MONTHS = {
'janvier' : '01',
'février' : '02',
'mars' : '03',
'avril' : '04',
'mai' : '05',
'juin' : '06',
'juillet' : '07',
'aout' : '08',
'septembre': '09',
'octobre' : '10',
'novembre' : '11',
'décembre' : '12',
}
PROFS = {
'ADT': 'Alain Detaille',
'ARO': 'Anne Rousseau',
'ART': 'Anne Rayet',
'BDL': 'Bénoni Delfosse',
'BEJ': 'Jonas Beleho',
'CIH': 'Yashar Cihan',
'CLG': 'Christine Leignel',
'CLR': 'Catherine Leruste',
'CUV': 'Geneviève Cuvelier',
'DNA': 'David Nabet',
'DWI': 'Didier Willame',
'EFO': 'Eric Fontaine',
'EGR': 'Eric Georges',
'ELV': 'Eytan Levy',
'FPL': 'Frédéric Pluquet',
'GVA': 'Gilles Van Assche',
'HAL': 'Amine Hallal',
'JCJ': 'Jean-Claude Jaumain',
'JDM': 'Jacqueline De Mesmaeker',
'JDS': 'Jérôme Dossogne',
'JMA': 'Jean-Marc André',
'LBC': 'Laurent Beeckmans',
'MAP': 'Michel Applaincourt',
'MBA': 'Monica Bastreghi',
'MCD': 'Marco Codutti',
'MHI': 'Mohamed Hadjili',
'MWA': 'Moussa Wahid',
'MWI': 'Michel Willemse',
'NPX': 'Nicolas Pettiaux',
'NVS': 'Nicolas Vansteenkiste',
'PBT': 'Pierre Bettens',
'PMA': 'Pantelis Matsos',
'RPL': 'René-Philippe Legrand',
'SRV': 'Frédéric Servais',
'YPR': 'Yves Pierseaux',
}
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from BeautifulSoup import BeautifulSoup
import requests
def get_profs():
r = requests.get("http://www.heb.be/esi/personnel_fr.htm")
soup = BeautifulSoup(r.text)
soup = soup.findAll('ul')[2]
profs = {}
for line in soup:
line = str(line)
if "profs" in line:
abbr = line.split("(")[1].split(")")[0]
prof = line.split(">")[2].split("<")[0]
profs[abbr] = prof.decode('utf-8')
HOURS = [
'08:15',
'09:15',
'10:30',
'11:30',
'12:30',
'13:45',
'14:45',
'16:00',
'17:00',
]
DAYS = {
0: 'Lundi',
1: 'Mardi',
2: 'Mercredi',
3: 'Jeudi',
4: 'Vendredi',
}
MONTHS = {
'janvier' : '01',
'février' : '02',
'mars' : '03',
'avril' : '04',
'mai' : '05',
'juin' : '06',
'juillet' : '07',
'aout' : '08',
'septembre': '09',
'octobre' : '10',
'novembre' : '11',
'décembre' : '12',
}
PROFS = {
'ADT': 'Alain Detaille',
'ARO': 'Anne Rousseau',
'ART': 'Anne Rayet',
'BDL': 'Bénoni Delfosse',
'BEJ': 'Jonas Beleho',
'CIH': 'Yashar Cihan',
'CLG': 'Christine Leignel',
'CLR': 'Catherine Leruste',
'CUV': 'Geneviève Cuvelier',
'DNA': 'David Nabet',
'DWI': 'Didier Willame',
'EFO': 'Eric Fontaine',
'EGR': 'Eric Georges',
'ELV': 'Eytan Levy',
'FPL': 'Frédéric Pluquet',
'GVA': 'Gilles Van Assche',
'HAL': 'Amine Hallal',
'JCJ': 'Jean-Claude Jaumain',
'JDM': 'Jacqueline De Mesmaeker',
'JDS': 'Jérôme Dossogne',
'JMA': 'Jean-Marc André',
'LBC': 'Laurent Beeckmans',
'MAP': 'Michel Applaincourt',
'MBA': 'Monica Bastreghi',
'MCD': 'Marco Codutti',
'MHI': 'Mohamed Hadjili',
'MWA': 'Moussa Wahid',
'MWI': 'Michel Willemse',
'NPX': 'Nicolas Pettiaux',
'NVS': 'Nicolas Vansteenkiste',
'PBT': 'Pierre Bettens',
'PMA': 'Pantelis Matsos',
'RPL': 'René-Philippe Legrand',
'SRV': 'Fréd\éric Servais',
'YPR': 'Yves Pierseaux',
}
| Python | 0 |
0726bc2cabd98639214e2cd14c49d30262e75d5e | Streamline setup of deCONZ button platform (#70593) | homeassistant/components/deconz/button.py | homeassistant/components/deconz/button.py | """Support for deCONZ buttons."""
from __future__ import annotations
from dataclasses import dataclass
from pydeconz.models.event import EventType
from pydeconz.models.scene import Scene as PydeconzScene
from homeassistant.components.button import (
DOMAIN,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .deconz_device import DeconzSceneMixin
from .gateway import DeconzGateway, get_gateway_from_config_entry
@dataclass
class DeconzButtonDescriptionMixin:
"""Required values when describing deCONZ button entities."""
suffix: str
button_fn: str
@dataclass
class DeconzButtonDescription(ButtonEntityDescription, DeconzButtonDescriptionMixin):
"""Class describing deCONZ button entities."""
ENTITY_DESCRIPTIONS = {
PydeconzScene: [
DeconzButtonDescription(
key="store",
button_fn="store",
suffix="Store Current Scene",
icon="mdi:inbox-arrow-down",
entity_category=EntityCategory.CONFIG,
)
]
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the deCONZ button entity."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_scene(_: EventType, scene_id: str) -> None:
"""Add scene button from deCONZ."""
scene = gateway.api.scenes[scene_id]
async_add_entities(
DeconzButton(scene, gateway, description)
for description in ENTITY_DESCRIPTIONS.get(PydeconzScene, [])
)
config_entry.async_on_unload(
gateway.api.scenes.subscribe(
async_add_scene,
EventType.ADDED,
)
)
for scene_id in gateway.api.scenes:
async_add_scene(EventType.ADDED, scene_id)
class DeconzButton(DeconzSceneMixin, ButtonEntity):
"""Representation of a deCONZ button entity."""
TYPE = DOMAIN
def __init__(
self,
device: PydeconzScene,
gateway: DeconzGateway,
description: DeconzButtonDescription,
) -> None:
"""Initialize deCONZ number entity."""
self.entity_description: DeconzButtonDescription = description
super().__init__(device, gateway)
self._attr_name = f"{self._attr_name} {description.suffix}"
async def async_press(self) -> None:
"""Store light states into scene."""
async_button_fn = getattr(self._device, self.entity_description.button_fn)
await async_button_fn()
def get_device_identifier(self) -> str:
"""Return a unique identifier for this scene."""
return f"{super().get_device_identifier()}-{self.entity_description.key}"
| """Support for deCONZ buttons."""
from __future__ import annotations
from dataclasses import dataclass
from pydeconz.models.scene import Scene as PydeconzScene
from homeassistant.components.button import (
DOMAIN,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .deconz_device import DeconzSceneMixin
from .gateway import DeconzGateway, get_gateway_from_config_entry
@dataclass
class DeconzButtonDescriptionMixin:
"""Required values when describing deCONZ button entities."""
suffix: str
button_fn: str
@dataclass
class DeconzButtonDescription(ButtonEntityDescription, DeconzButtonDescriptionMixin):
"""Class describing deCONZ button entities."""
ENTITY_DESCRIPTIONS = {
PydeconzScene: [
DeconzButtonDescription(
key="store",
button_fn="store",
suffix="Store Current Scene",
icon="mdi:inbox-arrow-down",
entity_category=EntityCategory.CONFIG,
)
]
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the deCONZ button entity."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_scene(scenes: list[PydeconzScene] | None = None) -> None:
"""Add scene button from deCONZ."""
entities = []
if scenes is None:
scenes = list(gateway.api.scenes.values())
for scene in scenes:
known_entities = set(gateway.entities[DOMAIN])
for description in ENTITY_DESCRIPTIONS.get(PydeconzScene, []):
new_entity = DeconzButton(scene, gateway, description)
if new_entity.unique_id not in known_entities:
entities.append(new_entity)
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
gateway.signal_new_scene,
async_add_scene,
)
)
async_add_scene()
class DeconzButton(DeconzSceneMixin, ButtonEntity):
"""Representation of a deCONZ button entity."""
TYPE = DOMAIN
def __init__(
self,
device: PydeconzScene,
gateway: DeconzGateway,
description: DeconzButtonDescription,
) -> None:
"""Initialize deCONZ number entity."""
self.entity_description: DeconzButtonDescription = description
super().__init__(device, gateway)
self._attr_name = f"{self._attr_name} {description.suffix}"
async def async_press(self) -> None:
"""Store light states into scene."""
async_button_fn = getattr(self._device, self.entity_description.button_fn)
await async_button_fn()
def get_device_identifier(self) -> str:
"""Return a unique identifier for this scene."""
return f"{super().get_device_identifier()}-{self.entity_description.key}"
| Python | 0 |
afdc58945c710f623714e6b07c593489c0cd42be | Implement basic list command | src/xii/builtin/commands/list/list.py | src/xii/builtin/commands/list/list.py | import datetime
from xii import definition, command, error
from xii.need import NeedLibvirt, NeedSSH
class ListCommand(command.Command):
"""List all currently defined components
"""
name = ['list', 'ls']
help = "list all currently defined components"
@classmethod
def argument_parser(cls):
parser = command.Command.argument_parser(cls.name[0])
parser.add_argument("-d", "--definition", default=None,
help="Define which xii definition file should be used")
parser.add_argument("--all", default=False, action="store_true",
help="Show all components defined by the xii")
parser.add_argument("--host", default=None,
help="Specify host to connect to. (A libvirt url is required)")
parser.add_argument("--only", type=str, default=None,
help="Show only secified components [nodes,pools,networks]")
return parser
def _get_uptime(self, time):
now = datetime.datetime.now()
delta = now - datetime.datetime.fromtimestamp(time)
if delta.days > 1:
return "{} days".format(delta.days)
if delta.seconds / 3600 > 1:
return "{} hours".format(delta.seconds / 3600)
if delta.seconds / 60 > 1:
return "{} minutes".format(delta.seconds / 60)
return "{} seconds".format(delta.seconds)
def run(self):
rows = []
for c in self.children():
meta = c.fetch_metadata()
create = "---"
if meta is not None:
created_at = float(meta["created"])
create = self._get_uptime(created_at)
rows.append((c.entity(),
c.get_virt_url(),
create,
c.status()
))
self.show_table(["name", "host", "uptime", "status"], rows)
| from xii import definition, command, error
from xii.need import NeedLibvirt, NeedSSH
class ListCommand(command.Command):
"""List all currently defined components
"""
name = ['list', 'ls']
help = "list all currently defined components"
@classmethod
def argument_parser(cls):
parser = command.Command.argument_parser(cls.name[0])
parser.add_argument("-d", "--definition", default=None,
help="Define which xii definition file should be used")
parser.add_argument("--all", default=False, action="store_true",
help="Show all components defined by the xii")
parser.add_argument("--host", default=None,
help="Specify host to connect to. (A libvirt url is required)")
parser.add_argument("--only", type=str, default=None,
help="Show only secified components [nodes,pools,networks]")
return parser
def run(self):
pass
| Python | 0.999947 |
095ec4c38015f1b1b53cb88ae59fbf6a7596b492 | update VAF | mnist/training.py | mnist/training.py | # Copyright 2017 Max W. Y. Lam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
sys.path.append("../")
import os
import time
import tensorflow as tf
import tensorflow.contrib.layers as layers
from six.moves import range, zip
import numpy as np
import zhusuan as zs
import six
import gzip
from six.moves import cPickle as pickle
from expt import run_experiment
DATA_PATH = 'mnist.pkl.gz'
def load_data(n_folds):
def to_one_hot(x, depth):
ret = np.zeros((x.shape[0], depth))
ret[np.arange(x.shape[0]), x] = 1
return ret
f = gzip.open(path, 'rb')
if six.PY2:
train_set, valid_set, test_set = pickle.load(f)
else:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
f.close()
X_train, y_train = train_set[0], train_set[1]
X_valid, y_valid = valid_set[0], valid_set[1]
X_test, y_test = test_set[0], test_set[1]
X_train = np.vstack([X_train, X_valid]).astype('float32')
y_train = np.vstack([y_train, y_valid])
return [X_train, to_one_hot(y_train, 10), X_test, to_one_hot(y_test, 10)]
if __name__ == '__main__':
if('cpu' in sys.argv):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
model_names = ['VAFNN', 'BNN']
train_test_set = load_data(5)
D, P = train_test_set[0][0].shape[1], train_test_set[0][1].shape[1]
# Fair Model Comparison - Same Architecture & Optimization Rule
training_settings = {
'task': 'classification',
'plot_err': True,
'lb_samples': 20,
'll_samples': 100,
'n_basis': 50,
'n_hiddens': [100],
'batch_size': 10,
'learn_rate': 1e-3,
'max_epochs': 10000,
'early_stop': 10,
'check_freq': 5,
}
eval_mses, eval_lls = run_experiment(
model_names, 'MNIST', load_data(5), **training_settings) | # Copyright 2017 Max W. Y. Lam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
sys.path.append("../")
import os
import time
import tensorflow as tf
import tensorflow.contrib.layers as layers
from six.moves import range, zip
import numpy as np
import zhusuan as zs
import six
from six.moves import cPickle as pickle
from expt import run_experiment
DATA_PATH = 'mnist.pkl.gz'
def load_data(n_folds):
def to_one_hot(x, depth):
ret = np.zeros((x.shape[0], depth))
ret[np.arange(x.shape[0]), x] = 1
return ret
f = gzip.open(path, 'rb')
if six.PY2:
train_set, valid_set, test_set = pickle.load(f)
else:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
f.close()
X_train, y_train = train_set[0], train_set[1]
X_valid, y_valid = valid_set[0], valid_set[1]
X_test, y_test = test_set[0], test_set[1]
X_train = np.vstack([X_train, X_valid]).astype('float32')
y_train = np.vstack([y_train, y_valid])
return [X_train, to_one_hot(y_train, 10), X_test, to_one_hot(y_test, 10)]
if __name__ == '__main__':
if('cpu' in sys.argv):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
model_names = ['VAFNN', 'BNN']
train_test_set = load_data(5)
D, P = train_test_set[0][0].shape[1], train_test_set[0][1].shape[1]
# Fair Model Comparison - Same Architecture & Optimization Rule
training_settings = {
'task': 'classification',
'plot_err': True,
'lb_samples': 20,
'll_samples': 100,
'n_basis': 50,
'n_hiddens': [100],
'batch_size': 10,
'learn_rate': 1e-3,
'max_epochs': 10000,
'early_stop': 10,
'check_freq': 5,
}
eval_mses, eval_lls = run_experiment(
model_names, 'MNIST', load_data(5), **training_settings) | Python | 0 |
7a8aa79f191ed633babc1134238017c164b306f3 | Add optional rtsp_port for Foscam (#22786) | homeassistant/components/foscam/camera.py | homeassistant/components/foscam/camera.py | """
This component provides basic support for Foscam IP cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.foscam/
"""
import logging
import voluptuous as vol
from homeassistant.components.camera import (
Camera, PLATFORM_SCHEMA, SUPPORT_STREAM)
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_PORT)
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['libpyfoscam==1.0']
CONF_IP = 'ip'
CONF_RTSP_PORT = 'rtsp_port'
DEFAULT_NAME = 'Foscam Camera'
DEFAULT_PORT = 88
FOSCAM_COMM_ERROR = -8
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_IP): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_RTSP_PORT): cv.port
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a Foscam IP Camera."""
add_entities([FoscamCam(config)])
class FoscamCam(Camera):
"""An implementation of a Foscam IP camera."""
def __init__(self, device_info):
"""Initialize a Foscam camera."""
from libpyfoscam import FoscamCamera
super(FoscamCam, self).__init__()
ip_address = device_info.get(CONF_IP)
port = device_info.get(CONF_PORT)
self._username = device_info.get(CONF_USERNAME)
self._password = device_info.get(CONF_PASSWORD)
self._name = device_info.get(CONF_NAME)
self._motion_status = False
self._foscam_session = FoscamCamera(
ip_address, port, self._username, self._password, verbose=False)
self._rtsp_port = device_info.get(CONF_RTSP_PORT)
if not self._rtsp_port:
result, response = self._foscam_session.get_port_info()
if result == 0:
self._rtsp_port = response.get('rtspPort') or \
response.get('mediaPort')
def camera_image(self):
"""Return a still image response from the camera."""
# Send the request to snap a picture and return raw jpg data
# Handle exception if host is not reachable or url failed
result, response = self._foscam_session.snap_picture_2()
if result == FOSCAM_COMM_ERROR:
return None
return response
@property
def supported_features(self):
"""Return supported features."""
if self._rtsp_port:
return SUPPORT_STREAM
return 0
@property
def stream_source(self):
"""Return the stream source."""
if self._rtsp_port:
return 'rtsp://{}:{}@{}:{}/videoMain'.format(
self._username,
self._password,
self._foscam_session.host,
self._rtsp_port)
return None
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
return self._motion_status
def enable_motion_detection(self):
"""Enable motion detection in camera."""
try:
ret = self._foscam_session.enable_motion_detection()
self._motion_status = ret == FOSCAM_COMM_ERROR
except TypeError:
_LOGGER.debug("Communication problem")
self._motion_status = False
def disable_motion_detection(self):
"""Disable motion detection."""
try:
ret = self._foscam_session.disable_motion_detection()
self._motion_status = ret == FOSCAM_COMM_ERROR
except TypeError:
_LOGGER.debug("Communication problem")
self._motion_status = False
@property
def name(self):
"""Return the name of this camera."""
return self._name
| """
This component provides basic support for Foscam IP cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.foscam/
"""
import logging
import voluptuous as vol
from homeassistant.components.camera import (
Camera, PLATFORM_SCHEMA, SUPPORT_STREAM)
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_PORT)
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['libpyfoscam==1.0']
CONF_IP = 'ip'
DEFAULT_NAME = 'Foscam Camera'
DEFAULT_PORT = 88
FOSCAM_COMM_ERROR = -8
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_IP): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a Foscam IP Camera."""
add_entities([FoscamCam(config)])
class FoscamCam(Camera):
"""An implementation of a Foscam IP camera."""
def __init__(self, device_info):
"""Initialize a Foscam camera."""
from libpyfoscam import FoscamCamera
super(FoscamCam, self).__init__()
ip_address = device_info.get(CONF_IP)
port = device_info.get(CONF_PORT)
self._username = device_info.get(CONF_USERNAME)
self._password = device_info.get(CONF_PASSWORD)
self._name = device_info.get(CONF_NAME)
self._motion_status = False
self._foscam_session = FoscamCamera(
ip_address, port, self._username, self._password, verbose=False)
self._rtsp_port = None
result, response = self._foscam_session.get_port_info()
if result == 0:
self._rtsp_port = response.get('rtspPort') or \
response.get('mediaPort')
def camera_image(self):
"""Return a still image response from the camera."""
# Send the request to snap a picture and return raw jpg data
# Handle exception if host is not reachable or url failed
result, response = self._foscam_session.snap_picture_2()
if result == FOSCAM_COMM_ERROR:
return None
return response
@property
def supported_features(self):
"""Return supported features."""
if self._rtsp_port:
return SUPPORT_STREAM
return 0
@property
def stream_source(self):
"""Return the stream source."""
if self._rtsp_port:
return 'rtsp://{}:{}@{}:{}/videoMain'.format(
self._username,
self._password,
self._foscam_session.host,
self._rtsp_port)
return None
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
return self._motion_status
def enable_motion_detection(self):
"""Enable motion detection in camera."""
try:
ret = self._foscam_session.enable_motion_detection()
self._motion_status = ret == FOSCAM_COMM_ERROR
except TypeError:
_LOGGER.debug("Communication problem")
self._motion_status = False
def disable_motion_detection(self):
"""Disable motion detection."""
try:
ret = self._foscam_session.disable_motion_detection()
self._motion_status = ret == FOSCAM_COMM_ERROR
except TypeError:
_LOGGER.debug("Communication problem")
self._motion_status = False
@property
def name(self):
"""Return the name of this camera."""
return self._name
| Python | 0 |
ec23d68af3cacefe39fd9e9f21f4cdfebe8f02e5 | update mime type when sending email | contact.py | contact.py | from __future__ import (
absolute_import,
print_function,
)
from collections import defaultdict
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from flask import render_template
import json
import requests
from subprocess import (
Popen,
PIPE,
)
from config import (
DOMAIN_NAME,
TELSTRA_CONSUMER_KEY,
TELSTRA_CONSUMER_SECRET,
YO_API_KEY,
)
from constants import (
CONTACT_TYPE_EMAIL,
CONTACT_TYPE_SMS,
CONTACT_TYPE_YO,
)
from dbhelper import get_redis
def send_alerts(alerts):
# organizes the alerts by contact info then sends one alert per contact info
klasses_by_contact = defaultdict(list)
for alert in alerts:
klasses_by_contact[(alert.contact, alert.contact_type)].append(alert.klass)
for contact, klasses in klasses_by_contact.iteritems():
contact, contact_type = contact
if contact_type == CONTACT_TYPE_EMAIL:
alert_by_email(contact, klasses)
elif contact_type == CONTACT_TYPE_SMS:
alert_by_sms(contact, klasses)
elif contact_type == CONTACT_TYPE_YO:
alert_by_yo(contact, klasses)
def create_alert_link(klass_ids):
return DOMAIN_NAME + '/alert?classids=' + ','.join(map(str, klass_ids))
def alert_by_email(email, klasses):
msg = MIMEMultipart('alternative')
msg['Subject'] = 'A spot has opened up in a class!'
msg['From'] = 'alert@' + DOMAIN_NAME
msg['To'] = email
text = "this is the text version of the email"
html = render_template('email.html')
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
msg.attach(part1)
msg.attach(part2)
pipe = Popen(['sendmail', '-f', 'alert@%s' % DOMAIN_NAME, '-t', email], stdin=PIPE).stdin
pipe.write(msg.as_string())
pipe.close()
def alert_by_sms(phone_number, klasses):
send_sms(phone_number,
"A spot has opened up in a class: %s" % create_alert_link(k.klass_id for k in klasses))
def alert_by_yo(username, klasses):
send_yo(username,
create_alert_link(k.klass_id for k in klasses),
'A spot has opened up in a class!')
def send_yo(username, link=None, text=None):
requests.post("http://api.justyo.co/yo/",
data={'api_token': YO_API_KEY,
'username': username,
'link': link,
'text': text})
# TODO: make sure this returns the right http code
def get_telstra_api_access_token():
access_token = get_redis().get('telstra_api_access_token')
if access_token is not None:
return access_token
r = requests.post('https://api.telstra.com/v1/oauth/token',
data={
'client_id': TELSTRA_CONSUMER_KEY,
'client_secret': TELSTRA_CONSUMER_SECRET,
'scope': 'SMS',
'grant_type': 'client_credentials'
}).json()
# TODO: make sure this returns the right http code
# cache the access token in redis, making it expire slightly earlier than it does on the Telstra server
get_redis().setex('telstra_api_access_token', int(r['expires_in']) - 60, r['access_token'])
return r['access_token']
def send_sms(phone_number, message):
access_token = get_telstra_api_access_token()
r = requests.post('https://api.telstra.com/v1/sms/messages',
headers={'Authorization': 'Bearer %s' % access_token},
data=json.dumps({
'to': phone_number,
'body': message
}))
# TODO: make sure this returns the right http code
| from __future__ import (
absolute_import,
print_function,
)
from collections import defaultdict
from flask import render_template
import json
import requests
from subprocess import (
Popen,
PIPE,
)
from config import (
DOMAIN_NAME,
TELSTRA_CONSUMER_KEY,
TELSTRA_CONSUMER_SECRET,
YO_API_KEY,
)
from constants import (
CONTACT_TYPE_EMAIL,
CONTACT_TYPE_SMS,
CONTACT_TYPE_YO,
)
from dbhelper import get_redis
def send_alerts(alerts):
# organizes the alerts by contact info then sends one alert per contact info
klasses_by_contact = defaultdict(list)
for alert in alerts:
klasses_by_contact[(alert.contact, alert.contact_type)].append(alert.klass)
for contact, klasses in klasses_by_contact.iteritems():
contact, contact_type = contact
if contact_type == CONTACT_TYPE_EMAIL:
alert_by_email(contact, klasses)
elif contact_type == CONTACT_TYPE_SMS:
alert_by_sms(contact, klasses)
elif contact_type == CONTACT_TYPE_YO:
alert_by_yo(contact, klasses)
def create_alert_link(klass_ids):
return DOMAIN_NAME + '/alert?classids=' + ','.join(map(str, klass_ids))
def alert_by_email(email, klasses):
email_body = 'Subject: A spot has opened up in a class!\n' + render_template('email.html')
pipe = Popen(['sendmail', '-f', 'alert@%s' % DOMAIN_NAME, '-t', email], stdin=PIPE).stdin
pipe.write(email_body)
pipe.close()
def alert_by_sms(phone_number, klasses):
send_sms(phone_number,
"A spot has opened up in a class: %s" % create_alert_link(k.klass_id for k in klasses))
def alert_by_yo(username, klasses):
send_yo(username,
create_alert_link(k.klass_id for k in klasses),
'A spot has opened up in a class!')
def send_yo(username, link=None, text=None):
requests.post("http://api.justyo.co/yo/",
data={'api_token': YO_API_KEY,
'username': username,
'link': link,
'text': text})
# TODO: make sure this returns the right http code
def get_telstra_api_access_token():
access_token = get_redis().get('telstra_api_access_token')
if access_token is not None:
return access_token
r = requests.post('https://api.telstra.com/v1/oauth/token',
data={
'client_id': TELSTRA_CONSUMER_KEY,
'client_secret': TELSTRA_CONSUMER_SECRET,
'scope': 'SMS',
'grant_type': 'client_credentials'
}).json()
# TODO: make sure this returns the right http code
# cache the access token in redis, making it expire slightly earlier than it does on the Telstra server
get_redis().setex('telstra_api_access_token', int(r['expires_in']) - 60, r['access_token'])
return r['access_token']
def send_sms(phone_number, message):
access_token = get_telstra_api_access_token()
r = requests.post('https://api.telstra.com/v1/sms/messages',
headers={'Authorization': 'Bearer %s' % access_token},
data=json.dumps({
'to': phone_number,
'body': message
}))
# TODO: make sure this returns the right http code
| Python | 0.000001 |
85537e3f8557a76b8b2ad89edc41848c29622c24 | Update the paint tool shape with the viewer image changes | skimage/viewer/plugins/labelplugin.py | skimage/viewer/plugins/labelplugin.py | import numpy as np
from .base import Plugin
from ..widgets import ComboBox, Slider
from ..canvastools import PaintTool
__all__ = ['LabelPainter']
rad2deg = 180 / np.pi
class LabelPainter(Plugin):
name = 'LabelPainter'
def __init__(self, max_radius=20, **kwargs):
super(LabelPainter, self).__init__(**kwargs)
# These widgets adjust plugin properties instead of an image filter.
self._radius_widget = Slider('radius', low=1, high=max_radius,
value=5, value_type='int', ptype='plugin')
labels = [str(i) for i in range(6)]
labels[0] = 'Erase'
self._label_widget = ComboBox('label', labels, ptype='plugin')
self.add_widget(self._radius_widget)
self.add_widget(self._label_widget)
print(self.help())
def help(self):
helpstr = ("Label painter",
"Hold left-mouse button and paint on canvas.")
return '\n'.join(helpstr)
def attach(self, image_viewer):
super(LabelPainter, self).attach(image_viewer)
image = image_viewer.original_image
self.paint_tool = PaintTool(self.image_viewer.ax, image.shape,
on_enter=self.on_enter)
self.paint_tool.radius = self.radius
self.paint_tool.label = self._label_widget.index = 1
self.artists.append(self.paint_tool)
def _on_new_image(self, image):
"""Update plugin for new images."""
self.paint_tool.shape = image.shape
def on_enter(self, overlay):
pass
@property
def radius(self):
return self._radius_widget.val
@radius.setter
def radius(self, val):
self.paint_tool.radius = val
@property
def label(self):
return self._label_widget.val
@label.setter
def label(self, val):
self.paint_tool.label = val
| import numpy as np
from .base import Plugin
from ..widgets import ComboBox, Slider
from ..canvastools import PaintTool
__all__ = ['LabelPainter']
rad2deg = 180 / np.pi
class LabelPainter(Plugin):
name = 'LabelPainter'
def __init__(self, max_radius=20, **kwargs):
super(LabelPainter, self).__init__(**kwargs)
# These widgets adjust plugin properties instead of an image filter.
self._radius_widget = Slider('radius', low=1, high=max_radius,
value=5, value_type='int', ptype='plugin')
labels = [str(i) for i in range(6)]
labels[0] = 'Erase'
self._label_widget = ComboBox('label', labels, ptype='plugin')
self.add_widget(self._radius_widget)
self.add_widget(self._label_widget)
print(self.help())
def help(self):
helpstr = ("Label painter",
"Hold left-mouse button and paint on canvas.")
return '\n'.join(helpstr)
def attach(self, image_viewer):
super(LabelPainter, self).attach(image_viewer)
image = image_viewer.original_image
self.paint_tool = PaintTool(self.image_viewer.ax, image.shape,
on_enter=self.on_enter)
self.paint_tool.radius = self.radius
self.paint_tool.label = self._label_widget.index = 1
self.artists.append(self.paint_tool)
def on_enter(self, overlay):
pass
@property
def radius(self):
return self._radius_widget.val
@radius.setter
def radius(self, val):
self.paint_tool.radius = val
@property
def label(self):
return self._label_widget.val
@label.setter
def label(self, val):
self.paint_tool.label = val
| Python | 0 |
8a6b100e671b4f22dee6b0399eb8a4bc8bf1a97e | update longdesc string | mriqc/info.py | mriqc/info.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
MRIQC
"""
__versionbase__ = '0.8.6'
__versionrev__ = 'a4'
__version__ = __versionbase__ + __versionrev__
__author__ = 'Oscar Esteban'
__email__ = 'code@oscaresteban.es'
__maintainer__ = 'Oscar Esteban'
__copyright__ = ('Copyright 2016, Center for Reproducible Neuroscience, '
'Stanford University')
__credits__ = 'Oscar Esteban'
__license__ = '3-clause BSD'
__status__ = 'Prototype'
__description__ = 'NR-IQMs (no-reference Image Quality Metrics) for MRI'
__longdesc__ = """\
MRIQC provides a series of image processing workflows to extract and compute a series of \
NR (no-reference), IQMs (image quality metrics) to be used in QAPs (quality assessment \
protocols) for MRI (magnetic resonance imaging).
This open-source neuroimaging data processing tool is being developed as a part of the \
MRI image analysis and reproducibility platform offered by the CRN. This pipeline derives \
from, and is heavily influenced by, the PCP Quality Assessment Protocol.
This tool extracts a series of IQMs from structural and functional MRI data. It is also \
scheduled to add diffusion MRI to the target imaging families.
"""
URL = 'http://mriqc.readthedocs.org/'
DOWNLOAD_URL = ('https://pypi.python.org/packages/source/m/mriqc/'
'mriqc-{}.tar.gz'.format(__version__))
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Image Recognition',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
]
REQUIRES = [
'numpy',
'future',
'lockfile',
'six',
'matplotlib',
'nibabel',
'niworkflows>=0.0.3a5',
'pandas',
'dipy',
'jinja2',
'seaborn',
'pyPdf2',
'PyYAML',
'nitime',
'nilearn',
'sklearn',
'scikit-learn'
]
LINKS_REQUIRES = [
'git+https://github.com/oesteban/nipype.git@master#egg=nipype',
'git+https://github.com/oesteban/rst2pdf.git@futurize/stage2#egg=rst2pdf'
]
TESTS_REQUIRES = [
'mock',
'codecov',
'nose',
'doctest-ignore-unicode'
]
EXTRA_REQUIRES = {
'doc': ['sphinx'],
'tests': TESTS_REQUIRES,
'duecredit': ['duecredit']
}
# Enable a handle to install all extra dependencies at once
EXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
MRIQC
"""
__versionbase__ = '0.8.6'
__versionrev__ = 'a4'
__version__ = __versionbase__ + __versionrev__
__author__ = 'Oscar Esteban'
__email__ = 'code@oscaresteban.es'
__maintainer__ = 'Oscar Esteban'
__copyright__ = ('Copyright 2016, Center for Reproducible Neuroscience, '
'Stanford University')
__credits__ = 'Oscar Esteban'
__license__ = '3-clause BSD'
__status__ = 'Prototype'
__description__ = 'NR-IQMs (no-reference Image Quality Metrics) for MRI'
__longdesc__ = """
MRIQC provides a series of image processing workflows to extract and compute a series of
NR (no-reference), IQMs (image quality metrics) to be used in QAPs (quality assessment
protocols) for MRI (magnetic resonance imaging).
This open-source neuroimaging data processing tool is being developed as a part of the
MRI image analysis and reproducibility platform offered by the CRN. This pipeline derives
from, and is heavily influenced by, the PCP Quality Assessment Protocol.
This tool extracts a series of IQMs from structural and functional MRI data. It is also
scheduled to add diffusion MRI to the target imaging families.
"""
URL = 'http://mriqc.readthedocs.org/'
DOWNLOAD_URL = ('https://pypi.python.org/packages/source/m/mriqc/'
'mriqc-{}.tar.gz'.format(__version__))
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Image Recognition',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
]
REQUIRES = [
'numpy',
'future',
'lockfile',
'six',
'matplotlib',
'nibabel',
'niworkflows>=0.0.3a5',
'pandas',
'dipy',
'jinja2',
'seaborn',
'pyPdf2',
'PyYAML',
'nitime',
'nilearn',
'sklearn',
'scikit-learn'
]
LINKS_REQUIRES = [
'git+https://github.com/oesteban/nipype.git@master#egg=nipype',
'git+https://github.com/oesteban/rst2pdf.git@futurize/stage2#egg=rst2pdf'
]
TESTS_REQUIRES = [
'mock',
'codecov',
'nose',
'doctest-ignore-unicode'
]
EXTRA_REQUIRES = {
'doc': ['sphinx'],
'tests': TESTS_REQUIRES,
'duecredit': ['duecredit']
}
# Enable a handle to install all extra dependencies at once
EXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())]
| Python | 0.000004 |
fb2c9469f6d026e77e0f8c20a12f4373e68f9ba2 | update dependency xgboost to v1 (#543) | training/xgboost/structured/base/setup.py | training/xgboost/structured/base/setup.py | #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helpfer functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
'tensorflow==1.15.4',
'scikit-learn==0.20.2',
'pandas==0.24.2',
'xgboost==1.5.0',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | xgboost | Base'
)
| #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helpfer functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
'tensorflow==1.15.4',
'scikit-learn==0.20.2',
'pandas==0.24.2',
'xgboost==0.81',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | xgboost | Base'
)
| Python | 0 |
9aff0b8d5989bf11242ac30b718c23242631668e | call enable in RataryEncoder.__init__, fixed a few typos | libraries/RotaryEncoder/rotary_encoder.py | libraries/RotaryEncoder/rotary_encoder.py |
import os
from bbio.platform import sysfs
from bbio import addToCleanup, cape_manager, OCP_PATH, delay
class RotaryEncoder(object):
_eqep_dirs = [
'%s/48300000.epwmss/48300180.eqep' % OCP_PATH,
'%s/48302000.epwmss/48302180.eqep' % OCP_PATH,
'%s/48304000.epwmss/48304180.eqep' % OCP_PATH
]
EQEP0 = 0
EQEP1 = 1
EQEP2 = 2
EQEP2b = 3
def __init__(self, eqep_num):
assert 0 <= eqep_num <= 3 , "eqep_num must be between 0 and 3"
if eqep_num == 3:
overlay = 'bone_eqep2b'
eqep_num = 2
else:
overlay = 'bone_eqep%i' % eqep_num
assert os.path.exists("/lib/firmware/bone_eqep2b-00A0.dtbo"), \
"eQEP driver not present, update to a newer image to use the eQEP library"
cape_manager.load(overlay, auto_unload=False)
delay(250) # Give driver time to load
self.base_dir = self._eqep_dirs[eqep_num]
self.enable()
addToCleanup(self.disable)
def enable(self):
enable_file = "%s/enabled" % self.base_dir
return sysfs.kernelFilenameIO(enable_file, 1)
def disable(self):
enable_file = "%s/enabled" % self.base_dir
return sysfs.kernelFilenameIO(enable_file, 0)
def setAbsolute(self):
'''
Set mode as Absolute
'''
mode_file = "%s/mode" % self.base_dir
return sysfs.kernelFilenameIO(mode_file, 0)
def setRelative(self):
'''
Set mode as Relative
'''
mode_file = "%s/mode" % self.base_dir
return sysfs.kernelFilenameIO(mode_file, 1)
def getMode(self):
mode_file = "%s/mode" % self.base_dir
return sysfs.kernelFilenameIO(mode_file)
def getPosition(self):
'''
Get the current position of the encoder
'''
position_file = "%s/position" % self.base_dir
return sysfs.kernelFilenameIO(position_file)
def setFrequency(self,freq):
'''
Set the frequency in Hz at which the driver reports new positions.
'''
period_file = "%s/period" % self.base_dir
return sysfs.kernelFilenameIO(period_file,1000000000/freq)
def setPosition(self,val):
'''
Give a new value to the current position
'''
position_file = "%s/position" % self.base_dir
return sysfs.kernelFilenameIO(position_file,val)
def zero(self):
'''
Set the current position to 0
'''
return self.setPosition(0)
|
import os
from bbio.platform import sysfs
from bbio import addToCleanup, cape_manager, OCP_PATH, delay
class RotaryEncoder(object):
_eqep_dirs = [
'%s/48300000.epwmss/48300180.eqep' % OCP_PATH,
'%s/48302000.epwmss/48302180.eqep' % OCP_PATH,
'%s/48304000.epwmss/48304180.eqep' % OCP_PATH
]
EQEP0 = 0
EQEP1 = 1
EQEP2 = 2
EQEP2b = 3
def __init__(self, eqep_num):
assert 0 <= eqep_num <= 3 , "eqep_num must be between 0 and 3"
if eqep_num == 3:
overlay = 'bone_eqep2b'
eqep_num = 2
else:
overlay = 'bone_eqep%i' % eqep_num
assert os.path.exists("/lib/firmware/bone_eqep2b-00A0.dtbo"), \
"eQEP driver not present, update to a newer image to use the eQEP library"
cape_manager.load(overlay, auto_unload=False)
delay(250) # Give driver time to load
self.base_dir = self._eqep_dirs[eqep_num]
addToCleanup(self.disable)
def enable(self,m):
enable_file = "%s/enabled" % self.base_dir
return sysfs.kernelFilenameIO(enable_file, 1)
def disable(self):
enable_file = "%s/enabled" % self.base_dir
return sysfs.kernelFilenameIO(enable_file, 0)
def setAbsolute(self):
'''
Set mode as Absolute
'''
set_mode = "%s/mode" % self.base_dir
return sysfs.kernelFilenameIO(set_mode, 0)
def setRelative(self):
'''
Set mode as Relative
'''
set_mode = "%s/mode" % self.base_dir
return sysfs.kernelFilenameIO(enable_file, 1)
def getMode(self):
mode_file = "%s/enabled" % self.base_dir
return sysfs.kernelFilenameIO(mode_file)
def getPosition(self):
'''
Get the current position of the encoder
'''
position_file = "%s/position" % self.base_dir
return sysfs.kernelFilenameIO(position_file)
def setFrequency(self,freq):
'''
Set the frequency in Hz at which the driver reports new positions.
'''
period_file = "%s/period" % self.base_dir
return sysfs.kernelFilenameIO(period_file,1000000000/freq)
def setPosition(self,val):
'''
Give a new value to the current position
'''
position_file = "%s/position" % self.base_dir
return sysfs.kernelFilenameIO(position_file,val)
def zero(self):
'''
Set the current position to 0
'''
return self.setPosition(0)
| Python | 0.995994 |
06a1b635b02e001e798fa57e70a56ad17f9df7d0 | fix country cleanup migrate script 5 | portality/migrate/p1p2/country_cleanup.py | portality/migrate/p1p2/country_cleanup.py | import sys
from datetime import datetime
from portality import models
from portality import xwalk
def main(argv=sys.argv):
start = datetime.now()
journal_iterator = models.Journal.all_in_doaj()
counter = 0
for j in journal_iterator:
counter += 1
oldcountry = j.bibjson().country
j.bibjson().country = xwalk.get_country_code(j.bibjson().country)
newcountry = j.bibjson().country
print j.bibjson().title.encode('utf-8'), ',', j.bibjson().get_one_identifier(j.bibjson().P_ISSN), j.bibjson().get_one_identifier(j.bibjson().E_ISSN), ',', 'Old country:', oldcountry.encode('utf-8'), ',', 'New country:', newcountry.encode('utf-8')
j.prep()
j.save()
end = datetime.now()
print "Updated Journals", counter
print start, end
print 'Time taken:', end-start
if __name__ == '__main__':
main()
| import sys
from datetime import datetime
from portality import models
from portality import xwalk
def main(argv=sys.argv):
start = datetime.now()
journal_iterator = models.Journal.all_in_doaj()
counter = 0
for j in journal_iterator:
counter += 1
oldcountry = j.bibjson().country
j.bibjson().country = xwalk.get_country_code(j.bibjson().country)
newcountry = j.bibjson().country
print j.bibjson().title.decode('utf-8'), ',', j.bibjson().get_one_identifier(j.bibjson().P_ISSN), j.bibjson().get_one_identifier(j.bibjson().E_ISSN), ',', 'Old country:', oldcountry.decode('utf-8'), ',', 'New country:', newcountry.decode('utf-8')
j.prep()
j.save()
end = datetime.now()
print "Updated Journals", counter
print start, end
print 'Time taken:', end-start
if __name__ == '__main__':
main()
| Python | 0.000001 |
042446c8394794255471d784e041c1d9c4ef0752 | Update example config | calplus/conf/providers.py | calplus/conf/providers.py | """Provider Configuration"""
from oslo_config import cfg
# Openstack Authenticate Configuration.
openstack_group = cfg.OptGroup('openstack',
title='OpenStack Hosts')
openstack_opts = [
cfg.StrOpt('driver_name',
default='OpenStackHUST'),
cfg.StrOpt('type_driver',
default='openstack'),
cfg.StrOpt('os_auth_url',
default='localhost'),
cfg.StrOpt('os_project_name',
default='admin'),
cfg.StrOpt('os_username',
default='admin'),
cfg.StrOpt('os_password',
default='ADMIN_PASS'),
cfg.StrOpt('os_project_domain_name',
default='default'),
cfg.StrOpt('os_user_domain_name',
default='default'),
cfg.IntOpt('os_identity_api_version',
default='3'),
cfg.IntOpt('os_image_api_version',
default='2'),
cfg.StrOpt('tenant_id',
default=''),
cfg.StrOpt('os_novaclient_version',
default='2.1'),
cfg.DictOpt('limit',
default={
"subnet": 10,
"network": 10,
"floatingip": 50,
"subnetpool": -1,
"security_group_rule": 100,
"security_group": 10,
"router": 10,
"rbac_policy": -1,
"port": 50
})
]
# Amazon Authenticate Configuration.
amazon_group = cfg.OptGroup('amazon',
title='Amazon Hosts')
amazon_opts = [
cfg.StrOpt('driver_name',
default='AmazonHUSTACC'),
cfg.StrOpt('type_driver',
default='amazon'),
cfg.StrOpt('aws_access_key_id',
default='AWS_ACCESS_KEY_ID'),
cfg.StrOpt('aws_secret_access_key',
default='AWS_SECRET_ACCESS_KEY'),
cfg.StrOpt('region_name',
default='us-east-1'),
cfg.StrOpt('endpoint_url',
default='http://localhost:8788'),
cfg.DictOpt('limit',
default={
"subnet": 10,
"vpc": 5,
"floatingip": 50,
"subnetpool": -1,
"security_group_rule": 100,
"security_group": 10,
"router": 10,
"rbac_policy": -1,
"port": 50
})
]
#Provider Configuration
provider_group = cfg.OptGroup('providers',
title='Supported Providers')
enable_drivers = cfg.ListOpt(
'enable_drivers',
default=[
openstack_group.name,
amazon_group.name
],
help='List of available Driver Hosts'
)
driver_mapper = cfg.DictOpt('driver_mapper',
default={
'openstack': 'OpenstackDriver',
'amazon': 'AmazonDriver',
},
help="""
Dict with key is provider, and value is
Driver class.
""")
provider_opts = [
driver_mapper,
enable_drivers
]
def register_opts(conf):
conf.register_group(provider_group)
conf.register_opts(provider_opts, group=provider_group)
conf.register_group(openstack_group)
conf.register_opts(openstack_opts, group=openstack_group)
conf.register_group(amazon_group)
conf.register_opts(amazon_opts, group=amazon_group)
def list_opts():
return {
provider_group: provider_opts,
openstack_group: openstack_opts,
amazon_group: amazon_opts,
}
| """Provider Configuration"""
from oslo_config import cfg
# Openstack Authenticate Configuration.
openstack_group = cfg.OptGroup('openstack1',
title='OpenStack Hosts')
openstack_opts = [
cfg.StrOpt('driver_name',
default='OpenStackHUST'),
cfg.StrOpt('type_driver',
default='openstack'),
cfg.StrOpt('os_auth_url',
default='localhost'),
cfg.StrOpt('os_project_name',
default='admin'),
cfg.StrOpt('os_username',
default='admin'),
cfg.StrOpt('os_password',
default='ADMIN_PASS'),
cfg.StrOpt('os_project_domain_name',
default='default'),
cfg.StrOpt('os_user_domain_name',
default='default'),
cfg.IntOpt('os_identity_api_version',
default='3'),
cfg.IntOpt('os_image_api_version',
default='2'),
cfg.StrOpt('tenant_id',
default=''),
cfg.StrOpt('os_novaclient_version',
default='2.1'),
cfg.DictOpt('limit',
default={
"subnet": 10,
"network": 10,
"floatingip": 50,
"subnetpool": -1,
"security_group_rule": 100,
"security_group": 10,
"router": 10,
"rbac_policy": -1,
"port": 50
})
]
# Amazon Authenticate Configuration.
amazon_group = cfg.OptGroup('amazon1',
title='Amazon Hosts')
amazon_opts = [
cfg.StrOpt('driver_name',
default='AmazonHUSTACC'),
cfg.StrOpt('type_driver',
default='amazon'),
cfg.StrOpt('aws_access_key_id',
default='localhost'),
cfg.StrOpt('aws_secret_access_key',
default='admin'),
cfg.StrOpt('region_name',
default='localhost'),
cfg.StrOpt('endpoint_url',
default='http://localhost:35357/v3/'),
cfg.DictOpt('limit',
default={
"subnet": 10,
"vpc": 5,
"floatingip": 50,
"subnetpool": -1,
"security_group_rule": 100,
"security_group": 10,
"router": 10,
"rbac_policy": -1,
"port": 50
})
]
#Provider Configuration
provider_group = cfg.OptGroup('providers',
title='Supported Providers')
enable_drivers = cfg.ListOpt(
'enable_drivers',
default=[
openstack_group.name,
amazon_group.name
],
help='List of available Driver Hosts'
)
driver_mapper = cfg.DictOpt('driver_mapper',
default={
'openstack': 'OpenstackDriver',
'amazon': 'AmazonDriver',
},
help="""
Dict with key is provider, and value is
Driver class.
""")
provider_opts = [
driver_mapper,
enable_drivers
]
def register_opts(conf):
conf.register_group(provider_group)
conf.register_opts(provider_opts, group=provider_group)
conf.register_group(openstack_group)
conf.register_opts(openstack_opts, group=openstack_group)
conf.register_group(amazon_group)
conf.register_opts(amazon_opts, group=amazon_group)
def list_opts():
return {
provider_group: provider_opts,
openstack_group: openstack_opts,
amazon_group: amazon_opts,
}
| Python | 0.000001 |
ccaca70aa28bdd3e4f2a9c6e46d76e3ff8653f88 | Fix public page hashids issue | crestify/views/public.py | crestify/views/public.py | from crestify import app, hashids
from crestify.models import Bookmark
from flask import render_template
@app.route('/public/<string:bookmark_id>', methods=['GET'])
def bookmark_public(bookmark_id):
bookmark_id = hashids.decode(str(bookmark_id))[0]
query = Bookmark.query.get(bookmark_id)
return render_template("public/bookmark_share.html", bookmark=query)
| from crestify import app, hashids
from crestify.models import Bookmark
from flask import render_template
@app.route('/public/<string:bookmark_id>', methods=['GET'])
def bookmark_public(bookmark_id):
bookmark_id = hashids.decode(bookmark_id)[0]
query = Bookmark.query.get(bookmark_id)
return render_template("public/bookmark_share.html", bookmark=query)
| Python | 0 |
bfdf4bffdb30e6f9651c96afb711d2a871b9ff87 | fix output to shell | create_recipes.py | create_recipes.py | import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
" recipies will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
recipe_log_file = open(log_dir + 'recipe_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
print(msg)
err = subprocess.call(['conda', 'skeleton', 'pypi', package,
'--output-dir', recipes_dir],
stdout=recipe_log_file, stderr=recipe_log_file)
if err is 0:
msg = "Succesfully created conda recipe for %s\n" % (package)
successes.append(package)
else:
msg = "Failed to create conda recipe for %s\n" % (package)
failures.append(package)
print(msg)
recipe_log_file.close()
successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
| import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
" recipies will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
recipe_log_file = open(log_dir + 'recipe_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
recipe_log_file.write(msg)
print(msg)
err = subprocess.call(['conda', 'skeleton', 'pypi', package,
'--output-dir', recipes_dir],
stdout=recipe_log_file, stderr=recipe_log_file)
if err is 0:
successes.append(package)
else:
failures.append(package)
recipe_log_file.close()
successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
| Python | 0.000023 |
21000dfd4bf63ceae0e8c6ac343624fbf5c5bea2 | read tags before people | cat/test_cat.py | cat/test_cat.py | from cat.code import GenerateSite
import unittest
import json
import os
import sys
def read_json(file):
with open(file) as fh:
return json.loads(fh.read())
#return fh.read()
class TestDemo(unittest.TestCase):
def test_generate(self):
GenerateSite().generate_site()
assert True
# This fails on travis, we probably need better reporting to see what is the actual difference
# as I cannot see it. Unless it is only the file_date
files = [
'html/v/yougottalovefrontend-2016/vitaly-friedman-cutting-edge-responsive-web-design.json',
'html/p/zohar-babin.json',
]
for result_file in files:
expected_file = 'samples/' + os.path.basename(result_file)
#sys.stderr.write(result_file)
#sys.stderr.write("\n")
#sys.stderr.write(expected_file)
#sys.stderr.write("\n")
# read both files
result = read_json(result_file)
expected = read_json(expected_file)
if 'file_date' in expected:
del(expected['file_date'])
del(result['file_date'])
if result != expected:
print("While testing {}\n".format(result_file))
print("Expected: {}".format(expected))
print("Received: {}".format(result))
assert result == expected
def test_videos(self):
gs = GenerateSite()
gs.read_videos()
report = gs.check_videos()
sys.stderr.write(report)
assert report == ''
def test_people(self):
gs = GenerateSite()
gs.read_tags()
gs.read_people()
report = gs.check_people()
sys.stderr.write(report)
assert report == ''
# vim: expandtab
| from cat.code import GenerateSite
import unittest
import json
import os
import sys
def read_json(file):
with open(file) as fh:
return json.loads(fh.read())
#return fh.read()
class TestDemo(unittest.TestCase):
def test_generate(self):
GenerateSite().generate_site()
assert True
# This fails on travis, we probably need better reporting to see what is the actual difference
# as I cannot see it. Unless it is only the file_date
files = [
'html/v/yougottalovefrontend-2016/vitaly-friedman-cutting-edge-responsive-web-design.json',
'html/p/zohar-babin.json',
]
for result_file in files:
expected_file = 'samples/' + os.path.basename(result_file)
#sys.stderr.write(result_file)
#sys.stderr.write("\n")
#sys.stderr.write(expected_file)
#sys.stderr.write("\n")
# read both files
result = read_json(result_file)
expected = read_json(expected_file)
if 'file_date' in expected:
del(expected['file_date'])
del(result['file_date'])
if result != expected:
print("While testing {}\n".format(result_file))
print("Expected: {}".format(expected))
print("Received: {}".format(result))
assert result == expected
def test_videos(self):
gs = GenerateSite()
gs.read_videos()
report = gs.check_videos()
sys.stderr.write(report)
assert report == ''
def test_people(self):
gs = GenerateSite()
gs.read_people()
report = gs.check_people()
sys.stderr.write(report)
assert report == ''
# vim: expandtab
| Python | 0 |
b220af1b5219c59735bd1f35493b0a659c627738 | Fix cookie handling for tornado | social/strategies/tornado_strategy.py | social/strategies/tornado_strategy.py | import json
from tornado.template import Loader, Template
from social.utils import build_absolute_uri
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class TornadoTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
path, tpl = tpl.rsplit('/', 1)
return Loader(path).load(tpl).generate(**context)
def render_string(self, html, context):
return Template(html).generate(**context)
class TornadoStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = TornadoTemplateStrategy
def __init__(self, storage, request_handler, tpl=None):
self.request_handler = request_handler
self.request = self.request_handler.request
super(TornadoStrategy, self).__init__(storage, tpl)
def get_setting(self, name):
return self.request_handler.settings[name]
def request_data(self, merge=True):
# Multiple valued arguments not supported yet
return dict((key, val[0])
for key, val in self.request.arguments.iteritems())
def request_host(self):
return self.request.host
def redirect(self, url):
return self.request_handler.redirect(url)
def html(self, content):
self.request_handler.write(content)
def session_get(self, name, default=None):
value = self.request_handler.get_secure_cookie(name)
if value:
return json.loads(value.decode())
return default
def session_set(self, name, value):
self.request_handler.set_secure_cookie(name, json.dumps(value).encode())
def session_pop(self, name):
value = self.session_get(name)
self.request_handler.clear_cookie(name)
return value
def session_setdefault(self, name, value):
pass
def build_absolute_uri(self, path=None):
return build_absolute_uri('{0}://{1}'.format(self.request.protocol,
self.request.host),
path)
def partial_to_session(self, next, backend, request=None, *args, **kwargs):
return json.dumps(super(TornadoStrategy, self).partial_to_session(
next, backend, request=request, *args, **kwargs
))
def partial_from_session(self, session):
if session:
return super(TornadoStrategy, self).partial_to_session(
json.loads(session)
)
| import json
from tornado.template import Loader, Template
from social.utils import build_absolute_uri
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class TornadoTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
path, tpl = tpl.rsplit('/', 1)
return Loader(path).load(tpl).generate(**context)
def render_string(self, html, context):
return Template(html).generate(**context)
class TornadoStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = TornadoTemplateStrategy
def __init__(self, storage, request_handler, tpl=None):
self.request_handler = request_handler
self.request = self.request_handler.request
super(TornadoStrategy, self).__init__(storage, tpl)
def get_setting(self, name):
return self.request_handler.settings[name]
def request_data(self, merge=True):
# Multiple valued arguments not supported yet
return dict((key, val[0])
for key, val in self.request.arguments.iteritems())
def request_host(self):
return self.request.host
def redirect(self, url):
return self.request_handler.redirect(url)
def html(self, content):
self.request_handler.write(content)
def session_get(self, name, default=None):
return self.request_handler.get_secure_cookie(name) or default
def session_set(self, name, value):
self.request_handler.set_secure_cookie(name, str(value))
def session_pop(self, name):
value = self.request_handler.get_secure_cookie(name)
self.request_handler.set_secure_cookie(name, '')
return value
def session_setdefault(self, name, value):
pass
def build_absolute_uri(self, path=None):
return build_absolute_uri('{0}://{1}'.format(self.request.protocol,
self.request.host),
path)
def partial_to_session(self, next, backend, request=None, *args, **kwargs):
return json.dumps(super(TornadoStrategy, self).partial_to_session(
next, backend, request=request, *args, **kwargs
))
def partial_from_session(self, session):
if session:
return super(TornadoStrategy, self).partial_to_session(
json.loads(session)
)
| Python | 0.000001 |
c5db8af5faca762e574a5b3b6117a0253e59cd05 | use new urls module | couchexport/urls.py | couchexport/urls.py | from django.conf.urls import *
urlpatterns = patterns('',
url(r'^model/$', 'couchexport.views.export_data', name='model_download_excel'),
url(r'^async/$', 'couchexport.views.export_data_async', name='export_data_async'),
url(r'^saved/(?P<export_id>[\w-]+)/$', 'couchexport.views.download_saved_export',
name='couchexport_download_saved_export'),
)
| from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^model/$', 'couchexport.views.export_data', name='model_download_excel'),
url(r'^async/$', 'couchexport.views.export_data_async', name='export_data_async'),
url(r'^saved/(?P<export_id>[\w-]+)/$', 'couchexport.views.download_saved_export',
name='couchexport_download_saved_export'),
)
| Python | 0.000001 |
b68da6c5b64009dbd2d53206be4c8d98ed1b0a45 | Add print option to exercise_oaipmh.py | librisxl-tools/scripts/exercise_oaipmh.py | librisxl-tools/scripts/exercise_oaipmh.py | import requests
from lxml import etree
from StringIO import StringIO
import time
PMH = "{http://www.openarchives.org/OAI/2.0/}"
def parse_oaipmh(start_url, name, passwd, do_print=False):
start_time = time.time()
resumption_token = None
record_count = 0
while True:
url = make_next_url(start_url, resumption_token)
res = requests.get(url, auth=(name, passwd), stream=True, timeout=3600)
if do_print:
data = res.raw.read()
print data
source = StringIO(data)
else:
source = res.raw
record_root = etree.parse(source)
record_count += len(record_root.findall("{0}ListRecords/{0}record".format(PMH)))
resumption_token = record_root.findtext("{0}ListRecords/{0}resumptionToken".format(PMH))
elapsed = time.time() - start_time
print "Record count: %s. Got resumption token: %s. Elapsed time: %s. Records/second: %s" % (record_count, resumption_token, elapsed, record_count / elapsed)
if not resumption_token:
break
def make_next_url(base_url, resumption_token=None):
params = "?verb=ListRecords&resumptionToken=%s" % resumption_token if resumption_token else "?verb=ListRecords&metadataPrefix=marcxml"
return base_url + params
if __name__ == '__main__':
from sys import argv
args = argv[1:]
if '-p' in args:
args.remove('-p')
do_print = True
else:
do_print = False
if not args:
print "Usage: %s OAI_PMH_URL [NAME, PASSWORD] [-p]" % argv[0]
exit()
start_url = args.pop(0)
if args:
name, passwd = args[:2]
else:
name, passwd = None, None
parse_oaipmh(start_url, name, passwd, do_print)
| import requests
from lxml import etree
import time
PMH = "{http://www.openarchives.org/OAI/2.0/}"
def parse_oaipmh(start_url, name, passwd):
start_time = time.time()
resumption_token = None
record_count = 0
while True:
url = make_next_url(start_url, resumption_token)
res = requests.get(url, auth=(name, passwd), stream=True, timeout=3600)
record_root = etree.parse(res.raw)
record_count += len(record_root.findall("{0}ListRecords/{0}record".format(PMH)))
resumption_token = record_root.findtext("{0}ListRecords/{0}resumptionToken".format(PMH))
elapsed = time.time() - start_time
print "Record count: %s. Got resumption token: %s. Elapsed time: %s. Records/second: %s" % (record_count, resumption_token, elapsed, record_count / elapsed)
if not resumption_token:
break
def make_next_url(base_url, resumption_token=None):
params = "?verb=ListRecords&resumptionToken=%s" % resumption_token if resumption_token else "?verb=ListRecords&metadataPrefix=marcxml"
return base_url + params
if __name__ == '__main__':
from sys import argv
args = argv[1:]
start_url = (args.pop(0) if len(args) == 3
else "http://data.libris.kb.se/hold/oaipmh")
name, passwd = args[:2]
parse_oaipmh(start_url, name, passwd)
| Python | 0.000005 |
a8104d2765ef97b698f108192dfc0b334498151a | Add ability to look up other rietveld instances | my_reviews.py | my_reviews.py | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Get rietveld stats.
Example:
- my_reviews.py -o me@chromium.org -Q for stats for last quarter.
"""
import datetime
import optparse
import os
import sys
import rietveld
def print_reviews(owner, reviewer, created_after, created_before, instance_url):
"""Prints issues with the filter.
Set with_messages=True to search() call bellow if you want each message too.
If you only want issue numbers, use keys_only=True in the search() call.
You can then use remote.get_issue_properties(issue, True) to get the data per
issue.
"""
remote = rietveld.Rietveld(instance_url, None, None)
# See def search() in rietveld.py to see all the filters you can use.
for issue in remote.search(
owner=owner,
reviewer=reviewer,
created_after=created_after,
created_before=created_before,
keys_only=False,
with_messages=False,
):
# By default, hide commit-bot and the domain.
reviewers = set(r.split('@', 1)[0] for r in issue['reviewers'])
reviewers -= set(('commit-bot',))
# Strip time.
timestamp = issue['created'][:10]
# More information is available, print issue.keys() to see them.
print '%d: %s %s' % (issue['issue'], timestamp, ', '.join(reviewers))
def get_previous_quarter(today):
"""There are four quarters, 01-03, 04-06, 07-09, 10-12.
If today is in the last month of a quarter, assume it's the current quarter
that is requested.
"""
year = today.year
month = today.month - (today.month % 3)
if not month:
month = 12
year -= 1
previous_month = month - 2
return (
'%d-%02d-01' % (year, previous_month),
'%d-%02d-01' % (year, month))
def main():
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-o', '--owner')
parser.add_option('-r', '--reviewer')
parser.add_option('-c', '--created_after')
parser.add_option('-C', '--created_before')
parser.add_option('-Q', '--last_quarter', action='store_true')
parser.add_option('-i', '--instance_url', default='codereview.chromium.org')
# Remove description formatting
parser.format_description = lambda x: parser.description
options, args = parser.parse_args()
if args:
parser.error('Args unsupported')
if not options.owner and not options.reviewer:
options.owner = os.environ['EMAIL_ADDRESS']
if '@' not in options.owner:
parser.error('Please specify at least -o or -r')
print 'Defaulting to owner=%s' % options.owner
if options.last_quarter:
today = datetime.date.today()
options.created_after, options.created_before = get_previous_quarter(today)
print 'Using range %s to %s' % (
options.created_after, options.created_before)
print_reviews(
options.owner, options.reviewer,
options.created_after, options.created_before,
options.instance_url)
return 0
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Get rietveld stats.
Example:
- my_reviews.py -o me@chromium.org -Q for stats for last quarter.
"""
import datetime
import optparse
import os
import sys
import rietveld
def print_reviews(owner, reviewer, created_after, created_before):
"""Prints issues with the filter.
Set with_messages=True to search() call bellow if you want each message too.
If you only want issue numbers, use keys_only=True in the search() call.
You can then use remote.get_issue_properties(issue, True) to get the data per
issue.
"""
instance_url = 'codereview.chromium.org'
remote = rietveld.Rietveld(instance_url, None, None)
# See def search() in rietveld.py to see all the filters you can use.
for issue in remote.search(
owner=owner,
reviewer=reviewer,
created_after=created_after,
created_before=created_before,
keys_only=False,
with_messages=False,
):
# By default, hide commit-bot and the domain.
reviewers = set(r.split('@', 1)[0] for r in issue['reviewers'])
reviewers -= set(('commit-bot',))
# Strip time.
timestamp = issue['created'][:10]
# More information is available, print issue.keys() to see them.
print '%d: %s %s' % (issue['issue'], timestamp, ', '.join(reviewers))
def get_previous_quarter(today):
"""There are four quarters, 01-03, 04-06, 07-09, 10-12.
If today is in the last month of a quarter, assume it's the current quarter
that is requested.
"""
year = today.year
month = today.month - (today.month % 3)
if not month:
month = 12
year -= 1
previous_month = month - 2
return (
'%d-%02d-01' % (year, previous_month),
'%d-%02d-01' % (year, month))
def main():
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-o', '--owner')
parser.add_option('-r', '--reviewer')
parser.add_option('-c', '--created_after')
parser.add_option('-C', '--created_before')
parser.add_option('-Q', '--last_quarter', action='store_true')
# Remove description formatting
parser.format_description = lambda x: parser.description
options, args = parser.parse_args()
if args:
parser.error('Args unsupported')
if not options.owner and not options.reviewer:
options.owner = os.environ['EMAIL_ADDRESS']
if '@' not in options.owner:
parser.error('Please specify at least -o or -r')
print 'Defaulting to owner=%s' % options.owner
if options.last_quarter:
today = datetime.date.today()
options.created_after, options.created_before = get_previous_quarter(today)
print 'Using range %s to %s' % (
options.created_after, options.created_before)
print_reviews(
options.owner, options.reviewer,
options.created_after, options.created_before)
return 0
if __name__ == '__main__':
sys.exit(main())
| Python | 0.000001 |
f4408cb2feb5a28a5117fefebe782a61ea80de96 | fix res_company | hr_employee_time_clock/models/__init__.py | hr_employee_time_clock/models/__init__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import hr_timesheet_sheet
from . import hr_attendance
from . import hr_holidays_public
from . import employee_attendance_analytic
from . import resource_calendar
from . import hr_holidays
from . import account_analytic_line
from . import hr_department
from . import hr_employee
from . import hr_timesheet_sheet_day
from . import hr_timesheet_sheet_account
from . import res_company
from . import hr_contract
# from . import res_config_settings
from . import res_users
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import hr_timesheet_sheet
from . import hr_attendance
from . import hr_holidays_public
from . import employee_attendance_analytic
from . import resource_calendar
from . import hr_holidays
from . import account_analytic_line
from . import hr_department
from . import hr_employee
from . import hr_timesheet_sheet_day
from . import hr_timesheet_sheet_account
# from . import res_company
from . import hr_contract
# from . import res_config_settings
from . import res_users
| Python | 0.000001 |
f71a4ca03b8c7c63816bab57a71f9d28a7139e2d | Add justification for utility method as comment. | contrib/vcloud/vcloud_util.py | contrib/vcloud/vcloud_util.py | # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import collections
import os
import sys
import benchexec.util
sys.dont_write_bytecode = True # prevent creation of .pyc files
def parse_vcloud_run_result(values):
result_values = collections.OrderedDict()
def parse_time_value(s):
if s[-1] != "s":
raise ValueError('Cannot parse "{0}" as a time value.'.format(s))
return float(s[:-1])
def set_exitcode(new):
if "exitcode" in result_values:
old = result_values["exitcode"]
assert (
old == new
), "Inconsistent exit codes {} and {} from VerifierCloud".format(old, new)
else:
result_values["exitcode"] = new
for key, value in values:
value = value.strip()
if key in ["cputime", "walltime"]:
result_values[key] = parse_time_value(value)
elif key == "memory":
result_values["memory"] = int(value.strip("B"))
elif key == "exitcode":
set_exitcode(benchexec.util.ProcessExitCode.from_raw(int(value)))
elif key == "returnvalue":
set_exitcode(benchexec.util.ProcessExitCode.create(value=int(value)))
elif key == "exitsignal":
set_exitcode(benchexec.util.ProcessExitCode.create(signal=int(value)))
elif (
key in ["host", "terminationreason", "cpuCores", "memoryNodes", "starttime"]
or key.startswith("blkio-")
or key.startswith("cpuenergy")
or key.startswith("energy-")
or key.startswith("cputime-cpu")
):
result_values[key] = value
elif key not in ["command", "timeLimit", "coreLimit", "memoryLimit"]:
result_values["vcloud-" + key] = value
return result_values
def parse_frequency_value(s):
# Contrary to benchexec.util.parse_frequency_value, this handles float values.
if not s:
return s
s = s.strip()
pos = len(s)
while pos and not s[pos - 1].isdigit():
pos -= 1
number = float(s[:pos])
unit = s[pos:].strip()
if not unit or unit == "Hz":
return int(number)
elif unit == "kHz":
return int(number * 1000)
elif unit == "MHz":
return int(number * 1000 * 1000)
elif unit == "GHz":
return int(number * 1000 * 1000 * 1000)
else:
raise ValueError(
"unknown unit: {} (allowed are Hz, kHz, MHz, and GHz)".format(unit)
)
def is_windows():
return os.name == "nt"
def force_linux_path(path):
if is_windows():
return path.replace("\\", "/")
return path
| # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import collections
import os
import sys
import benchexec.util
sys.dont_write_bytecode = True # prevent creation of .pyc files
def parse_vcloud_run_result(values):
result_values = collections.OrderedDict()
def parse_time_value(s):
if s[-1] != "s":
raise ValueError('Cannot parse "{0}" as a time value.'.format(s))
return float(s[:-1])
def set_exitcode(new):
if "exitcode" in result_values:
old = result_values["exitcode"]
assert (
old == new
), "Inconsistent exit codes {} and {} from VerifierCloud".format(old, new)
else:
result_values["exitcode"] = new
for key, value in values:
value = value.strip()
if key in ["cputime", "walltime"]:
result_values[key] = parse_time_value(value)
elif key == "memory":
result_values["memory"] = int(value.strip("B"))
elif key == "exitcode":
set_exitcode(benchexec.util.ProcessExitCode.from_raw(int(value)))
elif key == "returnvalue":
set_exitcode(benchexec.util.ProcessExitCode.create(value=int(value)))
elif key == "exitsignal":
set_exitcode(benchexec.util.ProcessExitCode.create(signal=int(value)))
elif (
key in ["host", "terminationreason", "cpuCores", "memoryNodes", "starttime"]
or key.startswith("blkio-")
or key.startswith("cpuenergy")
or key.startswith("energy-")
or key.startswith("cputime-cpu")
):
result_values[key] = value
elif key not in ["command", "timeLimit", "coreLimit", "memoryLimit"]:
result_values["vcloud-" + key] = value
return result_values
def parse_frequency_value(s):
if not s:
return s
s = s.strip()
pos = len(s)
while pos and not s[pos - 1].isdigit():
pos -= 1
number = float(s[:pos])
unit = s[pos:].strip()
if not unit or unit == "Hz":
return int(number)
elif unit == "kHz":
return int(number * 1000)
elif unit == "MHz":
return int(number * 1000 * 1000)
elif unit == "GHz":
return int(number * 1000 * 1000 * 1000)
else:
raise ValueError(
"unknown unit: {} (allowed are Hz, kHz, MHz, and GHz)".format(unit)
)
def is_windows():
return os.name == "nt"
def force_linux_path(path):
if is_windows():
return path.replace("\\", "/")
return path
| Python | 0 |
96158b6b5a153db6b9a5e5d40699efefc728a9b3 | Make our LiveWidget handle a 'topics' property along with 'topic' | moksha/api/widgets/live/live.py | moksha/api/widgets/live/live.py | # This file is part of Moksha.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luke Macken <lmacken@redhat.com>
import moksha
from tw.api import Widget
from moksha.exc import MokshaException
from moksha.api.widgets.stomp import StompWidget, stomp_subscribe, stomp_unsubscribe
class LiveWidget(Widget):
""" A live streaming widget.
This widget handles automatically subscribing your widget to any given
topics, and registers all of the stomp callbacks.
"""
engine_name = 'mako'
def update_params(self, d):
""" Register this widgets stomp callbacks """
super(LiveWidget, self).update_params(d)
topics = d.get('topic', getattr(self, 'topic', d.get('topics', getattr(self, 'topics', None))))
if not topics:
raise MokshaException('You must specify a `topic` to subscribe to')
topics = isinstance(topics, list) and topics or [topics]
for callback in StompWidget.callbacks:
if callback == 'onmessageframe':
for topic in topics:
cb = getattr(self, 'onmessage').replace('${id}', self.id)
moksha.stomp[callback][topic].append(cb)
elif callback == 'onconnectedframe':
moksha.stomp['onconnectedframe'].append(stomp_subscribe(topics))
elif callback in self.params:
moksha.stomp[callback].append(getattr(self, callback))
def get_topics(self):
topics = []
for key in ('topic', 'topics'):
if hasattr(self, key):
topic = getattr(self, key)
if topic:
if isinstance(topic, basestring):
map(topics.append, topic.split())
else:
topics += topic
return topics
# Moksha Topic subscription handling methods
subscribe_topics = stomp_subscribe
unsubscribe_topics = stomp_unsubscribe
| # This file is part of Moksha.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luke Macken <lmacken@redhat.com>
import moksha
from tw.api import Widget
from moksha.exc import MokshaException
from moksha.api.widgets.stomp import StompWidget, stomp_subscribe, stomp_unsubscribe
class LiveWidget(Widget):
""" A live streaming widget.
This widget handles automatically subscribing your widget to any given
topics, and registers all of the stomp callbacks.
"""
engine_name = 'mako'
def update_params(self, d):
""" Register this widgets stomp callbacks """
super(LiveWidget, self).update_params(d)
topics = d.get('topic', getattr(self, 'topic', None))
if not topics:
raise MokshaException('You must specify a `topic` to subscribe to')
topics = isinstance(topics, list) and topics or [topics]
for callback in StompWidget.callbacks:
if callback == 'onmessageframe':
for topic in topics:
cb = getattr(self, 'onmessage').replace('${id}', self.id)
moksha.stomp[callback][topic].append(cb)
elif callback == 'onconnectedframe':
moksha.stomp['onconnectedframe'].append(stomp_subscribe(topics))
elif callback in self.params:
moksha.stomp[callback].append(getattr(self, callback))
def get_topics(self):
topics = []
for key in ('topic', 'topics'):
if hasattr(self, key):
topic = getattr(self, key)
if topic:
if isinstance(topic, basestring):
map(topics.append, topic.split())
else:
topics += topic
return topics
# Moksha Topic subscription handling methods
subscribe_topics = stomp_subscribe
unsubscribe_topics = stomp_unsubscribe
| Python | 0 |
410f02a4f657f9a8b9c839f3e08b176f443de9e8 | Handle cases when searched word is only part of the people name. | linkedin_scraper/spiders/people_search.py | linkedin_scraper/spiders/people_search.py | from os import environ
from scrapy_splash import SplashRequest
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
class PeopleSearchSpider(InitSpider):
name = 'people_search'
allowed_domains = ['linkedin.com']
login_page = 'https://www.linkedin.com/uas/login'
def __init__(self, *args, **kwargs):
try:
self.username = (kwargs.pop('username', None) or
environ['SPIDER_USERNAME'])
self.password = (kwargs.pop('password', None) or
environ['SPIDER_PASSWORD'])
except KeyError:
raise Exception('Both username and password need to be specified '
'by -a option or SPIDER_<PARAM> environment var')
query = kwargs.pop('query', 'Mateusz+Moneta')
self.start_urls = [
'https://www.linkedin.com/vsearch/f?type=people&keywords=%s' % query
]
super().__init__(*args, **kwargs)
def init_request(self):
return Request(url=self.login_page, callback=self.login)
def login(self, response):
return FormRequest.from_response(
response, callback=self.check_login_response,
formdata={'session_key': self.username,
'session_password': self.password})
def parse(self, response):
for search_result in response.css('li.mod.result.people'):
names = search_result.css('a.title.main-headline').xpath(
'string(.)').extract_first()
*first_name, last_name = names.split()
yield {
'first_name': ' '.join(first_name),
'last_name': last_name,
}
def check_login_response(self, response):
if b'Sign Out' in response.body:
self.logger.debug("Successfully logged in. Let's start crawling!")
return self.initialized()
self.logger.error('Login failed!')
def make_requests_from_url(self, url):
# Do SplashRequest instead of regular one to be able to evaluate
# JavaScript responsible for dynamic page generation.
return SplashRequest(url)
| from os import environ
from scrapy_splash import SplashRequest
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
class PeopleSearchSpider(InitSpider):
name = 'people_search'
allowed_domains = ['linkedin.com']
login_page = 'https://www.linkedin.com/uas/login'
def __init__(self, *args, **kwargs):
try:
self.username = (kwargs.pop('username', None) or
environ['SPIDER_USERNAME'])
self.password = (kwargs.pop('password', None) or
environ['SPIDER_PASSWORD'])
except KeyError:
raise Exception('Both username and password need to be specified '
'by -a option or SPIDER_<PARAM> environment var')
query = kwargs.pop('query', 'Mateusz+Moneta')
self.start_urls = [
'https://www.linkedin.com/vsearch/f?type=people&keywords=%s' % query
]
super().__init__(*args, **kwargs)
def init_request(self):
return Request(url=self.login_page, callback=self.login)
def login(self, response):
return FormRequest.from_response(
response, callback=self.check_login_response,
formdata={'session_key': self.username,
'session_password': self.password})
def parse(self, response):
for search_result in response.css('li.mod.result.people'):
*first_name, last_name = search_result.css('b::text').extract()
yield {
'first_name': ' '.join(first_name),
'last_name': last_name,
}
def check_login_response(self, response):
if b'Sign Out' in response.body:
self.logger.debug("Successfully logged in. Let's start crawling!")
return self.initialized()
self.logger.error('Login failed!')
def make_requests_from_url(self, url):
# Do SplashRequest instead of regular one to be able to evaluate
# JavaScript responsible for dynamic page generation.
return SplashRequest(url)
| Python | 0.000001 |
0e0a0de8f6be116ba00a6938586dcbc315c4db3f | Clarify that symbolic representations are Tensors | cleverhans/model.py | cleverhans/model.py | from abc import ABCMeta
class Model(object):
"""
An abstract interface for model wrappers that exposes model symbols
needed for making an attack. This abstraction removes the dependency on
any specific neural network package (e.g. Keras) from the core
code of CleverHans. It can also simplify exposing the hidden features of a
model when a specific package does not directly expose them.
"""
__metaclass__ = ABCMeta
def __init__(self):
pass
def __call__(self, *args, **kwargs):
"""
For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).
"""
return self.get_probs(*args, **kwargs)
def get_layer(self, x, layer):
"""
Expose the hidden features of a model given a layer name.
:param x: A symbolic representation (Tensor) of the network input
:param layer: The name of the hidden layer to return features at.
:return: A symbolic representation (Tensor) of the hidden features
:raise: NoSuchLayerError if `layer` is not in the model.
"""
# Return the symbolic representation (Tensor) for this layer.
output = self.fprop(x)
try:
requested = output[layer]
except KeyError:
raise NoSuchLayerError()
return requested
def get_logits(self, x):
"""
:param x: A symbolic representation (Tensor) of the network input
:return: A symbolic representation (Tensor) of the output logits
(i.e., the values fed as inputs to the softmax layer).
"""
return self.get_layer(x, 'logits')
def get_probs(self, x):
"""
:param x: A symbolic representation (Tensor) of the network input
:return: A symbolic representation (Tensor) of the output
probabilities (i.e., the output values produced by the softmax layer).
"""
try:
return self.get_layer(x, 'probs')
except NoSuchLayerError:
pass
except NotImplementedError:
pass
import tensorflow as tf
return tf.nn.softmax(self.get_logits(x))
def get_layer_names(self):
"""
:return: a list of names for the layers that can be exposed by this
model abstraction.
"""
if hasattr(self, 'layer_names'):
return self.layer_names
raise NotImplementedError('`get_layer_names` not implemented.')
def fprop(self, x):
"""
Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation (Tensor) of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
raise NotImplementedError('`fprop` not implemented.')
def get_params(self):
"""
Provides access to the model's parameters.
:return: A list of all Variables defining the model parameters.
"""
raise NotImplementedError()
class CallableModelWrapper(Model):
def __init__(self, callable_fn, output_layer):
"""
Wrap a callable function that takes a tensor as input and returns
a tensor as output with the given layer name.
:param callable_fn: The callable function taking a tensor and
returning a given layer as output.
:param output_layer: A string of the output layer returned by the
function. (Usually either "probs" or "logits".)
"""
self.output_layer = output_layer
self.callable_fn = callable_fn
def get_layer_names(self):
return [self.output_layer]
def fprop(self, x):
return {self.output_layer: self.callable_fn(x)}
class NoSuchLayerError(ValueError):
"""Raised when a layer that does not exist is requested."""
| from abc import ABCMeta
class Model(object):
"""
An abstract interface for model wrappers that exposes model symbols
needed for making an attack. This abstraction removes the dependency on
any specific neural network package (e.g. Keras) from the core
code of CleverHans. It can also simplify exposing the hidden features of a
model when a specific package does not directly expose them.
"""
__metaclass__ = ABCMeta
def __init__(self):
pass
def __call__(self, *args, **kwargs):
"""
For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).
"""
return self.get_probs(*args, **kwargs)
def get_layer(self, x, layer):
"""
Expose the hidden features of a model given a layer name.
:param x: A symbolic representation of the network input
:param layer: The name of the hidden layer to return features at.
:return: A symbolic representation of the hidden features
:raise: NoSuchLayerError if `layer` is not in the model.
"""
# Return the symbolic representation for this layer.
output = self.fprop(x)
try:
requested = output[layer]
except KeyError:
raise NoSuchLayerError()
return requested
def get_logits(self, x):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output logits (i.e., the
values fed as inputs to the softmax layer).
"""
return self.get_layer(x, 'logits')
def get_probs(self, x):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output probabilities (i.e.,
the output values produced by the softmax layer).
"""
try:
return self.get_layer(x, 'probs')
except NoSuchLayerError:
pass
except NotImplementedError:
pass
import tensorflow as tf
return tf.nn.softmax(self.get_logits(x))
def get_layer_names(self):
"""
:return: a list of names for the layers that can be exposed by this
model abstraction.
"""
if hasattr(self, 'layer_names'):
return self.layer_names
raise NotImplementedError('`get_layer_names` not implemented.')
def fprop(self, x):
"""
Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
raise NotImplementedError('`fprop` not implemented.')
def get_params(self):
"""
Provides access to the model's parameters.
:return: A list of all Variables defining the model parameters.
"""
raise NotImplementedError()
class CallableModelWrapper(Model):
def __init__(self, callable_fn, output_layer):
"""
Wrap a callable function that takes a tensor as input and returns
a tensor as output with the given layer name.
:param callable_fn: The callable function taking a tensor and
returning a given layer as output.
:param output_layer: A string of the output layer returned by the
function. (Usually either "probs" or "logits".)
"""
self.output_layer = output_layer
self.callable_fn = callable_fn
def get_layer_names(self):
return [self.output_layer]
def fprop(self, x):
return {self.output_layer: self.callable_fn(x)}
class NoSuchLayerError(ValueError):
"""Raised when a layer that does not exist is requested."""
| Python | 0.999999 |
655fcce56abd0d3f0da9b52e911636d931157443 | bump version | dockercloud/__init__.py | dockercloud/__init__.py | import base64
import logging
import os
import requests
from future.standard_library import install_aliases
install_aliases()
from dockercloud.api import auth
from dockercloud.api.service import Service
from dockercloud.api.container import Container
from dockercloud.api.repository import Repository
from dockercloud.api.node import Node
from dockercloud.api.action import Action
from dockercloud.api.nodecluster import NodeCluster
from dockercloud.api.nodetype import NodeType
from dockercloud.api.nodeprovider import Provider
from dockercloud.api.noderegion import Region
from dockercloud.api.tag import Tag
from dockercloud.api.trigger import Trigger
from dockercloud.api.stack import Stack
from dockercloud.api.exceptions import ApiError, AuthError, ObjectNotFound, NonUniqueIdentifier
from dockercloud.api.utils import Utils
from dockercloud.api.events import Events
from dockercloud.api.nodeaz import AZ
__version__ = '1.0.6'
dockercloud_auth = os.environ.get('DOCKERCLOUD_AUTH')
basic_auth = auth.load_from_file("~/.docker/config.json")
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_PASS'):
basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'), os.environ.get('DOCKERCLOUD_PASS')))
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_APIKEY'):
basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'), os.environ.get('DOCKERCLOUD_APIKEY')))
rest_host = os.environ.get("DOCKERCLOUD_REST_HOST") or 'https://cloud.docker.com/'
stream_host = os.environ.get("DOCKERCLOUD_STREAM_HOST") or 'wss://ws.cloud.docker.com/'
namespace = os.environ.get('DOCKERCLOUD_NAMESPACE')
user_agent = None
logging.basicConfig()
logger = logging.getLogger("python-dockercloud")
try:
requests.packages.urllib3.disable_warnings()
except:
pass
| import base64
import logging
import os
import requests
from future.standard_library import install_aliases
install_aliases()
from dockercloud.api import auth
from dockercloud.api.service import Service
from dockercloud.api.container import Container
from dockercloud.api.repository import Repository
from dockercloud.api.node import Node
from dockercloud.api.action import Action
from dockercloud.api.nodecluster import NodeCluster
from dockercloud.api.nodetype import NodeType
from dockercloud.api.nodeprovider import Provider
from dockercloud.api.noderegion import Region
from dockercloud.api.tag import Tag
from dockercloud.api.trigger import Trigger
from dockercloud.api.stack import Stack
from dockercloud.api.exceptions import ApiError, AuthError, ObjectNotFound, NonUniqueIdentifier
from dockercloud.api.utils import Utils
from dockercloud.api.events import Events
from dockercloud.api.nodeaz import AZ
__version__ = '1.0.5'
dockercloud_auth = os.environ.get('DOCKERCLOUD_AUTH')
basic_auth = auth.load_from_file("~/.docker/config.json")
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_PASS'):
basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'), os.environ.get('DOCKERCLOUD_PASS')))
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_APIKEY'):
basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'), os.environ.get('DOCKERCLOUD_APIKEY')))
rest_host = os.environ.get("DOCKERCLOUD_REST_HOST") or 'https://cloud.docker.com/'
stream_host = os.environ.get("DOCKERCLOUD_STREAM_HOST") or 'wss://ws.cloud.docker.com/'
namespace = os.environ.get('DOCKERCLOUD_NAMESPACE')
user_agent = None
logging.basicConfig()
logger = logging.getLogger("python-dockercloud")
try:
requests.packages.urllib3.disable_warnings()
except:
pass
| Python | 0 |
6589c5cc30c228e5aacd77184310e9afd9dc0345 | Fix test | tests/test_contributors_views.py | tests/test_contributors_views.py | # -*- coding: utf-8 -*-
from nose.tools import * # noqa; PEP8 asserts
from tests.factories import ProjectFactory, NodeFactory, AuthUserFactory
from tests.base import OsfTestCase, fake
from framework.auth.decorators import Auth
from website.profile import utils
class TestContributorUtils(OsfTestCase):
def setUp(self):
super(TestContributorUtils, self).setUp()
self.project = ProjectFactory()
def test_serialize_user(self):
serialized = utils.serialize_user(self.project.creator, self.project)
assert_true(serialized['visible'])
assert_equal(serialized['permission'], 'admin')
def test_serialize_user_admin(self):
serialized = utils.serialize_user(self.project.creator, self.project, admin=True)
assert_false(serialized['visible'])
assert_equal(serialized['permission'], 'read')
class TestContributorViews(OsfTestCase):
def setUp(self):
super(TestContributorViews, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
def test_get_contributors_no_limit(self):
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
url = self.project.api_url_for('get_contributors')
res = self.app.get(url, auth=self.user.auth)
# Should be two visible contributors on the project
assert_equal(
len(res.json['contributors']),
2,
)
def test_get_contributors_with_limit(self):
# Add five contributors
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
# Set limit to three contributors
url = self.project.api_url_for('get_contributors', limit=3)
res = self.app.get(url, auth=self.user.auth)
# Should be three visible contributors on the project
assert_equal(
len(res.json['contributors']),
3,
)
# There should be two 'more' contributors not shown
assert_equal(
(res.json['more']),
2,
)
def test_get_contributors_from_parent(self):
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
component = NodeFactory(parent=self.project, creator=self.user)
url = component.api_url_for('get_contributors_from_parent')
res = self.app.get(url, auth=self.user.auth)
# Should be all contributors, client-side handles marking
# contributors that are already added to the child.
assert_equal(
len(res.json['contributors']),
2,
)
| # -*- coding: utf-8 -*-
from nose.tools import * # noqa; PEP8 asserts
from tests.factories import ProjectFactory, NodeFactory, AuthUserFactory
from tests.base import OsfTestCase, fake
from framework.auth.decorators import Auth
from website.profile import utils
class TestContributorUtils(OsfTestCase):
def setUp(self):
super(TestContributorUtils, self).setUp()
self.project = ProjectFactory()
def test_serialize_user(self):
serialized = utils.serialize_user(self.project.creator, self.project)
assert_true(serialized['visible'])
assert_equal(serialized['permission'], 'admin')
def test_serialize_user_admin(self):
serialized = utils.serialize_user(self.project.creator, self.project, admin=True)
assert_false(serialized['visible'])
assert_equal(serialized['permission'], 'read')
class TestContributorViews(OsfTestCase):
def setUp(self):
super(TestContributorViews, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
def test_get_contributors_no_limit(self):
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
url = self.project.api_url_for('get_contributors')
res = self.app.get(url, auth=self.user.auth)
# Should be two visible contributors on the project
assert_equal(
len(res.json['contributors']),
2,
)
def test_get_contributors_with_limit(self):
# Add five contributors
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
# Set limit to three contributors
url = self.project.api_url_for('get_contributors', limit=3)
res = self.app.get(url, auth=self.user.auth)
# Should be three visible contributors on the project
assert_equal(
len(res.json['contributors']),
3,
)
# There should be two 'more' contributors not shown
assert_equal(
(res.json['more']),
2,
)
def test_get_contributors_from_parent(self):
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
component = NodeFactory(parent=self.project, creator=self.user)
url = component.api_url_for('get_contributors_from_parent')
res = self.app.get(url, auth=self.user.auth)
# Should be one contributor to the parent who is both visible and
# not a contributor on the component
assert_equal(
len(res.json['contributors']),
1,
)
| Python | 0.000004 |
b0c5d485543e123c985336d054b6f20d60634221 | Add new kumquat settings.py file from the origin. We need to find a other solution in the future to overwrite config files | copy/tmp/kumquat-settings.py | copy/tmp/kumquat-settings.py | """
Django settings for kumquat_web project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys, os
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'messagegroups',
'kumquat',
'web',
'mysql',
'ftp',
'status',
'mail',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'kumquat_web.urls'
WSGI_APPLICATION = 'kumquat_web.wsgi.application'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django_settings_export.settings_export',
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
# Logging
# https://docs.djangoproject.com/en/dev/topics/logging/
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
# Only use logging if debug is false and it's a production environment
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S',
},
'simple': {
'format': '%(levelname)s %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S',
},
},
'handlers': {
# Log to stdout
'console': {
'class':'logging.StreamHandler',
'stream': sys.stdout,
},
# Log to syslog because this is much cleaner than extra file
'syslog': {
'class': 'logging.handlers.SysLogHandler',
'facility': 'local1',
'address': ('127.0.0.1', 514),
'formatter': 'simple',
},
},
'loggers': {
# Might as well log any errors anywhere else in Django
'django': {
'handlers': ['console', 'syslog'],
'level': 'ERROR',
'propagate': False,
},
},
}
# kumquat
KUMQUAT_WEBMAIL_URL = ''
KUMQUAT_PHPMYADMIN_URL = ''
# Allow the following variables in the template
SETTINGS_EXPORT = [
'KUMQUAT_WEBMAIL_URL',
'KUMQUAT_PHPMYADMIN_URL',
]
| """
Django settings for kumquat_web project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'messagegroups',
'kumquat',
'web',
'mysql',
'ftp',
'status',
'mail',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'kumquat_web.urls'
WSGI_APPLICATION = 'kumquat_web.wsgi.application'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Redirect after LOGIN
LOGIN_REDIRECT_URL = '/'
# Logging
# https://docs.djangoproject.com/en/dev/topics/logging/
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
# Only use logging if debug is false and it's a production environment
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S',
},
'simple': {
'format': '%(levelname)s %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S',
},
},
'handlers': {
# Log to stdout
'console': {
'class':'logging.StreamHandler',
'stream': sys.stdout,
},
# Log to syslog because this is much cleaner than extra file
'syslog': {
'class': 'logging.handlers.SysLogHandler',
'facility': 'local1',
'address': ('127.0.0.1', 514),
'formatter': 'simple',
},
},
'loggers': {
# Might as well log any errors anywhere else in Django
'django': {
'handlers': ['console', 'syslog'],
'level': 'ERROR',
'propagate': False,
},
},
}
| Python | 0 |
b41e563c866a8918a65253c1cbb1a1fa5c44c212 | Placate angry bot | xunit-autolabeler-v2/ast_parser/python/test_data/new_tests/fixture_detection_test.py | xunit-autolabeler-v2/ast_parser/python/test_data/new_tests/fixture_detection_test.py | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def setup():
pass
def init():
pass
def setup_something():
pass
def init_something():
pass
def another_setup():
pass
def another_init():
pass
| # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def setup():
pass
def init():
pass
def setup_something():
pass
def init_something():
pass
def another_setup():
pass
def another_init():
pass
| Python | 0.999805 |
4617750140daf87e6e686bce19497a0e4e8bea75 | remove out of context request checking | tests/test_ember_osf_web.py | tests/test_ember_osf_web.py |
import mock
from flask import request
from tests.base import OsfTestCase
from website.ember_osf_web.decorators import ember_flag_is_active
from osf_tests.factories import FlagFactory, UserFactory
from django.contrib.auth.models import Group
class TestEmberFlagIsActive(OsfTestCase):
def setUp(self):
super(TestEmberFlagIsActive, self).setUp()
self.flag = FlagFactory(name='active_flag')
FlagFactory(name='inactive_flag', everyone=False).save()
self.mock_func = lambda: 'test value'
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_use_ember_app(self, mock_use_ember_app):
ember_flag_is_active('active_flag')(self.mock_func)()
mock_use_ember_app.assert_called_with()
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_dont_use_ember_app(self, mock_use_ember_app):
# mock over external module 'waflle.flag_is_active` not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=False)
ember_flag_is_active('inactive_flag')(self.mock_func)()
assert not mock_use_ember_app.called
@mock.patch('api.waffle.utils._get_current_user')
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_authenticated_user(self, mock_use_ember_app, mock__get_current_user):
# mock over external module 'waflle.flag_is_active` not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=True)
user = UserFactory()
mock__get_current_user.return_value = user
ember_flag_is_active('active_flag')(self.mock_func)()
api.waffle.utils.waffle.flag_is_active.assert_called_with(request, 'active_flag')
mock_use_ember_app.assert_called_with()
@mock.patch('api.waffle.utils._get_current_user', return_value=None)
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_unauthenticated_user(self, mock_use_ember_app, mock__get_current_user):
# mock over external module 'waflle.flag_is_active` not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=True)
ember_flag_is_active('active_flag')(self.mock_func)()
group = Group.objects.create(name='foo')
self.flag.groups.add(group)
api.waffle.utils.waffle.flag_is_active.assert_called_with(request, 'active_flag')
mock_use_ember_app.assert_called_with()
|
import mock
from flask import request
from tests.base import OsfTestCase
from website.ember_osf_web.decorators import ember_flag_is_active
from osf_tests.factories import FlagFactory, UserFactory
from django.contrib.auth.models import Group
class TestEmberFlagIsActive(OsfTestCase):
def setUp(self):
super(TestEmberFlagIsActive, self).setUp()
self.flag = FlagFactory(name='active_flag')
FlagFactory(name='inactive_flag', everyone=False).save()
self.mock_func = lambda: 'test value'
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_use_ember_app(self, mock_use_ember_app):
ember_flag_is_active('active_flag')(self.mock_func)()
mock_use_ember_app.assert_called_with()
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_dont_use_ember_app(self, mock_use_ember_app):
# mock over external module 'waflle.flag_is_active` not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=False)
ember_flag_is_active('inactive_flag')(self.mock_func)()
assert not mock_use_ember_app.called
@mock.patch('api.waffle.utils._get_current_user')
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_authenticated_user(self, mock_use_ember_app, mock__get_current_user):
# mock over external module 'waflle.flag_is_active` not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=True)
user = UserFactory()
mock__get_current_user.return_value = user
ember_flag_is_active('active_flag')(self.mock_func)()
api.waffle.utils.waffle.flag_is_active.assert_called_with(request, 'active_flag')
assert request.user == user
mock_use_ember_app.assert_called_with()
@mock.patch('api.waffle.utils._get_current_user', return_value=None)
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_unauthenticated_user(self, mock_use_ember_app, mock__get_current_user):
# mock over external module 'waflle.flag_is_active` not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=True)
ember_flag_is_active('active_flag')(self.mock_func)()
group = Group.objects.create(name='foo')
self.flag.groups.add(group)
api.waffle.utils.waffle.flag_is_active.assert_called_with(request, 'active_flag')
assert not request.user.is_authenticated
mock_use_ember_app.assert_called_with()
| Python | 0.000001 |
dae16f72b9ca5d96c7f894601aa3a69facbbb00e | Fix memory limit in MongoDB while loading logs (#5) | scripts/load_logs_to_mongodb.py | scripts/load_logs_to_mongodb.py | import os
import sys
from datetime import datetime
from collections import defaultdict
from pymongo import MongoClient
logs_file = open(sys.argv[1])
article_urls = set()
article_views = defaultdict(list) # article_url: list of user's id's
article_times = {}
for line in logs_file:
try:
timestamp, url, user = line.strip().split('\t')
except IndexError:
continue
timestamp = timestamp.strip(' GET').strip('Z')
# Delete ms from timestamp
timestamp = ''.join(timestamp.split('.')[:-1])
event_time = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')
if not url or not user:
continue
if not url.startswith('https://tvrain.ru/'):
continue
article_urls.add(url)
article_views[url].append(user)
# Save time of only first event
if url not in article_times:
article_times[url] = event_time
mongodb_client = MongoClient(os.environ['MONGODB_URL'])
db = mongodb_client.tvrain
parsed_articles = db.tvrain
articles = db.articles
# Clear articles
articles.remove({})
for article in parsed_articles.find():
if article['url'] not in article_urls:
continue
views = article_views[article['url']]
compressed_views = []
# Save only every 10th view
for i in range(len(views)):
if i % 10 == 0:
compressed_views.append(views[i])
articles.insert_one({
'_id': article['_id'],
'title': article['title'],
'text': article['text'],
'views': compressed_views,
'time': article_times[article['url']]
})
| import os
import sys
from datetime import datetime
from collections import defaultdict
from pymongo import MongoClient
logs_file = open(sys.argv[1])
article_urls = set()
article_views = defaultdict(list) # article_url: list of user's id's
article_times = {}
for line in logs_file:
try:
timestamp, url, user = line.strip().split('\t')
except IndexError:
continue
timestamp = timestamp.strip(' GET').strip('Z')
# Delete ms from timestamp
timestamp = ''.join(timestamp.split('.')[:-1])
event_time = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')
if not url or not user:
continue
if not url.startswith('https://tvrain.ru/'):
continue
article_urls.add(url)
article_views[url].append(user)
# Save time of only first event
if url not in article_times:
article_times[url] = event_time
mongodb_client = MongoClient(os.environ['MONGODB_URL'])
db = mongodb_client.tvrain
parsed_articles = db.tvrain
articles = db.articles
for article in parsed_articles.find():
if article['url'] not in article_urls:
continue
articles.insert_one({
'_id': article['_id'],
'title': article['title'],
'text': article['text'],
'views': article_views[article['url']],
'time': article_times[article['url']]
})
| Python | 0 |
3375c9cd3311bff8ff3ab07c361e18c68226784c | remove stray print | mc2/controllers/base/managers/rabbitmq.py | mc2/controllers/base/managers/rabbitmq.py | import base64
import hashlib
import random
import time
import uuid
from django.conf import settings
from pyrabbit.api import Client
from pyrabbit.http import HTTPError
class ControllerRabbitMQManager(object):
def __init__(self, controller):
"""
A helper manager to get to connect to RabbitMQ
:param controller Controller: A Controller model instance
"""
self.ctrl = controller
self.client = Client(
settings.RABBITMQ_API_HOST,
settings.RABBITMQ_API_USERNAME,
settings.RABBITMQ_API_PASSWORD)
def _create_password(self):
# Guranteed random dice rolls
return base64.b64encode(
hashlib.sha1(uuid.uuid1().hex).hexdigest())[:24]
def _create_username(self):
return base64.b64encode(str(
time.time() + random.random() * time.time())).strip('=').lower()
def create_rabbitmq_vhost(self):
"""
Attempts to create a new vhost. Returns false if vhost already exists.
The new username/password will be saved on the controller if a new
vhost was created
:returns: bool
"""
try:
self.client.get_vhost(self.ctrl.rabbitmq_vhost_name)
return False # already exists
except HTTPError:
pass
self.client.create_vhost(self.ctrl.rabbitmq_vhost_name)
# create user/pass
username = self._create_username()
password = self._create_password()
self.client.create_user(username, password)
# save newly created username/pass
self.ctrl.rabbitmq_vhost_username = username
self.ctrl.rabbitmq_vhost_password = password
self.ctrl.rabbitmq_vhost_host = settings.RABBITMQ_APP_HOST
self.ctrl.save()
self.client.set_vhost_permissions(
self.ctrl.rabbitmq_vhost_name, username, '.*', '.*', '.*')
return True
| import base64
import hashlib
import random
import time
import uuid
from django.conf import settings
from pyrabbit.api import Client
from pyrabbit.http import HTTPError
class ControllerRabbitMQManager(object):
def __init__(self, controller):
"""
A helper manager to get to connect to RabbitMQ
:param controller Controller: A Controller model instance
"""
self.ctrl = controller
self.client = Client(
settings.RABBITMQ_API_HOST,
settings.RABBITMQ_API_USERNAME,
settings.RABBITMQ_API_PASSWORD)
print self.client
def _create_password(self):
# Guranteed random dice rolls
return base64.b64encode(
hashlib.sha1(uuid.uuid1().hex).hexdigest())[:24]
def _create_username(self):
return base64.b64encode(str(
time.time() + random.random() * time.time())).strip('=').lower()
def create_rabbitmq_vhost(self):
"""
Attempts to create a new vhost. Returns false if vhost already exists.
The new username/password will be saved on the controller if a new
vhost was created
:returns: bool
"""
try:
self.client.get_vhost(self.ctrl.rabbitmq_vhost_name)
return False # already exists
except HTTPError:
pass
self.client.create_vhost(self.ctrl.rabbitmq_vhost_name)
# create user/pass
username = self._create_username()
password = self._create_password()
self.client.create_user(username, password)
# save newly created username/pass
self.ctrl.rabbitmq_vhost_username = username
self.ctrl.rabbitmq_vhost_password = password
self.ctrl.rabbitmq_vhost_host = settings.RABBITMQ_APP_HOST
self.ctrl.save()
self.client.set_vhost_permissions(
self.ctrl.rabbitmq_vhost_name, username, '.*', '.*', '.*')
return True
| Python | 0.000215 |
2cee1d5bff32831a9c15755e7482057ac7b9a39a | Update packets.py | cs143sim/packets.py | cs143sim/packets.py | """This module contains all packet definitions.
.. autosummary::
Packet
DataPacket
RouterPacket
.. moduleauthor:: Lan Hongjian <lanhongjianlr@gmail.com>
.. moduleauthor:: Yamei Ou <oym111@gmail.com>
.. moduleauthor:: Samuel Richerd <dondiego152@gmail.com>
.. moduleauthor:: Jan Van Bruggen <jancvanbruggen@gmail.com>
.. moduleauthor:: Junlin Zhang <neicullyn@gmail.com>
"""
from cs143sim.constants import PACKET_SIZE
class Packet(object):
"""Representation of a quantum of information
Packets carry information along the network, between :class:`Hosts <.Host>`
or :class:`Routers <.Router>`.
:param destination: destination :class:`.Host` or :class:`.Router`
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
:ivar destination: destination :class:`.Host` or :class:`.Router`
:ivar source: source :class:`.Host` or :class:`.Router`
:ivar str timestamp: time at which the packet was created
"""
def __init__(self, destination, source, timestamp):
self.timestamp = timestamp
self.source = source
self.destination = destination
self.size = PACKET_SIZE
class DataPacket(Packet):
"""A packet used for transferring data
:param destination: destination :class:`.Host` or :class:`.Router`
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
"""
def __init__(self, destination, source, timestamp, acknowledgement, number):
# TODO: define number and acknowledgement in docstring
super(DataPacket, self).__init__(timestamp=timestamp, source=source,
destination=destination)
self.number = number
self.acknowledgement = acknowledgement
class RouterPacket(Packet):
"""A packet used to update routing tables
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
"""
def __init__(self, source, timestamp, router_table, acknowledgement):
# TODO: define router_table in docstring
super(RouterPacket, self).__init__(timestamp=timestamp, source=source,
destination=0)
self.router_table = router_table
self.number = 0
self.acknowledgement = acknowledgement
| """This module contains all packet definitions.
.. autosummary::
Packet
DataPacket
RouterPacket
.. moduleauthor:: Lan Hongjian <lanhongjianlr@gmail.com>
.. moduleauthor:: Yamei Ou <oym111@gmail.com>
.. moduleauthor:: Samuel Richerd <dondiego152@gmail.com>
.. moduleauthor:: Jan Van Bruggen <jancvanbruggen@gmail.com>
.. moduleauthor:: Junlin Zhang <neicullyn@gmail.com>
"""
from cs143sim.constants import PACKET_SIZE
class Packet(object):
"""Representation of a quantum of information
Packets carry information along the network, between :class:`Hosts <.Host>`
or :class:`Routers <.Router>`.
:param destination: destination :class:`.Host` or :class:`.Router`
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
:ivar destination: destination :class:`.Host` or :class:`.Router`
:ivar source: source :class:`.Host` or :class:`.Router`
:ivar str timestamp: time at which the packet was created
"""
def __init__(self, destination, source, timestamp):
self.timestamp = timestamp
self.source = source
self.destination = destination
self.size = PACKET_SIZE
class DataPacket(Packet):
"""A packet used for transferring data
:param destination: destination :class:`.Host` or :class:`.Router`
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
"""
def __init__(self, destination, source, timestamp, acknowledgement, number):
# TODO: define number and acknowledgement in docstring
super(DataPacket, self).__init__(timestamp=timestamp, source=source,
destination=destination)
self.number = number
self.acknowledgement = acknowledgement
class RouterPacket(Packet):
"""A packet used to update routing tables
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
"""
def __init__(self, source, timestamp, router_table, acknowledgement):
# TODO: define router_table in docstring
super(RouterPacket, self).__init__(timestamp=timestamp, source=source,
destination=0)
self.router_table = router_table
self.acknowledgement = acknowledgement
| Python | 0.000001 |
0c35c0f7fe126b87eccdf4f69933b84927956658 | Fix account __type__ | module/plugins/accounts/XFileSharingPro.py | module/plugins/accounts/XFileSharingPro.py | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.XFSPAccount import XFSPAccount
class XFileSharingPro(XFSPAccount):
__name__ = "XFileSharingPro"
__type__ = "account"
__version__ = "0.02"
__description__ = """XFileSharingPro multi-purpose account plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def init(self):
pattern = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
self.HOSTER_NAME = re.match(pattern, self.pyfile.url).group(1).lower()
| # -*- coding: utf-8 -*-
import re
from module.plugins.internal.XFSPAccount import XFSPAccount
class XFileSharingPro(XFSPAccount):
__name__ = "XFileSharingPro"
__type__ = "crypter"
__version__ = "0.01"
__description__ = """XFileSharingPro dummy account plugin for hook"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def init(self):
pattern = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
self.HOSTER_NAME = re.match(pattern, self.pyfile.url).group(1).lower()
| Python | 0 |
a396d3e7b4de10710c2f2e0beab0ef82acaf866b | Create first test | web/impact/impact/tests/test_track_api_calls.py | web/impact/impact/tests/test_track_api_calls.py | from django.test import (
TestCase,
)
from mock import mock, patch
from impact.tests.api_test_case import APITestCase
class TestTrackAPICalls(APITestCase):
@patch('impact.middleware.track_api_calls.TrackAPICalls.process_request.logger')
def test_when_user_authenticated(self, logger_info_patch):
with self.login(email=self.basic_user().email):
response = self.client.get(/)
logger_info_patch.info.assert_called_with()
def test_when_no_user_authenticated(self):
pass
| from django.test import (
RequestFactory,
TestCase,
)
from mock import patch
class TestTrackAPICalls(TestCase):
def test_when_user_auth(self):
pass
def test_when_no_user_auth(self):
pass
| Python | 0 |
1fc456f00d9895358ee52e967edfdfc2512315d0 | Update stackexchange.py | data_loaders/stackexchange.py | data_loaders/stackexchange.py | #
# stackexchange.py
# Mich, 2015-03-12
# Copyright (c) 2015 Datacratic Inc. All rights reserved.
#
import requests
import json
from datetime import datetime
def load_data(mldb, payload):
mldb.log("StackExchange data loader")
payload = json.loads(payload)
assert payload['site'], mldb.log("payload: site is undefined")
page = 0
has_more = True
key = None
if 'key' in payload:
key = payload['key']
site = payload['site'].encode("utf-8")
mldb.log("Got site:" + site.encode("utf-8"))
dataset_id = site.encode("utf-8") + '_dataset'
dataset_config = {
'type' : 'mutable',
'id' : dataset_id,
'params': { 'artifactUri' : 'file:///mldb_data/' + site + '_dataset.beh.gz' }
}
url = "/v1/datasets/" + dataset_id
result = mldb.perform("GET", url, [], {})
if result['statusCode'] == 200:
mldb.log("Dataset was already loaded")
return {
'datasetId' : dataset_id,
'count' : '?',
'quotaRemaining' : '?'
}
dataset = mldb.create_dataset(dataset_config)
mldb.log("stackexchange data loader created dataset " + dataset_id)
now = datetime.now() # foo date, timeless features
count = 0
page = 0
quota_remaining = "Unknown"
while has_more:
page += 1
params = {
'site' : site,
'pagesize' : 100,
'page' : page
}
if key:
params['key'] = key
r = requests.get('https://api.stackexchange.com/2.2/questions',
params=params)
assert r.status_code == 200, mldb.log("Failed to fetch questions: "
+ r.content)
result = json.loads(r.content)
has_more = result['has_more']
quota_remaining = result['quota_remaining']
for question in result['items']:
if len(question['tags']) > 1:
triplet = [[question['question_id'], '1', now]]
for tag in question['tags']:
tag = tag.encode("utf-8")
if count == 0:
mldb.log("stackexchange data loader first line: {}, {}"
.format(tag, triplet))
dataset.record_row(tag, triplet)
if count == 0:
mldb.log("stackexchange data loader recorded first row")
count += 1
if count == 20000:
mldb.log("stackexchange data loader stopping at 20k lines")
has_more = False
break
else:
continue
break
dataset.commit()
mldb.log("Fetched {} tags".format(count))
return {
'datasetId' : dataset_id,
'count' : count,
'quotaRemaining' : quota_remaining
}
| #
# stackexchange.py
# Mich, 2015-03-12
# Copyright (c) 2015 Datacratic Inc. All rights reserved.
#
import requests
import json
from datetime import datetime
def load_data(mldb, payload):
mldb.log("StackExchange data loader")
payload = json.loads(payload)
assert payload['site'], mldb.log("payload: site is undefined")
page = 0
has_more = True
key = None
if 'key' in payload:
key = payload['key']
site = payload['site'].encode("utf-8")
mldb.log("Got site:" + site.encode("utf-8"))
dataset_id = site.encode("utf-8") + '_dataset'
dataset_config = {
'type' : 'mutable',
'id' : dataset_id,
'params': { 'artifactUri' : 'file:///var/mldb/' + site + '_dataset.beh.gz' }
}
url = "/v1/datasets/" + dataset_id
result = mldb.perform("GET", url, [], {})
if result['statusCode'] == 200:
mldb.log("Dataset was already loaded")
return {
'datasetId' : dataset_id,
'count' : '?',
'quotaRemaining' : '?'
}
dataset = mldb.create_dataset(dataset_config)
mldb.log("stackexchange data loader created dataset " + dataset_id)
now = datetime.now() # foo date, timeless features
count = 0
page = 0
quota_remaining = "Unknown"
while has_more:
page += 1
params = {
'site' : site,
'pagesize' : 100,
'page' : page
}
if key:
params['key'] = key
r = requests.get('https://api.stackexchange.com/2.2/questions',
params=params)
assert r.status_code == 200, mldb.log("Failed to fetch questions: "
+ r.content)
result = json.loads(r.content)
has_more = result['has_more']
quota_remaining = result['quota_remaining']
for question in result['items']:
if len(question['tags']) > 1:
triplet = [[question['question_id'], '1', now]]
for tag in question['tags']:
tag = tag.encode("utf-8")
if count == 0:
mldb.log("stackexchange data loader first line: {}, {}"
.format(tag, triplet))
dataset.record_row(tag, triplet)
if count == 0:
mldb.log("stackexchange data loader recorded first row")
count += 1
if count == 20000:
mldb.log("stackexchange data loader stopping at 20k lines")
has_more = False
break
else:
continue
break
dataset.commit()
mldb.log("Fetched {} tags".format(count))
return {
'datasetId' : dataset_id,
'count' : count,
'quotaRemaining' : quota_remaining
}
| Python | 0.000001 |
9e577694d2f8665599d590299e58355dd7472011 | Fix less | cupy/logic/comparison.py | cupy/logic/comparison.py | from cupy.logic import ufunc
def allclose(a, b, rtol=1e-05, atol=1e-08):
# TODO(beam2d): Implement it
raise NotImplementedError
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False, allocator=None):
# TODO(beam2d): Implement it
raise NotImplementedError
def array_equal(a1, a2):
# TODO(beam2d): Implement it
raise NotImplementedError
def array_equiv(a1, a2):
# TODO(beam2d): Implement it
raise NotImplementedError
greater = ufunc.create_comparison(
'greater', '>',
'''Tests elementwise if ``x1 > x2``.
.. seealso:: :data:`numpy.greater`
''')
greater_equal = ufunc.create_comparison(
'greater_equal', '>=',
'''Tests elementwise if ``x1 >= x2``.
.. seealso:: :data:`numpy.greater_equal`
''')
less = ufunc.create_comparison(
'less', '<',
'''Tests elementwise if ``x1 < x2``.
.. seealso:: :data:`numpy.less`
''')
less_equal = ufunc.create_comparison(
'less_equal', '<=',
'''Tests elementwise if ``x1 <= x2``.
.. seealso:: :data:`numpy.less_equal`
''')
equal = ufunc.create_comparison(
'equal', '==',
'''Tests elementwise if ``x1 == x2``.
.. seealso:: :data:`numpy.equal`
''')
not_equal = ufunc.create_comparison(
'not_equal', '!=',
'''Tests elementwise if ``x1 != x2``.
.. seealso:: :data:`numpy.equal`
''')
| from cupy.logic import ufunc
def allclose(a, b, rtol=1e-05, atol=1e-08):
# TODO(beam2d): Implement it
raise NotImplementedError
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False, allocator=None):
# TODO(beam2d): Implement it
raise NotImplementedError
def array_equal(a1, a2):
# TODO(beam2d): Implement it
raise NotImplementedError
def array_equiv(a1, a2):
# TODO(beam2d): Implement it
raise NotImplementedError
greater = ufunc.create_comparison(
'greater', '>',
'''Tests elementwise if ``x1 > x2``.
.. seealso:: :data:`numpy.greater`
''')
greater_equal = ufunc.create_comparison(
'greater_equal', '>=',
'''Tests elementwise if ``x1 >= x2``.
.. seealso:: :data:`numpy.greater_equal`
''')
less = ufunc.create_comparison(
'less', '<'
'''Tests elementwise if ``x1 < x2``.
.. seealso:: :data:`numpy.less`
''')
less_equal = ufunc.create_comparison(
'less_equal', '<=',
'''Tests elementwise if ``x1 <= x2``.
.. seealso:: :data:`numpy.less_equal`
''')
equal = ufunc.create_comparison(
'equal', '==',
'''Tests elementwise if ``x1 == x2``.
.. seealso:: :data:`numpy.equal`
''')
not_equal = ufunc.create_comparison(
'not_equal', '!=',
'''Tests elementwise if ``x1 != x2``.
.. seealso:: :data:`numpy.equal`
''')
| Python | 0.000092 |
244f3262989b0331a120eb546ca22c9bea9194e4 | add DownloadDelta to the admin | crate_project/apps/packages/admin.py | crate_project/apps/packages/admin.py | from django.contrib import admin
from packages.models import Package, Release, ReleaseFile, TroveClassifier, PackageURI
from packages.models import ReleaseRequire, ReleaseProvide, ReleaseObsolete, ReleaseURI, ChangeLog
from packages.models import DownloadDelta, ReadTheDocsPackageSlug
class PackageURIAdmin(admin.TabularInline):
model = PackageURI
extra = 0
class PackageAdmin(admin.ModelAdmin):
inlines = [PackageURIAdmin]
list_display = ["name", "created", "modified", "downloads_synced_on"]
list_filter = ["created", "modified", "downloads_synced_on"]
search_fields = ["name"]
class ReleaseRequireInline(admin.TabularInline):
model = ReleaseRequire
extra = 0
class ReleaseProvideInline(admin.TabularInline):
model = ReleaseProvide
extra = 0
class ReleaseObsoleteInline(admin.TabularInline):
model = ReleaseObsolete
extra = 0
class ReleaseFileInline(admin.TabularInline):
model = ReleaseFile
extra = 0
class ReleaseURIInline(admin.TabularInline):
model = ReleaseURI
extra = 0
class ReleaseAdmin(admin.ModelAdmin):
inlines = [ReleaseURIInline, ReleaseFileInline, ReleaseRequireInline, ReleaseProvideInline, ReleaseObsoleteInline]
list_display = ["__unicode__", "package", "version", "summary", "author", "author_email", "maintainer", "maintainer_email", "created", "modified"]
list_filter = ["created", "modified", "hidden"]
search_fields = ["package__name", "version", "summary", "author", "author_email", "maintainer", "maintainer_email"]
raw_id_fields = ["package"]
class TroveClassifierAdmin(admin.ModelAdmin):
list_display = ["trove"]
search_fields = ["trove"]
class ReleaseFileAdmin(admin.ModelAdmin):
list_display = ["release", "type", "python_version", "downloads", "comment", "created", "modified"]
list_filter = ["type", "created", "modified"]
search_fields = ["release__package__name", "filename", "digest"]
raw_id_fields = ["release"]
class DownloadDeltaAdmin(admin.ModelAdmin):
list_display = ["file", "date", "delta"]
list_filter = ["date"]
search_fields = ["file__release__package__name", "file__filename"]
raw_id_fields = ["file"]
class ChangeLogAdmin(admin.ModelAdmin):
list_display = ["package", "release", "type", "created", "modified"]
list_filter = ["type", "created", "modified"]
search_fields = ["package__name"]
raw_id_fields = ["package", "release"]
class ReadTheDocsPackageSlugAdmin(admin.ModelAdmin):
list_display = ["package", "slug"]
search_fields = ["package__name", "slug"]
raw_id_fields = ["package"]
admin.site.register(Package, PackageAdmin)
admin.site.register(Release, ReleaseAdmin)
admin.site.register(ReleaseFile, ReleaseFileAdmin)
admin.site.register(TroveClassifier, TroveClassifierAdmin)
admin.site.register(DownloadDelta, DownloadDeltaAdmin)
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(ReadTheDocsPackageSlug, ReadTheDocsPackageSlugAdmin)
| from django.contrib import admin
from packages.models import Package, Release, ReleaseFile, TroveClassifier, PackageURI
from packages.models import ReleaseRequire, ReleaseProvide, ReleaseObsolete, ReleaseURI, ChangeLog
from packages.models import ReadTheDocsPackageSlug
class PackageURIAdmin(admin.TabularInline):
model = PackageURI
extra = 0
class PackageAdmin(admin.ModelAdmin):
inlines = [PackageURIAdmin]
list_display = ["name", "created", "modified", "downloads_synced_on"]
list_filter = ["created", "modified", "downloads_synced_on"]
search_fields = ["name"]
class ReleaseRequireInline(admin.TabularInline):
model = ReleaseRequire
extra = 0
class ReleaseProvideInline(admin.TabularInline):
model = ReleaseProvide
extra = 0
class ReleaseObsoleteInline(admin.TabularInline):
model = ReleaseObsolete
extra = 0
class ReleaseFileInline(admin.TabularInline):
model = ReleaseFile
extra = 0
class ReleaseURIInline(admin.TabularInline):
model = ReleaseURI
extra = 0
class ReleaseAdmin(admin.ModelAdmin):
inlines = [ReleaseURIInline, ReleaseFileInline, ReleaseRequireInline, ReleaseProvideInline, ReleaseObsoleteInline]
list_display = ["__unicode__", "package", "version", "summary", "author", "author_email", "maintainer", "maintainer_email", "created", "modified"]
list_filter = ["created", "modified", "hidden"]
search_fields = ["package__name", "version", "summary", "author", "author_email", "maintainer", "maintainer_email"]
raw_id_fields = ["package"]
class TroveClassifierAdmin(admin.ModelAdmin):
list_display = ["trove"]
search_fields = ["trove"]
class ReleaseFileAdmin(admin.ModelAdmin):
list_display = ["release", "type", "python_version", "downloads", "comment", "created", "modified"]
list_filter = ["type", "created", "modified"]
search_fields = ["release__package__name", "filename", "digest"]
raw_id_fields = ["release"]
class ChangeLogAdmin(admin.ModelAdmin):
list_display = ["package", "release", "type", "created", "modified"]
list_filter = ["type", "created", "modified"]
search_fields = ["package__name"]
raw_id_fields = ["package", "release"]
class ReadTheDocsPackageSlugAdmin(admin.ModelAdmin):
list_display = ["package", "slug"]
search_fields = ["package__name", "slug"]
raw_id_fields = ["package"]
admin.site.register(Package, PackageAdmin)
admin.site.register(Release, ReleaseAdmin)
admin.site.register(ReleaseFile, ReleaseFileAdmin)
admin.site.register(TroveClassifier, TroveClassifierAdmin)
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(ReadTheDocsPackageSlug, ReadTheDocsPackageSlugAdmin)
| Python | 0 |
4c703480fe395ddef5faa6d388a472b7053f26af | Add debug command line option. | jskom/__main__.py | jskom/__main__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import asyncio
import logging
from hypercorn.asyncio import serve
from hypercorn.config import Config
from jskom import app, init_app
log = logging.getLogger("jskom.main")
def run(host, port):
# use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
# http://werkzeug.pocoo.org/docs/serving/#troubleshooting
init_app()
config = Config()
config.bind = ["{}:{}".format(host, port)]
asyncio.run(serve(app, config), debug=True)
def main():
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-15s %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description='Jskom')
parser.add_argument(
'--debug', help='Enable debug logging',
default=False, action='store_true')
# use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
# http://werkzeug.pocoo.org/docs/serving/#troubleshooting
parser.add_argument(
'--host', help='Hostname or IP to listen on',
default='127.0.0.1')
parser.add_argument(
'--port', help='Port to listen on',
type=int, default=5000)
args = parser.parse_args()
loglevel = logging.DEBUG if args.debug else logging.INFO
logging.getLogger().setLevel(loglevel)
if not args.debug:
# asyncio logs quite verbose also on INFO level, so set to WARNING.
logging.getLogger('asyncio').setLevel(logging.WARNING)
log.info("Using args: %s", args)
run(args.host, args.port)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import asyncio
import logging
from hypercorn.asyncio import serve
from hypercorn.config import Config
from jskom import app, init_app
log = logging.getLogger("jskom.main")
def run(host, port):
# use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
# http://werkzeug.pocoo.org/docs/serving/#troubleshooting
init_app()
config = Config()
config.bind = ["{}:{}".format(host, port)]
asyncio.run(serve(app, config), debug=True)
def main():
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-15s %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Jskom')
# use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
# http://werkzeug.pocoo.org/docs/serving/#troubleshooting
parser.add_argument('--host', help='Hostname or IP to listen on',
default='127.0.0.1')
parser.add_argument('--port', help='Port to listen on',
type=int, default=5000)
args = parser.parse_args()
log.info("Using args: %s", args)
run(args.host, args.port)
if __name__ == "__main__":
main()
| Python | 0 |
cdcc807ecd7126f533bbc01721276d62a4a72732 | fix all_docs dbs to work after flip | corehq/couchapps/__init__.py | corehq/couchapps/__init__.py | from corehq.preindex import CouchAppsPreindexPlugin
from django.conf import settings
CouchAppsPreindexPlugin.register('couchapps', __file__, {
'form_question_schema': 'meta',
'users_extra': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
'noneulized_users': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
'all_docs': (None, settings.NEW_USERS_GROUPS_DB),
})
| from corehq.preindex import CouchAppsPreindexPlugin
from django.conf import settings
CouchAppsPreindexPlugin.register('couchapps', __file__, {
'form_question_schema': 'meta',
'users_extra': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
'noneulized_users': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
'all_docs': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
})
| Python | 0 |
39f26d6bb46eeb96c54881ab9c0147051328b8e8 | fix another misuse of the 1.0 DB API. | trac/tests/env.py | trac/tests/env.py | from __future__ import with_statement
from trac import db_default
from trac.core import ComponentManager
from trac.env import Environment
import os.path
import unittest
import tempfile
import shutil
class EnvironmentCreatedWithoutData(Environment):
def __init__(self, path, create=False, options=[]):
ComponentManager.__init__(self)
self.path = path
self.systeminfo = []
self._href = self._abs_href = None
if create:
self.create(options)
else:
self.verify()
self.setup_config()
class EmptyEnvironmentTestCase(unittest.TestCase):
def setUp(self):
env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
self.env = EnvironmentCreatedWithoutData(env_path, create=True)
def tearDown(self):
self.env.shutdown() # really closes the db connections
shutil.rmtree(self.env.path)
def test_get_version(self):
"""Testing env.get_version"""
assert self.env.get_version() is False, self.env.get_version()
class EnvironmentTestCase(unittest.TestCase):
def setUp(self):
env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
self.env = Environment(env_path, create=True)
def tearDown(self):
self.env.shutdown() # really closes the db connections
shutil.rmtree(self.env.path)
def test_get_version(self):
"""Testing env.get_version"""
assert self.env.get_version() == db_default.db_version
def test_get_known_users(self):
"""Testing env.get_known_users"""
with self.env.db_transaction as db:
db.executemany("INSERT INTO session VALUES (%s,%s,0)",
[('123', 0),('tom', 1), ('joe', 1), ('jane', 1)])
db.executemany("INSERT INTO session_attribute VALUES (%s,%s,%s,%s)",
[('123', 0, 'email', 'a@example.com'),
('tom', 1, 'name', 'Tom'),
('tom', 1, 'email', 'tom@example.com'),
('joe', 1, 'email', 'joe@example.com'),
('jane', 1, 'name', 'Jane')])
users = {}
for username, name, email in self.env.get_known_users():
users[username] = (name, email)
assert not users.has_key('anonymous')
self.assertEqual(('Tom', 'tom@example.com'), users['tom'])
self.assertEqual((None, 'joe@example.com'), users['joe'])
self.assertEqual(('Jane', None), users['jane'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(EnvironmentTestCase, 'test'))
suite.addTest(unittest.makeSuite(EmptyEnvironmentTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| from __future__ import with_statement
from trac import db_default
from trac.core import ComponentManager
from trac.env import Environment
import os.path
import unittest
import tempfile
import shutil
class EnvironmentCreatedWithoutData(Environment):
def __init__(self, path, create=False, options=[]):
ComponentManager.__init__(self)
self.path = path
self.systeminfo = []
self._href = self._abs_href = None
if create:
self.create(options)
else:
self.verify()
self.setup_config()
class EmptyEnvironmentTestCase(unittest.TestCase):
def setUp(self):
env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
self.env = EnvironmentCreatedWithoutData(env_path, create=True)
def tearDown(self):
with self.env.db_query as db:
db.close()
self.env.shutdown() # really closes the db connections
shutil.rmtree(self.env.path)
def test_get_version(self):
"""Testing env.get_version"""
assert self.env.get_version() is False, self.env.get_version()
class EnvironmentTestCase(unittest.TestCase):
def setUp(self):
env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
self.env = Environment(env_path, create=True)
def tearDown(self):
with self.env.db_query as db:
db.close()
self.env.shutdown() # really closes the db connections
shutil.rmtree(self.env.path)
def test_get_version(self):
"""Testing env.get_version"""
assert self.env.get_version() == db_default.db_version
def test_get_known_users(self):
"""Testing env.get_known_users"""
with self.env.db_transaction as db:
db.executemany("INSERT INTO session VALUES (%s,%s,0)",
[('123', 0),('tom', 1), ('joe', 1), ('jane', 1)])
db.executemany("INSERT INTO session_attribute VALUES (%s,%s,%s,%s)",
[('123', 0, 'email', 'a@example.com'),
('tom', 1, 'name', 'Tom'),
('tom', 1, 'email', 'tom@example.com'),
('joe', 1, 'email', 'joe@example.com'),
('jane', 1, 'name', 'Jane')])
users = {}
for username, name, email in self.env.get_known_users():
users[username] = (name, email)
assert not users.has_key('anonymous')
self.assertEqual(('Tom', 'tom@example.com'), users['tom'])
self.assertEqual((None, 'joe@example.com'), users['joe'])
self.assertEqual(('Jane', None), users['jane'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(EnvironmentTestCase, 'test'))
suite.addTest(unittest.makeSuite(EmptyEnvironmentTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| Python | 0.000005 |
a27e667dedeaaa0aefadc3328149f311bb277c45 | Update bottlespin.py | bottlespin/bottlespin.py | bottlespin/bottlespin.py | import discord
from discord.ext import commands
from random import choice
class Bottlespin:
"""Spins a bottle and lands on a random user."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True, alias=["bottlespin"])
async def spin(self, ctx, role):
"""Spin the bottle"""
await self.bot.say(str(role))
roles = [ctx.message.server.roles]
await self.bot.say(str(roles[1]))
if role in roles:
await self.bot.say(str(role))
await self.bot.say(str(roles))
author = ctx.message.author
server = ctx.message.server
if len(server.members) < 2:
await self.bot.say("`Not enough people are around to spin the bottle`")
return
if role in roles:
roleexist = True
else:
await self.bot.say("`{} is not a exising role`".format(role))
return
if roleexist:
target = [m for m in server.members if m != author and role in [
s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
else:
target = [m for m in server.members if m != author and str(
m.status) == "online" or str(m.status) == "idle"]
if not target:
if role:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at with the role {}`".format(role))
else:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at`")
return
else:
target = choice(list(target))
await self.bot.say("`{0.display_name}#{0.discriminator} spinned the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))
def setup(bot):
n = Bottlespin(bot)
bot.add_cog(n)
| import discord
from discord.ext import commands
from random import choice
class Bottlespin:
"""Spins a bottle and lands on a random user."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True, alias=["bottlespin"])
async def spin(self, ctx, role):
"""Spin the bottle"""
roles = [ctx.message.server.roles]
role = discord.Role.name
if role in roles:
await self.bot.say(str(role))
await self.bot.say(str(roles))
author = ctx.message.author
server = ctx.message.server
if len(server.members) < 2:
await self.bot.say("`Not enough people are around to spin the bottle`")
return
if role in roles:
roleexist = True
else:
await self.bot.say("`{} is not a exising role`".format(role))
return
if roleexist:
target = [m for m in server.members if m != author and role in [
s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
else:
target = [m for m in server.members if m != author and str(
m.status) == "online" or str(m.status) == "idle"]
if not target:
if role:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at with the role {}`".format(role))
else:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at`")
return
else:
target = choice(list(target))
await self.bot.say("`{0.display_name}#{0.discriminator} spinned the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))
def setup(bot):
n = Bottlespin(bot)
bot.add_cog(n)
| Python | 0 |
7a25ace4851da30a252842b5d5e3a7efee90ce00 | Raise error when /boundaries/set-slug URL points to a nonexistent set | boundaryservice/views.py | boundaryservice/views.py | from django.contrib.gis.db import models
from django.http import Http404
from boundaryservice.base_views import (ModelListView, ModelDetailView,
ModelGeoListView, ModelGeoDetailView)
from boundaryservice.models import BoundarySet, Boundary
class BoundarySetListView(ModelListView):
""" e.g. /boundary-set/ """
filterable_fields = ['name', 'domain', 'hierarchy']
model = BoundarySet
class BoundarySetDetailView(ModelDetailView):
""" e.g. /boundary-set/federal-electoral-districts/ """
model = BoundarySet
def get_object(self, request, qs, slug):
try:
return qs.get(slug=slug)
except BoundarySet.DoesNotExist:
raise Http404
class BoundaryListView(ModelGeoListView):
""" e.g. /boundary/federal-electoral-districts/
or /boundary/federal-electoral-districts/centroid """
filterable_fields = ['external_id', 'name']
allowed_geo_fields = ('shape', 'simple_shape', 'centroid')
default_geo_filter_field = 'shape'
model = Boundary
def filter(self, request, qs):
qs = super(BoundaryListView, self).filter(request, qs)
if 'intersects' in request.GET:
(set_slug, slug) = request.GET['intersects'].split('/')
try:
shape = Boundary.objects.filter(slug=slug, set=set_slug).values_list('shape', flat=True)[0]
except IndexError:
raise Http404
qs = qs.filter(models.Q(shape__covers=shape) | models.Q(shape__overlaps=shape))
if 'touches' in request.GET:
(set_slug, slug) = request.GET['touches'].split('/')
try:
shape = Boundary.objects.filter(slug=slug, set=set_slug).values_list('shape', flat=True)[0]
except IndexError:
raise Http404
qs = qs.filter(shape__touches=shape)
if 'sets' in request.GET:
set_slugs = request.GET['sets'].split(',')
qs = qs.filter(set__in=set_slugs)
return qs
def get_qs(self, request, set_slug=None):
qs = super(BoundaryListView, self).get_qs(request)
if set_slug:
if not BoundarySet.objects.filter(slug=set_slug).exists():
raise Http404
return qs.filter(set=set_slug)
return qs
class BoundaryObjectGetterMixin(object):
model = Boundary
def get_object(self, request, qs, set_slug, slug):
try:
return qs.get(slug=slug, set=set_slug)
except Boundary.DoesNotExist:
raise Http404
class BoundaryDetailView(ModelDetailView, BoundaryObjectGetterMixin):
""" e.g. /boundary/federal-electoral-districts/outremont/ """
def __init__(self):
super(BoundaryDetailView, self).__init__()
self.base_qs = self.base_qs.defer('shape', 'simple_shape', 'centroid')
class BoundaryGeoDetailView(ModelGeoDetailView, BoundaryObjectGetterMixin):
""" e.g /boundary/federal-electoral-districts/outremont/shape """
allowed_geo_fields = ('shape', 'simple_shape', 'centroid')
| from django.contrib.gis.db import models
from django.http import Http404
from boundaryservice.base_views import (ModelListView, ModelDetailView,
ModelGeoListView, ModelGeoDetailView)
from boundaryservice.models import BoundarySet, Boundary
class BoundarySetListView(ModelListView):
""" e.g. /boundary-set/ """
filterable_fields = ['name', 'domain', 'hierarchy']
model = BoundarySet
class BoundarySetDetailView(ModelDetailView):
""" e.g. /boundary-set/federal-electoral-districts/ """
model = BoundarySet
def get_object(self, request, qs, slug):
try:
return qs.get(slug=slug)
except BoundarySet.DoesNotExist:
raise Http404
class BoundaryListView(ModelGeoListView):
""" e.g. /boundary/federal-electoral-districts/
or /boundary/federal-electoral-districts/centroid """
filterable_fields = ['external_id', 'name']
allowed_geo_fields = ('shape', 'simple_shape', 'centroid')
default_geo_filter_field = 'shape'
model = Boundary
def filter(self, request, qs):
qs = super(BoundaryListView, self).filter(request, qs)
if 'intersects' in request.GET:
(set_slug, slug) = request.GET['intersects'].split('/')
try:
shape = Boundary.objects.filter(slug=slug, set=set_slug).values_list('shape', flat=True)[0]
except IndexError:
raise Http404
qs = qs.filter(models.Q(shape__covers=shape) | models.Q(shape__overlaps=shape))
if 'touches' in request.GET:
(set_slug, slug) = request.GET['touches'].split('/')
try:
shape = Boundary.objects.filter(slug=slug, set=set_slug).values_list('shape', flat=True)[0]
except IndexError:
raise Http404
qs = qs.filter(shape__touches=shape)
if 'sets' in request.GET:
set_slugs = request.GET['sets'].split(',')
qs = qs.filter(set__in=set_slugs)
return qs
def get_qs(self, request, set_slug=None):
qs = super(BoundaryListView, self).get_qs(request)
if set_slug:
return qs.filter(set=set_slug)
return qs
class BoundaryObjectGetterMixin(object):
model = Boundary
def get_object(self, request, qs, set_slug, slug):
try:
return qs.get(slug=slug, set=set_slug)
except Boundary.DoesNotExist:
raise Http404
class BoundaryDetailView(ModelDetailView, BoundaryObjectGetterMixin):
""" e.g. /boundary/federal-electoral-districts/outremont/ """
def __init__(self):
super(BoundaryDetailView, self).__init__()
self.base_qs = self.base_qs.defer('shape', 'simple_shape', 'centroid')
class BoundaryGeoDetailView(ModelGeoDetailView, BoundaryObjectGetterMixin):
""" e.g /boundary/federal-electoral-districts/outremont/shape """
allowed_geo_fields = ('shape', 'simple_shape', 'centroid')
| Python | 0 |
c63463ff040f79c605d6c0414261527dda3ed00a | Switch to new babel version in require test. | tests/test_jsinterpreter.py | tests/test_jsinterpreter.py | import unittest
from dukpy._dukpy import JSRuntimeError
import dukpy
from diffreport import report_diff
class TestJSInterpreter(unittest.TestCase):
def test_interpreter_keeps_context(self):
interpreter = dukpy.JSInterpreter()
ans = interpreter.evaljs("var o = {'value': 5}; o")
assert ans == {'value': 5}
ans = interpreter.evaljs("o.value += 1; o")
assert ans == {'value': 6}
def test_call_python(self):
def _say_hello(num, who):
return 'Hello ' + ' '.join([who]*num)
interpreter = dukpy.JSInterpreter()
interpreter.export_function('say_hello', _say_hello)
res = interpreter.evaljs("call_python('say_hello', 3, 'world')")
assert res == 'Hello world world world', res
def test_module_loader(self):
interpreter = dukpy.JSInterpreter()
res = interpreter.evaljs('''
babel = require('babel-6.26.0.min');
babel.transform(dukpy.es6code, {presets: ["es2015"]}).code;
''', es6code='let i=5;')
expected = '''"use strict";
var i = 5;'''
assert res == expected, report_diff(expected, res)
def test_module_loader_unexisting(self):
interpreter = dukpy.JSInterpreter()
with self.assertRaises(JSRuntimeError) as err:
interpreter.evaljs("require('missing_module');")
assert 'cannot find module: missing_module' in str(err.exception)
| import unittest
from dukpy._dukpy import JSRuntimeError
import dukpy
from diffreport import report_diff
class TestJSInterpreter(unittest.TestCase):
def test_interpreter_keeps_context(self):
interpreter = dukpy.JSInterpreter()
ans = interpreter.evaljs("var o = {'value': 5}; o")
assert ans == {'value': 5}
ans = interpreter.evaljs("o.value += 1; o")
assert ans == {'value': 6}
def test_call_python(self):
def _say_hello(num, who):
return 'Hello ' + ' '.join([who]*num)
interpreter = dukpy.JSInterpreter()
interpreter.export_function('say_hello', _say_hello)
res = interpreter.evaljs("call_python('say_hello', 3, 'world')")
assert res == 'Hello world world world', res
def test_module_loader(self):
interpreter = dukpy.JSInterpreter()
res = interpreter.evaljs('''
babel = require('babel-6.14.0.min');
babel.transform(dukpy.es6code, {presets: ["es2015"]}).code;
''', es6code='let i=5;')
expected = '''"use strict";
var i = 5;'''
assert res == expected, report_diff(expected, res)
def test_module_loader_unexisting(self):
interpreter = dukpy.JSInterpreter()
with self.assertRaises(JSRuntimeError) as err:
interpreter.evaljs("require('missing_module');")
assert 'cannot find module: missing_module' in str(err.exception)
| Python | 0 |
3681ada3917d5811e1e959270e1df0edea7ebf55 | Update __init__.py | mapclientplugins/smoothfitstep/__init__.py | mapclientplugins/smoothfitstep/__init__.py |
'''
MAP Client Plugin
'''
__version__ = '0.1.0'
__author__ = 'Richard Christie'
__stepname__ = 'smoothfit'
__location__ = ''
# import class that derives itself from the step mountpoint.
from mapclientplugins.smoothfitstep import step
# Import the resource file when the module is loaded,
# this enables the framework to use the step icon.
from . import resources_rc
|
'''
MAP Client Plugin
'''
__version__ = '0.1.0'
__author__ = 'Richard Christie'
__stepname__ = 'smoothfit'
__location__ = ''
# import class that derives itself from the step mountpoint.
from mapclientplugins.smoothfitstep import step
# Import the resource file when the module is loaded,
# this enables the framework to use the step icon.
from . import resources_rc | Python | 0.000072 |
379d2df1041605d3c8a21d543f9955601ee07558 | Add threading to syncer | imageledger/management/commands/syncer.py | imageledger/management/commands/syncer.py | from collections import namedtuple
import itertools
import logging
from multiprocessing.dummy import Pool as ThreadPool
from elasticsearch import helpers
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from imageledger import models, search
console = logging.StreamHandler()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
MAX_CONNECTION_RETRIES = 10
RETRY_WAIT = 5 # Number of sections to wait before retrying
DEFAULT_CHUNK_SIZE = 1000
class Command(BaseCommand):
can_import_settings = True
requires_migrations_checks = True
def add_arguments(self, parser):
parser.add_argument("--verbose",
action="store_true",
default=False,
help="Be very chatty and run logging at DEBUG")
parser.add_argument("--chunk-size",
dest="chunk_size",
default=DEFAULT_CHUNK_SIZE,
type=int,
help="The number of records to batch process at once")
parser.add_argument("--with-fingerprinting",
dest="with_fingerprinting",
action="store_true",
help="Whether to run the expensive perceptual hash routine as part of syncing")
def handle(self, *args, **options):
if options['verbose']:
log.addHandler(console)
log.setLevel(logging.DEBUG)
self.sync_all_images(chunk_size=options['chunk_size'], with_fingerprinting=options['with_fingerprinting'])
def sync_all_images(self, chunk_size=DEFAULT_CHUNK_SIZE, with_fingerprinting=False, num_iterations=5):
"""Sync all of the images, sorting from least-recently-synced"""
pool = ThreadPool(4)
starts = [i * chunk_size for i in range(0, num_iterations)]
pool.starmap(do_sync, zip(starts, itertools.repeat(chunk_size, num_iterations), itertools.repeat(with_fingerprinting, num_iterations)))
pool.close()
pool.join()
def do_sync(start, chunk_size, with_fingerprinting):
end = start + chunk_size
log.info("Starting sync in range from %d to %d...", start, end)
imgs = models.Image.objects.all().order_by('-last_synced_with_source')[start:end]
for img in imgs:
img.sync(attempt_perceptual_hash=with_fingerprinting)
| from collections import namedtuple
import itertools
import logging
from elasticsearch import helpers
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from imageledger import models, search
console = logging.StreamHandler()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
MAX_CONNECTION_RETRIES = 10
RETRY_WAIT = 5 # Number of sections to wait before retrying
DEFAULT_CHUNK_SIZE = 1000
class Command(BaseCommand):
can_import_settings = True
requires_migrations_checks = True
def add_arguments(self, parser):
parser.add_argument("--verbose",
action="store_true",
default=False,
help="Be very chatty and run logging at DEBUG")
parser.add_argument("--chunk-size",
dest="chunk_size",
default=DEFAULT_CHUNK_SIZE,
type=int,
help="The number of records to batch process at once")
parser.add_argument("--with-fingerprinting",
dest="with_fingerprinting",
action="store_true",
help="Whether to run the expensive perceptual hash routine as part of syncing")
def handle(self, *args, **options):
if options['verbose']:
log.addHandler(console)
log.setLevel(logging.DEBUG)
self.sync_all_images(chunk_size=options['chunk_size'], with_fingerprinting=options['with_fingerprinting'])
def sync_all_images(self, chunk_size=DEFAULT_CHUNK_SIZE, with_fingerprinting=False, num_iterations=1000):
"""Sync all of the images, sorting from least-recently-synced"""
count = 0
while count < num_iterations:
imgs = models.Image.objects.all().order_by('-last_synced_with_source')[0:chunk_size]
for img in imgs:
img.sync(attempt_perceptual_hash=with_fingerprinting)
count += 1
| Python | 0.000001 |
33b7e9371305c4171594c21c154cd5724ea013cb | allow segment and overlap be specified as a parameter | scripts/nanopolish_makerange.py | scripts/nanopolish_makerange.py | import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
for n in xrange(0, length, SEGMENT_LENGTH):
if ( n + SEGMENT_LENGTH) > length:
print "%s:%d-%d" % (name, n, length - 1)
else:
print "%s:%d-%d" % (name, n, n + SEGMENT_LENGTH + OVERLAP_LENGTH)
| import sys
from Bio import SeqIO
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(sys.argv[1]), "fasta")]
SEGMENT_LENGTH = 50000
OVERLAP_LENGTH = 200
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
for n in xrange(0, length, SEGMENT_LENGTH):
if ( n + SEGMENT_LENGTH) > length:
print "%s:%d-%d" % (name, n, length - 1)
else:
print "%s:%d-%d" % (name, n, n + SEGMENT_LENGTH + OVERLAP_LENGTH)
| Python | 0 |
105a413b18456f9a505dd1ed4bf515987b4792d2 | add --force option to management command to force all files to be pushed | mediasync/management/commands/syncmedia.py | mediasync/management/commands/syncmedia.py | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import mediasync
class Command(BaseCommand):
help = "Sync local media with S3"
args = '[options]'
requires_model_validation = False
option_list = BaseCommand.option_list + (
make_option("-F", "--force", dest="force", help="force files to sync", action="store_true"),
)
def handle(self, *args, **options):
force = options.get('force') or False
try:
mediasync.sync(force=force)
except ValueError, ve:
raise CommandError('%s\nUsage is mediasync %s' % (ve.message, self.args)) | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import mediasync
class Command(BaseCommand):
help = "Sync local media with S3"
args = '[options]'
requires_model_validation = False
option_list = BaseCommand.option_list + (
make_option("-f", "--force", dest="force", help="force files to sync", action="store_true"),
)
def handle(self, *args, **options):
force = options.get('force') or False
try:
mediasync.sync(force=force)
except ValueError, ve:
raise CommandError('%s\nUsage is mediasync %s' % (ve.message, self.args)) | Python | 0 |
0be6bddf8c92c461af57e7c61c2378c817fb0143 | Make oppetarkiv work with --all-episodes again | lib/svtplay_dl/service/oppetarkiv.py | lib/svtplay_dl/service/oppetarkiv.py | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.log import log
class OppetArkiv(Svtplay):
supported_domains = ['oppetarkiv.se']
def find_all_episodes(self, options):
page = 1
data = self.get_urldata()
match = re.search(r'"/etikett/titel/([^"/]+)', data)
if match is None:
match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
if match is None:
log.error("Couldn't find title")
return
program = match.group(1)
episodes = []
n = 0
if options.all_last > 0:
sort = "tid_fallande"
else:
sort = "tid_stigande"
while True:
url = "http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true" % (program, page, sort)
data = self.http.request("get", url)
if data.status_code == 404:
break
data = data.text
regex = re.compile(r'href="(/video/[^"]+)"')
for match in regex.finditer(data):
if n == options.all_last:
break
episodes.append("http://www.oppetarkiv.se%s" % match.group(1))
n += 1
page += 1
return episodes
| # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.log import log
class OppetArkiv(Svtplay):
supported_domains = ['oppetarkiv.se']
def find_all_episodes(self, options):
page = 1
data = self.get_urldata()
match = re.search(r'"/etikett/titel/([^"/]+)', data)
if match is None:
match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
if match is None:
log.error("Couldn't find title")
return
program = match.group(1)
more = True
episodes = []
n = 0
if options.all_last > 0:
sort = "tid_fallande"
else:
sort = "tid_stigande"
while more:
url = "http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true" % (program, page, sort)
data = self.http.request("get", url).text
visa = re.search(r'svtXColorDarkLightGrey', data)
if not visa:
more = False
regex = re.compile(r'href="(/video/[^"]+)"')
for match in regex.finditer(data):
if n == options.all_last:
break
episodes.append("http://www.oppetarkiv.se%s" % match.group(1))
n += 1
page += 1
return episodes
| Python | 0 |
a52b4097dfcb9fea26af0bc994426baecb97efc1 | update image if streetview url | croplands_api/views/api/locations.py | croplands_api/views/api/locations.py | from croplands_api import api
from croplands_api.models import Location
from processors import api_roles, add_user_to_posted_data, remove_relations, debug_post
from records import save_record_state_to_history
from croplands_api.utils.s3 import upload_image
import requests
import uuid
import cStringIO
def process_records(result=None, **kwargs):
"""
This processes all records that may have been posted as a relation of the location.
:param result:
:param kwargs:
:return: None
"""
for record in result['records']:
save_record_state_to_history(record)
def merge_same_location_lat_long(data=None, **kwargs):
"""
This preprocessor checks if the location already exists.
:param data:
:param kwargs:
:return:
"""
# TODO
pass
def change_field_names(data=None, **kwargs):
if 'photos' in data:
data['images'] = data['photos']
del data['photos']
def check_for_street_view_image(data=None, **kwargs):
if 'images' not in data:
return
for image in data['images']:
if 'source' in image and image['source'] == 'streetview':
try:
r = requests.get(image['url'])
if r.status_code == 200:
url = 'images/streetview/' + str(uuid.uuid4()) + '.jpg'
image['url'] = url
except Exception as e:
print(e)
def create(app):
api.create_api(Location,
app=app,
collection_name='locations',
methods=['GET', 'POST', 'PATCH', 'DELETE'],
preprocessors={
'POST': [change_field_names, add_user_to_posted_data, debug_post,
check_for_street_view_image],
'PATCH_SINGLE': [api_roles(['mapping', 'validation', 'admin']),
remove_relations],
'PATCH_MANY': [api_roles('admin'), remove_relations],
'DELETE': [api_roles('admin')]
},
postprocessors={
'POST': [process_records],
'PATCH_SINGLE': [],
'PATCH_MANY': [],
'DELETE': []
},
results_per_page=10) | from croplands_api import api
from croplands_api.models import Location
from processors import api_roles, add_user_to_posted_data, remove_relations, debug_post
from records import save_record_state_to_history
from croplands_api.tasks.records import get_ndvi
def process_records(result=None, **kwargs):
"""
This processes all records that may have been posted as a relation of the location.
:param result:
:param kwargs:
:return: None
"""
for record in result['records']:
save_record_state_to_history(record)
def merge_same_location_lat_long(data=None, **kwargs):
"""
This preprocessor checks if the location already exists.
:param data:
:param kwargs:
:return:
"""
# TODO
pass
def change_field_names(data=None, **kwargs):
if 'photos' in data:
data['images'] = data['photos']
del data['photos']
def create(app):
api.create_api(Location,
app=app,
collection_name='locations',
methods=['GET', 'POST', 'PATCH', 'DELETE'],
preprocessors={
'POST': [change_field_names, add_user_to_posted_data, debug_post],
'PATCH_SINGLE': [api_roles(['mapping', 'validation', 'admin']), remove_relations],
'PATCH_MANY': [api_roles('admin'), remove_relations],
'DELETE': [api_roles('admin')]
},
postprocessors={
'POST': [process_records],
'PATCH_SINGLE': [],
'PATCH_MANY': [],
'DELETE': []
},
results_per_page=10) | Python | 0.000005 |
1d305388fd1c673096e327ea2c0259b955d64156 | Update test_step_7.py | pySDC/tests/test_tutorials/test_step_7.py | pySDC/tests/test_tutorials/test_step_7.py | import os
import subprocess
import pytest
from pySDC.tutorial.step_7.B_pySDC_with_mpi4pyfft import main as main_B
@pytest.mark.fenics
def test_A():
from pySDC.tutorial.step_7.A_pySDC_with_FEniCS import main as main_A
main_A()
@pytest.mark.parallel
def test_B():
main_B()
@pytest.mark.parallel
def test_C_1x1():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
# set up new/empty file for output
fname = 'step_7_C_out_1x1.txt'
f = open(fname, 'w')
f.close()
num_procs = 1
num_procs_space = 1
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.parallel
def test_C_1x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
fname = 'step_7_C_out_1x2.txt'
f = open(fname, 'w')
f.close()
num_procs = 2
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.parallel
def test_C_2x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
fname = 'step_7_C_out_2x2.txt'
f = open(fname, 'w')
f.close()
num_procs = 4
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
| import os
import subprocess
import pytest
from pySDC.tutorial.step_7.B_pySDC_with_mpi4pyfft import main as main_B
@pytest.mark.fenics
def test_A():
from pySDC.tutorial.step_7.A_pySDC_with_FEniCS import main as main_A
main_A()
@pytest.mark.parallel
def test_B():
main_B()
@pytest.mark.parallel
def test_C_1x1():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
# set up new/empty file for output
fname = 'step_7_C_out_1x1.txt'
f = open(fname, 'w')
f.close()
num_procs = 1
num_procs_space = 1
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.parallel
def test_C_1x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
fname = 'step_7_C_out_1x2.txt'
f = open(fname, 'w')
f.close()
num_procs = 2
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.parallel
def test_C_2x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
fname = 'step_7_C_out_2x2.txt'
f = open(fname, 'w')
f.close()
num_procs = 4
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
| Python | 0.000014 |
928d498b5f67970f9ec75d62068e8cbec0fdc352 | Update python3, flake8 | ni_scanner.py | ni_scanner.py | from ConfigParser import SafeConfigParser
from utils.cli import CLI
from api.queue import Queue
from api.nerds import NerdsApi
from scanner.host import HostScanner
from scanner.exceptions import ScannerExeption
from utils.url import url_concat
import logging
FORMAT = '%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('ni_scanner')
def process_host(queue, nerds_api):
item = queue.next("Host")
while item:
try:
queue.processing(item)
scanner = HostScanner(item)
nerds = scanner.process()
if not nerds:
# Error occured :(
logger.error("Unable to scan item %s", str(item))
queue.failed(item)
else:
logger.debug("Posting nerds data")
nerds_api.send(nerds)
queue.done(item)
except ScannerExeption as e:
logger.error("%s", e)
failed(queue, item)
except Exception as e:
logger.error("Unable to process host %s got error: %s", item, str(e))
failed(queue, item)
item = queue.next("Host")
def failed(queue, item):
try:
queue.failed(item)
except Exception as e:
logger.error("Problem with reaching NI, got error: %s", e)
def main():
args = CLI().options()
try:
config = SafeConfigParser()
config.readfp(open(args.config))
except IOError:
logger.error("Config file '%s' is missing", args.config)
return None
# ready :)
api_user = config.get("NI", "api_user")
api_key = config.get("NI", "api_key")
queue_url = url_concat(config.get("NI", "url"), "scan_queue/")
queue = Queue(queue_url, api_user, api_key)
nerds_url = url_concat(config.get("NI", "url"), "nerds/")
nerds_api = NerdsApi(nerds_url, api_user, api_key)
process_host(queue, nerds_api)
if __name__ == "__main__":
main()
| from ConfigParser import SafeConfigParser
from utils.cli import CLI
from api.queue import Queue
from api.nerds import NerdsApi
from scanner.host import HostScanner
from scanner.exceptions import ScannerExeption
from utils.url import url_concat
import logging
FORMAT = '%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('ni_scanner')
def process_host(queue, nerds_api):
item = queue.next("Host")
while item:
try:
queue.processing(item)
scanner = HostScanner(item)
nerds = scanner.process()
if not nerds:
# Error occured :(
logger.error("Unable to scan item "+str(item))
queue.failed(item)
else:
logger.debug("Posting nerds data")
nerds_api.send(nerds)
queue.done(item)
except ScannerExeption as e:
logger.error("%s",e)
failed(queue,item)
except Exception as e:
logger.error("Unable to process host %s got error: %s",item,str(e))
failed(queue,item)
item = queue.next("Host")
def failed(queue,item):
try:
queue.failed(item)
except Exception as e:
logger.error("Problem with reaching NI, got error: %s", e)
def main():
args = CLI().options()
try:
config = SafeConfigParser()
config.readfp(open(args.config))
except IOError as (errno, strerror):
logger.error("Config file '%s' is missing", args.config)
return None
## ready :)
api_user = config.get("NI", "api_user")
api_key = config.get("NI", "api_key")
queue_url = url_concat(config.get("NI", "url"), "scan_queue/")
queue = Queue(queue_url, api_user, api_key)
nerds_url = url_concat(config.get("NI", "url"), "nerds/")
nerds_api = NerdsApi(nerds_url, api_user, api_key)
process_host(queue, nerds_api)
if __name__ == "__main__":
main()
| Python | 0.000004 |
496007543f941bb3ca46c011383f2673b9362e47 | Bump development version | debreach/__init__.py | debreach/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '1.4.1'
version_info = version.StrictVersion(__version__).version
default_app_config = 'debreach.apps.DebreachConfig'
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '1.4.0'
version_info = version.StrictVersion(__version__).version
default_app_config = 'debreach.apps.DebreachConfig'
| Python | 0 |
31d6ce09382035458eca2a310f99cb3c958ea604 | Use main template environment for rendering document content | nib/render.py | nib/render.py | import jinja2
from jinja2 import Environment, FileSystemLoader, Template
from os import path
import time
jinja_filters = {}
def jinja(name):
def decorator(f):
jinja_filters[name] = f
return f
return decorator
class Render(object):
def __init__(self, options, documents):
self.options = options
self.documents = documents
self.loader = FileSystemLoader(path.abspath(options['template_path']))
self.env = Environment(loader=self.loader)
for name in jinja_filters:
self.env.filters[name] = jinja_filters[name]
self.site = dict(options['site'], documents=documents)
self.now = time.time()
def render_content(self, document):
params = {
'now': self.now,
'site': self.site,
'page': document,
}
params.update(document)
document.short = self.env.from_string(document.short).render(**params)
document.content = self.env.from_string(document.content).render(**params)
def render_template(self, document):
if 'template' in document:
template = self.env.get_template(document['template'])
params = {
'now': self.now,
'site': self.options['site'],
'page': document,
'content': document.content,
'short': document.short,
}
params.update(document)
return template.render(**params)
else:
return document.content
| import jinja2
from jinja2 import Environment, FileSystemLoader, Template
from os import path
import time
jinja_filters = {}
def jinja(name):
def decorator(f):
jinja_filters[name] = f
return f
return decorator
class Render(object):
def __init__(self, options, documents):
self.options = options
self.documents = documents
self.loader = FileSystemLoader(path.abspath(options['template_path']))
self.env = Environment(loader=self.loader)
for name in jinja_filters:
self.env.filters[name] = jinja_filters[name]
self.site = dict(options['site'], documents=documents)
self.now = time.time()
def render_content(self, document):
params = {
'now': self.now,
'site': self.site,
'page': document,
}
params.update(document)
document.short = Template(document.short).render(**params)
document.content = Template(document.content).render(**params)
def render_template(self, document):
if 'template' in document:
template = self.env.get_template(document['template'])
params = {
'now': self.now,
'site': self.options['site'],
'page': document,
'content': document.content,
'short': document.short,
}
params.update(document)
return template.render(**params)
else:
return document.content
| Python | 0 |
76ad51d4161bca9435358a07cc9a726dc0ce8a8b | Add document boosts to search indexes (reorder by boost) | csunplugged/topics/search_indexes.py | csunplugged/topics/search_indexes.py | """Search index for topics models.
Note: Document boosting for Whoosh backend is with keyword '_boost' instead
of 'boost'.
"""
from haystack import indexes
from topics.models import (
Topic,
UnitPlan,
Lesson,
ProgrammingChallenge,
CurriculumIntegration,
CurriculumArea,
)
class TopicIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for Topic model."""
text = indexes.NgramField(document=True, use_template=True)
def prepare(self, obj):
data = super(TopicIndex, self).prepare(obj)
data["_boost"] = 1.4
return data
def get_model(self):
"""Return the Topic model.
Returns:
Topic object.
"""
return Topic
class UnitPlanIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for UnitPlan model."""
text = indexes.NgramField(document=True, use_template=True)
topic = indexes.CharField(model_attr="topic")
def prepare(self, obj):
data = super(UnitPlanIndex, self).prepare(obj)
data["_boost"] = 1.2
return data
def get_model(self):
"""Return the UnitPlan model.
Returns:
UnitPlan object.
"""
return UnitPlan
class LessonIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for Lesson model."""
text = indexes.NgramField(document=True, use_template=True)
topic = indexes.CharField(model_attr="topic")
unit_plan = indexes.CharField(model_attr="unit_plan")
def prepare(self, obj):
data = super(LessonIndex, self).prepare(obj)
data["_boost"] = 1
return data
def get_model(self):
"""Return the Lesson model.
Returns:
Lesson object.
"""
return Lesson
class CurriculumIntegrationIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for CurriculumIntegration model."""
text = indexes.NgramField(document=True, use_template=True, boost=1.2)
topic = indexes.CharField(model_attr="topic")
def prepare(self, obj):
data = super(CurriculumIntegrationIndex, self).prepare(obj)
data["_boost"] = 0.8
return data
def get_model(self):
"""Return the CurriculumIntegration model.
Returns:
CurriculumIntegration object.
"""
return CurriculumIntegration
class ProgrammingChallengeIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for ProgrammingChallenge model."""
text = indexes.NgramField(document=True, use_template=True)
topic = indexes.CharField(model_attr="topic")
def prepare(self, obj):
data = super(ProgrammingChallengeIndex, self).prepare(obj)
data["_boost"] = 0.4
return data
def get_model(self):
"""Return the ProgrammingChallenge model.
Returns:
ProgrammingChallenge object.
"""
return ProgrammingChallenge
| """Search index for topics models."""
from haystack import indexes
from topics.models import (
Topic,
UnitPlan,
Lesson,
ProgrammingChallenge,
CurriculumIntegration,
CurriculumArea,
)
class TopicIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for Topic model."""
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
"""Return the Topic model.
Returns:
Topic object.
"""
return Topic
class UnitPlanIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for UnitPlan model."""
text = indexes.CharField(document=True, use_template=True)
topic = indexes.CharField(model_attr='topic')
def get_model(self):
"""Return the UnitPlan model.
Returns:
UnitPlan object.
"""
return UnitPlan
class LessonIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for Lesson model."""
text = indexes.CharField(document=True, use_template=True)
topic = indexes.CharField(model_attr='topic')
unit_plan = indexes.CharField(model_attr='unit_plan')
def get_model(self):
"""Return the Lesson model.
Returns:
Lesson object.
"""
return Lesson
class ProgrammingChallengeIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for ProgrammingChallenge model."""
text = indexes.CharField(document=True, use_template=True)
topic = indexes.CharField(model_attr='topic')
def get_model(self):
"""Return the ProgrammingChallenge model.
Returns:
ProgrammingChallenge object.
"""
return ProgrammingChallenge
class CurriculumIntegrationIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for CurriculumIntegration model."""
text = indexes.CharField(document=True, use_template=True)
topic = indexes.CharField(model_attr='topic')
def get_model(self):
"""Return the CurriculumIntegration model.
Returns:
CurriculumIntegration object.
"""
return CurriculumIntegration
class CurriculumAreaIndex(indexes.SearchIndex, indexes.Indexable):
"""Search index for CurriculumArea model."""
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
"""Return the CurriculumArea model.
Returns:
CurriculumArea object.
"""
return CurriculumArea
| Python | 0 |
8a03a3fbcfdb22dc21e5539462a2b235e744abba | change open/close to with | output.py | output.py | def summarizeECG(instHR, avgHR, brady, tachy):
"""Create txt file summarizing ECG analysis
:param instHR: (int)
:param avgHR: (int)
:param brady: (int)
:param tachy: (int)
"""
#Calls hrdetector() to get instantaneous heart rate
#instHR = findInstHR()
#Calls findAvgHR() to get average heart rate
#avgHR = findAvgHR()
#Calls bradyTimes() to get times when bradycardia occurred
#brady = bradyTimes()
#Calls tachtimes() to get times when tachycardia occurred
#tachy = tachyTimes()
#Writes the output of the ECG analysis to an output file named ecgOutput.txt
with open('ecgOutput.txt','w') as ecgResults:
instHRstr = "Estimated instantaneous heart rate: %s" % str(instHR)
avgHRstr = "Estimated average heart rate: %s" % str(avgHR)
bradystr = "Bradycardia occurred at: %s" % str(brady)
tachystr = "Tachycardia occurred at: %s" % str(tachy)
ecgResults.write(instHRstr + ' BPM\n' + avgHRstr + ' BPM\n' + bradystr + ' sec\n' + tachystr + ' sec')
| def summarizeECG(instHR, avgHR, brady, tachy):
"""Create txt file summarizing ECG analysis
:param instHR: (int)
:param avgHR: (int)
:param brady: (int)
:param tachy: (int)
"""
#Calls hrdetector() to get instantaneous heart rate
#instHR = findInstHR()
#Calls findAvgHR() to get average heart rate
#avgHR = findAvgHR()
#Calls bradyTimes() to get times when bradycardia occurred
#brady = bradyTimes()
#Calls tachtimes() to get times when tachycardia occurred
#tachy = tachyTimes()
#Writes the output of the ECG analysis to an output file named ecgOutput.txt
ecgResults = open('ecgOutput.txt','w')
instHRstr = "Estimated instantaneous heart rate: %s" % str(instHR)
avgHRstr = "Estimated average heart rate: %s" % str(avgHR)
bradystr = "Bradycardia occurred at: %s" % str(brady)
tachystr = "Tachycardia occurred at: %s" % str(tachy)
ecgResults.write(instHRstr + ' BPM\n' + avgHRstr + ' BPM\n' + bradystr + ' sec\n' + tachystr + ' sec')
ecgResults.close()
| Python | 0 |
b0a1f10d60abc6c9fc7751e3bae492976d3f3306 | Update version 1.0.0.dev3 -> 1.0.0.dev4 | dimod/package_info.py | dimod/package_info.py | __version__ = '1.0.0.dev4'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| __version__ = '1.0.0.dev3'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| Python | 0.000001 |
eccc07a4639e1da98c09689295964e0f15c8068c | Add fix author functionality | dasem/runeberg.py | dasem/runeberg.py | """runeberg.
Usage:
dasem.runeberg download-catalogue
dasem.runeberg catalogue-as-csv
Description
-----------
Runeberg is a digital library with primarily Nordic texts. It is available from
http://runeberg.org/
"""
from __future__ import absolute_import, division, print_function
from os.path import join
from re import DOTALL, UNICODE, findall
import sys
from pandas import DataFrame
import requests
from .config import data_directory
from .utils import make_data_directory
CATALOGUE_URL = 'http://runeberg.org/katalog.html'
CATALOGUE_FILENAME = 'katalog.html'
def fix_author(author):
"""Change surname-firstname order.
Parameters
----------
author : str
Author as string
Returns
-------
fixed_author : str
Changed author string.
Examples
--------
>>> author = 'Lybeck, Mikael)
>>> fix_author(author)
'Mikael Lybeck'
"""
author_parts = author.split(', ')
if len(author_parts) == 2:
fixed_author = author_parts[1] + ' ' + author_parts[0]
else:
fixed_author = author
return fixed_author
class Runeberg(object):
"""Runeberg.
Examples
--------
>>> runeberg = Runeberg()
>>> catalogue = runeberg.catalogue()
>>> danish_catalogue = catalogue.ix[catalogue.language == 'dk', :]
>>> len(danish_catalogue) > 300
True
"""
def download_catalogue(self):
"""Download and store locally the Runeberg catalogue."""
make_data_directory(data_directory(), 'runeberg')
filename = join(data_directory(), 'runeberg', CATALOGUE_FILENAME)
response = requests.get(CATALOGUE_URL)
with open(filename, 'w') as f:
f.write(response.content)
def catalogue(self, fix_author=True):
"""Retrieve and parse Runeberg catalogue.
Returns
-------
books : pandas.DataFrame
Dataframe with book information.
fix_author : bool, optional
Determine if author names should be rearranged in firstname-surname
order [default: True]
"""
response = requests.get(CATALOGUE_URL)
flags = DOTALL | UNICODE
tables = findall(r'<table.*?</table>', response.text, flags=flags)
rows = findall(r'<tr.*?</tr>', tables[1], flags=flags)
books = []
for row in rows[1:]:
elements = findall('<td.*?</td>', row, flags=flags)
book_id, title = findall(r'/(.*?)/">(.*?)<',
elements[4], flags=flags)[0]
try:
author_id, author = findall(r'/authors/(.*?).html">(.*?)<',
elements[6], flags=flags)[0]
except:
author_id, author = '', ''
if fix_author:
# fix_author name collision. TODO
author = globals()['fix_author'](author)
book = {
'type': findall(r'alt="(.*?)">', elements[0], flags=flags)[0],
'book_id': book_id,
'title': title,
'author_id': author_id,
'author': author,
'year': elements[8][15:-5],
'language': elements[10][-9:-7]
}
books.append(book)
return DataFrame(books)
def main():
"""Handle command-line interface."""
from docopt import docopt
arguments = docopt(__doc__)
if sys.stdout.encoding is None:
encoding = 'utf-8'
else:
encoding = sys.stdout.encoding
runeberg = Runeberg()
if arguments['download-catalogue']:
runeberg.download_catalogue()
elif arguments['catalogue-as-csv']:
print(runeberg.catalogue().to_csv(encoding=encoding))
if __name__ == '__main__':
main()
| """runeberg.
Usage:
dasem.runeberg download-catalogue
dasem.runeberg catalogue-as-csv
Description
-----------
Runeberg is a digital library with primarily Nordic texts. It is available from
http://runeberg.org/
"""
from __future__ import absolute_import, division, print_function
from os.path import join
from re import DOTALL, UNICODE, findall
import sys
from pandas import DataFrame
import requests
from .config import data_directory
from .utils import make_data_directory
CATALOGUE_URL = 'http://runeberg.org/katalog.html'
CATALOGUE_FILENAME = 'katalog.html'
def fix_author(author):
"""Change surname-firstname order.
Parameters
----------
author : str
Author as string
Returns
-------
fixed_author : str
Changed author string.
Examples
--------
>>> author = 'Lybeck, Mikael)
>>> fix_author(author)
'Mikael Lybeck'
"""
author_parts = author.split(', ')
if author_parts == 2:
fixed_author = author_parts[1] + ' ' + author_parts[0]
else:
fixed_author = author
return fixed_author
class Runeberg(object):
"""Runeberg.
Examples
--------
>>> runeberg = Runeberg()
>>> catalogue = runeberg.catalogue()
>>> danish_catalogue = catalogue.ix[catalogue.language == 'dk', :]
>>> len(danish_catalogue) > 300
True
"""
def download_catalogue(self):
"""Download and store locally the Runeberg catalogue."""
make_data_directory(data_directory(), 'runeberg')
filename = join(data_directory(), 'runeberg', CATALOGUE_FILENAME)
response = requests.get(CATALOGUE_URL)
with open(filename, 'w') as f:
f.write(response.content)
def catalogue(self):
"""Retrieve and parse Runeberg catalogue.
Returns
-------
books : pandas.DataFrame
Dataframe with book information.
"""
response = requests.get(CATALOGUE_URL)
flags = DOTALL | UNICODE
tables = findall(r'<table.*?</table>', response.text, flags=flags)
rows = findall(r'<tr.*?</tr>', tables[1], flags=flags)
books = []
for row in rows[1:]:
elements = findall('<td.*?</td>', row, flags=flags)
book_id, title = findall(r'/(.*?)/">(.*?)<',
elements[4], flags=flags)[0]
try:
author_id, author = findall(r'/authors/(.*?).html">(.*?)<',
elements[6], flags=flags)[0]
except:
author_id, author = '', ''
book = {
'type': findall(r'alt="(.*?)">', elements[0], flags=flags)[0],
'book_id': book_id,
'title': title,
'author_id': author_id,
'author': author,
'year': elements[8][15:-5],
'language': elements[10][-9:-7]
}
books.append(book)
return DataFrame(books)
def main():
"""Handle command-line interface."""
from docopt import docopt
arguments = docopt(__doc__)
if sys.stdout.encoding is None:
encoding = 'utf-8'
else:
encoding = sys.stdout.encoding
runeberg = Runeberg()
if arguments['download-catalogue']:
runeberg.download_catalogue()
elif arguments['catalogue-as-csv']:
print(runeberg.catalogue().to_csv(encoding=encoding))
if __name__ == '__main__':
main()
| Python | 0.000001 |
034fa60d73468df21b6f75eb7a8130ab9a40cbae | Fix #3225 | module/plugins/hoster/FilerNet.py | module/plugins/hoster/FilerNet.py | # -*- coding: utf-8 -*-
import os
import re
from ..captcha.ReCaptcha import ReCaptcha
from ..internal.SimpleHoster import SimpleHoster
class FilerNet(SimpleHoster):
__name__ = "FilerNet"
__type__ = "hoster"
__version__ = "0.28"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?filer\.net/get/\w+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Filer.net hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
INFO_PATTERN = r'<h1 class="page-header">Free Download (?P<N>\S+) <small>(?P<S>[\w.]+) (?P<U>[\w^_]+)</small></h1>'
OFFLINE_PATTERN = r'Nicht gefunden'
WAIT_PATTERN = r'var count = (\d+);'
LINK_FREE_PATTERN = LINK_PREMIUM_PATTERN = r'href="([^"]+)">Get download</a>'
def handle_free(self, pyfile):
inputs = self.parse_html_form(input_names={'token': re.compile(r'.+')})[1]
if inputs is None or 'token' not in inputs:
self.retry()
self.data = self.load(pyfile.url, post={'token': inputs['token']})
inputs = self.parse_html_form(input_names={'hash': re.compile(r'.+')})[1]
if inputs is None or 'hash' not in inputs:
self.error(_("Unable to detect hash"))
self.captcha = ReCaptcha(pyfile)
response, challenge = self.captcha.challenge()
#: Avoid 'Correct catcha'
captcha_task = self.captcha.task
self.captcha.task = None
self.download(pyfile.url,
post={'g-recaptcha-response': response,
'hash': inputs['hash']})
#: Restore the captcha task
self.captcha.task = captcha_task
if self.scan_download({'html': re.compile(r'\A\s*<!DOCTYPE html')}) == "html":
self.log_warning(
_("There was HTML code in the downloaded file (%s)...bad captcha? The download will be restarted." %
self.pyfile.name))
os.remove(self.last_download)
self.retry_captcha()
else:
self.captcha.correct()
| # -*- coding: utf-8 -*-
import os
import re
from ..captcha.ReCaptcha import ReCaptcha
from ..internal.SimpleHoster import SimpleHoster
class FilerNet(SimpleHoster):
__name__ = "FilerNet"
__type__ = "hoster"
__version__ = "0.27"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?filer\.net/get/\w+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Filer.net hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
INFO_PATTERN = r'<h1 class="page-header">Free Download (?P<N>\S+) <small>(?P<S>[\w.]+) (?P<U>[\w^_]+)</small></h1>'
OFFLINE_PATTERN = r'Nicht gefunden'
WAIT_PATTERN = r'var count = (\d+);'
LINK_FREE_PATTERN = LINK_PREMIUM_PATTERN = r'href="([^"]+)">Get download</a>'
def handle_free(self, pyfile):
inputs = self.parse_html_form(
input_names={'token': re.compile(r'.+')})[1]
if 'token' not in inputs:
self.error(_("Unable to detect token"))
self.data = self.load(pyfile.url, post={'token': inputs['token']})
inputs = self.parse_html_form(input_names={'hash': re.compile(r'.+')})[1]
if 'hash' not in inputs:
self.error(_("Unable to detect hash"))
self.captcha = ReCaptcha(pyfile)
response, challenge = self.captcha.challenge()
#: Avoid 'Correct catcha'
captcha_task = self.captcha.task
self.captcha.task = None
self.download(pyfile.url,
post={'g-recaptcha-response': response,
'hash': inputs['hash']})
#: Restore the captcha task
self.captcha.task = captcha_task
if self.scan_download(
{'html': re.compile(r'\A\s*<!DOCTYPE html')}) == "html":
self.log_warning(
_("There was HTML code in the downloaded file (%s)...bad captcha? The download will be restarted." %
self.pyfile.name))
os.remove(self.last_download)
self.retry_captcha()
else:
self.captcha.correct()
| Python | 0 |
518443854f7ef4466885d88cf7b379c626692da1 | Add PlannedBudgetLimits to Budgets::Budget BudgetData | troposphere/budgets.py | troposphere/budgets.py | # Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 8.0.0
from . import AWSObject
from . import AWSProperty
from .validators import boolean
from .validators import double
class CostTypes(AWSProperty):
props = {
'IncludeCredit': (boolean, False),
'IncludeDiscount': (boolean, False),
'IncludeOtherSubscription': (boolean, False),
'IncludeRecurring': (boolean, False),
'IncludeRefund': (boolean, False),
'IncludeSubscription': (boolean, False),
'IncludeSupport': (boolean, False),
'IncludeTax': (boolean, False),
'IncludeUpfront': (boolean, False),
'UseAmortized': (boolean, False),
'UseBlended': (boolean, False),
}
class Spend(AWSProperty):
props = {
'Amount': (double, True),
'Unit': (basestring, True),
}
class TimePeriod(AWSProperty):
props = {
'End': (basestring, False),
'Start': (basestring, False),
}
class BudgetData(AWSProperty):
props = {
'BudgetLimit': (Spend, False),
'BudgetName': (basestring, False),
'BudgetType': (basestring, True),
'CostFilters': (dict, False),
'CostTypes': (CostTypes, False),
'PlannedBudgetLimits': (dict, False),
'TimePeriod': (TimePeriod, False),
'TimeUnit': (basestring, True),
}
class Notification(AWSProperty):
props = {
'ComparisonOperator': (basestring, True),
'NotificationType': (basestring, True),
'Threshold': (double, True),
'ThresholdType': (basestring, False),
}
class Subscriber(AWSProperty):
props = {
'Address': (basestring, True),
'SubscriptionType': (basestring, True),
}
class NotificationWithSubscribers(AWSProperty):
props = {
'Notification': (Notification, True),
'Subscribers': ([Subscriber], True),
}
class Budget(AWSObject):
resource_type = "AWS::Budgets::Budget"
props = {
'Budget': (BudgetData, True),
'NotificationsWithSubscribers':
([NotificationWithSubscribers], False),
}
| # Copyright (c) 2012-2018, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import boolean
class Spend(AWSProperty):
props = {
'Amount': (float, True),
'Unit': (basestring, True),
}
class CostTypes(AWSProperty):
props = {
'IncludeCredit': (boolean, False),
'IncludeDiscount': (boolean, False),
'IncludeOtherSubscription': (boolean, False),
'IncludeRecurring': (boolean, False),
'IncludeRefund': (boolean, False),
'IncludeSubscription': (boolean, False),
'IncludeSupport': (boolean, False),
'IncludeTax': (boolean, False),
'IncludeUpfront': (boolean, False),
'UseAmortized': (boolean, False),
'UseBlended': (boolean, False),
}
class TimePeriod(AWSProperty):
props = {
'End': (basestring, False),
'Start': (basestring, False),
}
class BudgetData(AWSProperty):
props = {
'BudgetLimit': (Spend, False),
'BudgetName': (basestring, False),
'BudgetType': (basestring, True),
'CostFilters': (dict, False),
'CostTypes': (CostTypes, False),
'TimePeriod': (TimePeriod, False),
'TimeUnit': (basestring, True),
}
class Notification(AWSProperty):
props = {
'ComparisonOperator': (basestring, True),
'NotificationType': (basestring, True),
'Threshold': (float, True),
'ThresholdType': (basestring, False),
}
class Subscriber(AWSProperty):
props = {
'Address': (basestring, True),
'SubscriptionType': (basestring, True),
}
class NotificationWithSubscribers(AWSProperty):
props = {
'Notification': (Notification, True),
'Subscribers': ([Subscriber], True),
}
class Budget(AWSObject):
resource_type = "AWS::Budgets::Budget"
props = {
'Budget': (BudgetData, True),
'NotificationsWithSubscribers':
([NotificationWithSubscribers], False),
}
| Python | 0.000006 |
f612fafa3e4d7352b64d993390fb074686fe46b7 | Update slack post format | chainer/ya/utils/slack.py | chainer/ya/utils/slack.py | import json
import os
import requests
from chainer.training import extension
class SlackPost(extension.Extension):
def __init__(self, token, channel, **kwargs):
self.token = token
self.channel = channel
self.priority = 50
def initialize(self, trainer):
try:
plot_report = trainer.get_extension("PlotReport")
except:
pass
else:
self.plotfilepath = os.path.join(trainer.out,
plot_report._file_name)
try:
args = trainer.get_extension("ArgumentBackup").args
except:
pass
else:
self.args = [
"{}:\t{}".format(k, getattr(args, k)) for k in vars(args)
]
def finalize(self):
msgs = ["Training finished"]
attachments = []
if hasattr(self, "args"):
msgs += self.args
if hasattr(self, "plotfilepath"):
data = {
"token": self.token,
"channels": self.channel,
"initial_comment": "\n".join(msgs),
"icon_url": "https://chainer.org/images/chainer_icon_red.png",
"username": "Chainer Result",
}
files = {'file': open(self.plotfilepath, 'rb')}
ret = requests.post(
"https://slack.com/api/files.upload", data=data, files=files)
obj = ret.json()
attachments = [{
"fallback": "plot",
"color": "good",
"title": "Result",
"image_url": obj["file"]["url_private"],
}]
else:
data = {
"token": self.token,
"channel": self.channel,
"as_user": False,
"text": "\n".join(msgs),
"icon_url": "https://chainer.org/images/chainer_icon_red.png",
"unfurl_media": True,
"attachments": json.dumps(attachments),
"username": "Chainer Result",
}
ret = requests.post(
"https://slack.com/api/chat.postMessage", data=data)
| import os
import requests
import json
from chainer.training import extension
class SlackPost(extension.Extension):
def __init__(self, token, channel, **kwargs):
self.token = token
self.channel = channel
self.priority = 50
def initialize(self, trainer):
try:
plot_report = trainer.get_extension("PlotReport")
except:
pass
else:
self.plotfilepath = os.path.join(trainer.out,
plot_report._file_name)
try:
args = trainer.get_extension("ArgumentBackup").args
except:
pass
else:
self.args = ["{}:\t{}".format(k,getattr(args, k))
for k in vars(args)]
def finalize(self):
msgs = ["Training finished"]
attachments = []
if hasattr(self, "args"):
msgs += self.args
if hasattr(self, "plotfilepath"):
data = {
"token": self.token,
"channels": self.channel,
"initial_comment": "\n".join(msgs),
}
files = {'file': open(self.plotfilepath, 'rb')}
requests.post("https://slack.com/api/files.upload",
data=data, files=files)
else:
data = {
"token": self.token,
"channel": self.channel,
"as_user": False,
"text": "\n".join(msgs),
"icon_url": "https://chainer.org/images/chainer_icon_red.png",
"unfurl_media": True,
"attachments": json.dumps(attachments),
"username": "Chainer Result",
}
requests.post("https://slack.com/api/chat.postMessage",
data=data)
| Python | 0 |
48412195e020c7f2a549deb869d98f6a366d9552 | improve workflow conversion | cwlupgrader/main.py | cwlupgrader/main.py | #!/usr/bin/env python
from __future__ import print_function
import ruamel.yaml
from typing import Any, Dict, Union
from collections import Mapping, MutableMapping, Sequence
import sys
import copy
def main(): # type: () -> int
for path in sys.argv[1:]:
with open(path) as entry:
document = ruamel.yaml.round_trip_load(entry)
if ('cwlVersion' in document
and document['cwlVersion'] == 'cwl:draft-3'):
draft3_to_v1_0(document)
else:
print("Skipping non draft-3 CWL document", file=sys.stderr)
print(ruamel.yaml.round_trip_dump(document))
return 0
def draft3_to_v1_0(document): # type: (Dict[str, Any]) -> None
_draft3_to_v1_0(document)
document['cwlVersion'] = 'v1.0'
def _draft3_to_v1_0(document):
# type: (MutableMapping[str, Any]) -> MutableMapping[str, Any]
if "class" in document:
if document["class"] == "Workflow":
inputOutputClean(document)
for out in document["outputs"]:
out["outputSource"] = out.pop("source").lstrip('#')
for step in document["steps"]:
step["out"] = step.pop("outputs")
for inp in step["inputs"]:
inp["id"] = inp["id"][len(step["id"])+1:] # remove step id prefix
inp["source"] = inp["source"].lstrip('#')
step["in"] = step.pop("inputs")
if "scatter" in step:
step["scatter"] = step["scatter"][ # remove step prefix
len(step["id"])*2+3:]
elif document["class"] == "File":
document["location"] = document.pop("path")
elif document["class"] == "CreateFileRequirement":
document["class"] = "InitialWorkDirRequirement"
document["listing"] = []
for filedef in document["fileDef"]:
document["listing"].append({
"entryname": filedef["filename"],
"entry": filedef["fileContent"]
})
del document["fileDef"]
elif document["class"] == "CommandLineTool":
inputOutputClean(document)
if "secondaryFiles" in document:
for i, sf in enumerate(document["secondaryFiles"]):
if "$(" in sf or "${" in sf:
document["secondaryFiles"][i] = sf.replace(
'"path"', '"location"').replace(".path", ".location")
if "description" in document:
document["doc"] = document["description"]
del document["description"]
if isinstance(document, MutableMapping):
for key, value in document.items():
if isinstance(value, MutableMapping):
document[key] = _draft3_to_v1_0(value)
elif isinstance(value, list):
for index, entry in enumerate(value):
if isinstance(entry, MutableMapping):
value[index] = _draft3_to_v1_0(entry)
return document
def inputOutputClean(document): # type: (MutableMapping[str, Any]) -> None
for paramType in ['inputs', 'outputs']:
for param in document[paramType]:
param['id'] = param['id'].lstrip('#')
if 'type' in param:
param['type'] = shortenType(param['type'])
def shortenType(typeObj):
# type: (List[Any]) -> Union[str, List[Any]]
if isinstance(typeObj, str) or not isinstance(typeObj, Sequence):
return typeObj
newType = []
for entry in typeObj: # find arrays that we can shorten and do so
if isinstance(entry, Mapping):
if (entry['type'] == 'array' and
isinstance(entry['items'], str)):
entry = entry['items'] + '[]'
newType.extend([entry])
typeObj = newType
if len(typeObj) == 2:
if 'null' in typeObj:
typeCopy = copy.deepcopy(typeObj)
typeCopy.remove('null')
if isinstance(typeCopy[0], str):
return typeCopy[0] + '?'
return typeObj
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python
from __future__ import print_function
import ruamel.yaml
from typing import Any, Dict, Union
from collections import Mapping, MutableMapping, Sequence
import sys
import copy
def main(): # type: () -> int
for path in sys.argv[1:]:
with open(path) as entry:
document = ruamel.yaml.round_trip_load(entry)
if ('cwlVersion' in document
and document['cwlVersion'] == 'cwl:draft-3'):
draft3_to_v1_0(document)
else:
print("Skipping non draft-3 CWL document", file=sys.stderr)
print(ruamel.yaml.round_trip_dump(document))
return 0
def draft3_to_v1_0(document): # type: (Dict[str, Any]) -> None
_draft3_to_v1_0(document)
document['cwlVersion'] = 'v1.0'
def _draft3_to_v1_0(document):
# type: (MutableMapping[str, Any]) -> MutableMapping[str, Any]
if "class" in document:
if document["class"] == "Workflow":
for out in document["outputs"]:
out["outputSource"] = out["source"]
del out["source"]
elif document["class"] == "File":
document["location"] = document["path"]
del document["path"]
elif document["class"] == "CreateFileRequirement":
document["class"] = "InitialWorkDirRequirement"
document["listing"] = []
for filedef in document["fileDef"]:
document["listing"].append({
"entryname": filedef["filename"],
"entry": filedef["fileContent"]
})
del document["fileDef"]
elif document["class"] == "CommandLineTool":
setupCLTMappings(document)
if "secondaryFiles" in document:
for i, sf in enumerate(document["secondaryFiles"]):
if "$(" in sf or "${" in sf:
document["secondaryFiles"][i] = sf.replace(
'"path"', '"location"').replace(".path", ".location")
if "description" in document:
document["doc"] = document["description"]
del document["description"]
if isinstance(document, MutableMapping):
for key, value in document.items():
if isinstance(value, MutableMapping):
document[key] = _draft3_to_v1_0(value)
elif isinstance(value, list):
for index, entry in enumerate(value):
if isinstance(entry, MutableMapping):
value[index] = _draft3_to_v1_0(entry)
return document
def setupCLTMappings(document): # type: (MutableMapping[str, Any]) -> None
for paramType in ['inputs', 'outputs']:
params = {}
for param in document[paramType]:
paramID = param['id'].lstrip('#')
param['type'] = shortenType(param['type'])
if len(param) == 2 and 'type' in param:
params[paramID] = param['type']
else:
del param['id']
params[paramID] = param
document[paramType] = params
def shortenType(typeObj):
# type: (List[Any]) -> Union[str, List[Any]]
if isinstance(typeObj, str) or not isinstance(typeObj, Sequence):
return typeObj
newType = []
for entry in typeObj: # find arrays that we can shorten and do so
if isinstance(entry, Mapping):
if (entry['type'] == 'array' and
isinstance(entry['items'], str)):
entry = entry['items'] + '[]'
newType.extend([entry])
typeObj = newType
if len(typeObj) == 2:
if 'null' in typeObj:
typeCopy = copy.deepcopy(typeObj)
typeCopy.remove('null')
if isinstance(typeCopy[0], str):
return typeCopy[0] + '?'
return typeObj
if __name__ == "__main__":
sys.exit(main())
| Python | 0.000002 |
ba84f4a1b11f486d211254721397be43f8c9b07a | update __manifest__.py | tko_coexiste_coa/__manifest__.py | tko_coexiste_coa/__manifest__.py | # -*- coding: utf-8 -*-
# © 2017 TKO <http://tko.tko-br.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Plano de Contas Brasileiro',
'summary': '',
'description': 'Plano de contas brasileiro adaptável a qualquer segmento.',
'author': 'TKO',
'category': 'l10n_br',
'license': 'AGPL-3',
'website': 'http://tko.tko-br.com',
'version': '10.0.0.0.0',
'application': False,
'installable': True,
'auto_install': False,
'depends': [
'account',
'br_account',
'account_parent',
],
'external_dependencies': {
'python': [],
'bin': [],
},
'init_xml': [],
'update_xml': [],
'css': [],
'demo_xml': [],
'test': [],
'data': [
'data/chart_data_properties.xml',
'data/chart_data.xml',
'data/account.account.template.csv',
# TODO Separate proprities for products vs. services (enhance data/chart_data_properties.xml)
# TODO Criar Contas Pai
# TODO Create & Import l10n_br Taxes
],
}
| # -*- coding: utf-8 -*-
# © 2017 TKO <http://tko.tko-br.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Plano de Contas Brasileiro',
'summary': '',
'description': 'Plano de contas brasileiro adaptável a qualquer segmento.',
'author': 'TKO',
'category': 'l10n_br',
'license': 'AGPL-3',
'website': 'http://tko.tko-br.com',
'version': '10.0.0.0.0',
'application': False,
'installable': True,
'auto_install': False,
'depends': [
'account',
'br_account',
'account_parent',
],
'external_dependencies': {
'python': [],
'bin': [],
},
'init_xml': [],
'update_xml': [],
'css': [],
'demo_xml': [],
'test': [],
'data': [
'data/chart_data.xml',
'data/account.account.template.csv',
'data/chart_data_properties.xml',
# TODO Separate proprities for products vs. services (enhance data/chart_data_properties.xml)
# TODO Criar Contas Pai
# TODO Create & Import l10n_br Taxes
],
}
| Python | 0.000059 |
febb2e9369a706d7319d89851cac3dc9a1fd167e | add source of kyoko image | tsundiary/jinja_env.py | tsundiary/jinja_env.py | from tsundiary import app
app.jinja_env.globals.update(theme_nicename = {
'classic': 'Classic Orange',
'minimal': 'Minimal Black/Grey',
'misato-tachibana': 'Misato Tachibana',
'rei-ayanami': 'Rei Ayanami',
'saya': 'Saya',
'yuno': 'Yuno Gasai',
'kyoko-sakura': 'Kyoko Sakura',
'colorful': 'Based on favorite color'
})
app.jinja_env.globals.update(themes = ['classic', 'minimal', 'misato-tachibana', 'rei-ayanami', 'saya', 'yuno', 'colorful'])
app.jinja_env.globals.update(theme_creds = {
'misato-tachibana': '<a href="http://konachan.com/post/show/102801">Misato Tachibana vector source</a>',
'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei vector source</a>',
'saya': '<a href="http://www.zerochan.net/671274">Saya source</a>',
'kyoko-sakura': '<a href="http://3071527.deviantart.com/art/kyoko-sakura-376238110">Kyoko source</a>'
})
app.jinja_env.globals.update(theme_colors = [
('Red', '0,100,100'),
('Orange', '35,100,100'),
('Yellow', '50,100,100'),
('Green', '120,100,80'),
('Cyan', '180,100,80'),
('Blue', '215,100,100'),
('Purple', '270,100,100'),
('Black', '0,0,0'),
('Grey', '0,0,70'),
('White', '0,0,100'),
('Saya Green', '152,100,100'),
('Tsundiary Orange', '17,100,100'),
])
| from tsundiary import app
app.jinja_env.globals.update(theme_nicename = {
'classic': 'Classic Orange',
'minimal': 'Minimal Black/Grey',
'misato-tachibana': 'Misato Tachibana',
'rei-ayanami': 'Rei Ayanami',
'saya': 'Saya',
'yuno': 'Yuno Gasai',
'kyoko-sakura': 'Kyoko Sakura',
'colorful': 'Based on favorite color'
})
app.jinja_env.globals.update(themes = ['classic', 'minimal', 'misato-tachibana', 'rei-ayanami', 'saya', 'yuno', 'colorful'])
app.jinja_env.globals.update(theme_creds = {
'misato-tachibana': '<a href="http://konachan.com/post/show/102801">Misato Tachibana vector source</a>',
'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei vector source</a>',
'saya': '<a href="http://www.zerochan.net/671274">Saya source</a>',
'kyoko-sakura': "An artist drew this Kyoko, I'm sure."
})
app.jinja_env.globals.update(theme_colors = [
('Red', '0,100,100'),
('Orange', '35,100,100'),
('Yellow', '50,100,100'),
('Green', '120,100,80'),
('Cyan', '180,100,80'),
('Blue', '215,100,100'),
('Purple', '270,100,100'),
('Black', '0,0,0'),
('Grey', '0,0,70'),
('White', '0,0,100'),
('Saya Green', '152,100,100'),
('Tsundiary Orange', '17,100,100'),
])
| Python | 0 |
fb9e2ec66f2c80b60ae565665f091b0ee47843a9 | Remove six lib from install script | docs/scripts/install.py | docs/scripts/install.py | #!/usr/bin/env python
'''
File name: install
Author: Tim Anema
Date created: Sep 29, 2016
Date last modified: Sep 14 2018
Python Version: 2.7
Description: Install script for themekit. It will download a release and make it executable
'''
import os, json, sys, hashlib
class Installer(object):
LATEST_RELEASE_URL = "https://shopify-themekit.s3.amazonaws.com/releases/latest.json"
ARCH_MAPPING = {
"darwin x86_64": "darwin-amd64",
"darwin i386": "darwin-386",
"linux x86_64": "linux-amd64",
"linux i386": "linux-386",
"freebsd x86_64": "freebsd-amd64",
"freebsd i386": "freebsd-386"
}
def __init__(self, path="/usr/local/bin"):
self.install_path = os.path.expanduser(path)
self.bin_path = "%s/theme" % self.install_path
self.arch = self.__getArch()
print("Fetching release data")
self.release = json.loads(self.__req(Installer.LATEST_RELEASE_URL).decode("utf-8"))
print("Downloading version %s of Shopify Themekit" % self.release['version'])
self.__download()
print("Theme Kit has been installed at %s" % self.bin_path)
print('To verify themekit is working simply type "theme"')
def __getArch(self):
pipe = os.popen("echo \"$(uname) $(uname -m)\"")
arch_name = pipe.readline().strip().lower()
pipe.close()
if arch_name not in Installer.ARCH_MAPPING:
print("Cannot find binary to match your architecture [%s]" % arch_name)
sys.exit("Please open an issue at https://github.com/Shopify/themekit/issues")
return Installer.ARCH_MAPPING[arch_name]
def __findReleasePlatform(self):
for index, platform in enumerate(self.release['platforms']):
if platform['name'] == self.arch:
return platform
def __download(self):
platform = self.__findReleasePlatform()
data = self.__req(platform['url'])
if hashlib.md5(data).hexdigest() != platform['digest']:
sys.exit("Downloaded binary did not match checksum.")
else:
print("Validated binary checksum")
if not os.path.exists(self.install_path):
os.makedirs(self.install_path)
with open(self.bin_path, "wb") as themefile:
themefile.write(data)
os.chmod(self.bin_path, 0o755)
def __req(self, url):
if sys.version_info[0] < 3:
import urllib
return urllib.urlopen(url).read()
else:
import urllib.request
return urllib.request.urlopen(url).read()
Installer()
| #!/usr/bin/env python
'''
File name: install.py
Author: Tim Anema
Date created: Sep 29, 2016
Date last modified: Nov 19 2020
Python Version: 2.x, 3.x
Description: Install script for themekit. It will download a release and make it executable
'''
import os, json, sys, hashlib
from six.moves.urllib.request import urlopen
class Installer(object):
LATEST_RELEASE_URL = "https://shopify-themekit.s3.amazonaws.com/releases/latest.json"
ARCH_MAPPING = {
"darwin x86_64": "darwin-amd64",
"darwin i386": "darwin-386",
"linux x86_64": "linux-amd64",
"linux i386": "linux-386",
"freebsd x86_64": "freebsd-amd64",
"freebsd i386": "freebsd-386"
}
def __init__(self, path="/usr/local/bin"):
self.install_path = os.path.expanduser(path)
self.bin_path = "%s/theme" % self.install_path
self.arch = self.__getArch()
print("Fetching release data")
self.release = json.loads(urlopen(Installer.LATEST_RELEASE_URL).read().decode("utf-8"))
print("Downloading version %s of Shopify Themekit" % self.release['version'])
self.__download()
print("Theme Kit has been installed at %s" % self.bin_path)
print('To verify themekit is working simply type "theme"')
def __getArch(self):
pipe = os.popen("echo \"$(uname) $(uname -m)\"")
arch_name = pipe.readline().strip().lower()
pipe.close()
if arch_name not in Installer.ARCH_MAPPING:
print("Cannot find binary to match your architecture [%s]" % arch_name)
sys.exit("Please open an issue at https://github.com/Shopify/themekit/issues")
return Installer.ARCH_MAPPING[arch_name]
def __findReleasePlatform(self):
for index, platform in enumerate(self.release['platforms']):
if platform['name'] == self.arch:
return platform
def __download(self):
platform = self.__findReleasePlatform()
data = urlopen(platform['url']).read()
if hashlib.md5(data).hexdigest() != platform['digest']:
sys.exit("Downloaded binary did not match checksum.")
else:
print("Validated binary checksum")
if not os.path.exists(self.install_path):
os.makedirs(self.install_path)
with open(self.bin_path, "wb") as themefile:
themefile.write(data)
os.chmod(self.bin_path, 0o755)
Installer()
| Python | 0 |
3cf93f7f640ef04a1be31d515c19cffec19cec45 | Remove logging import unused | searchlightclient/osc/plugin.py | searchlightclient/osc/plugin.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from osc_lib import utils
DEFAULT_SEARCH_API_VERSION = '1'
API_VERSION_OPTION = 'os_search_api_version'
API_NAME = 'search'
API_VERSIONS = {
'1': 'searchlightclient.v1.client.Client',
}
def make_client(instance):
"""Returns a search service client"""
search_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
# Set client http_log_debug to True if verbosity level is high enough
http_log_debug = utils.get_effective_log_level() <= logging.DEBUG
# Remember interface only if it is set
kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)
client = search_client(
session=instance.session,
http_log_debug=http_log_debug,
region_name=instance._region_name,
**kwargs
)
return client
def build_option_parser(parser):
"""Hook to add global options"""
parser.add_argument(
'--os-search-api-version',
metavar='<search-api-version>',
default=utils.env(
'OS_SEARCH_API_VERSION',
default=DEFAULT_SEARCH_API_VERSION),
help='Search API version, default=' +
DEFAULT_SEARCH_API_VERSION +
' (Env: OS_SEARCH_API_VERSION)')
return parser
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from osc_lib import utils
LOG = logging.getLogger(__name__)
DEFAULT_SEARCH_API_VERSION = '1'
API_VERSION_OPTION = 'os_search_api_version'
API_NAME = 'search'
API_VERSIONS = {
'1': 'searchlightclient.v1.client.Client',
}
def make_client(instance):
"""Returns a search service client"""
search_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
# Set client http_log_debug to True if verbosity level is high enough
http_log_debug = utils.get_effective_log_level() <= logging.DEBUG
# Remember interface only if it is set
kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)
client = search_client(
session=instance.session,
http_log_debug=http_log_debug,
region_name=instance._region_name,
**kwargs
)
return client
def build_option_parser(parser):
"""Hook to add global options"""
parser.add_argument(
'--os-search-api-version',
metavar='<search-api-version>',
default=utils.env(
'OS_SEARCH_API_VERSION',
default=DEFAULT_SEARCH_API_VERSION),
help='Search API version, default=' +
DEFAULT_SEARCH_API_VERSION +
' (Env: OS_SEARCH_API_VERSION)')
return parser
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.