| text | meta |
|---|---|
import unittest
from unittest import mock

# Threaded Implementation
import threaded


class ThreadedTest(unittest.TestCase):
    def test_add_basic(self):
        @threaded.threaded
        def func_test():
            pass

        # pylint: disable=assignment-from-no-return
        test_thread = func_test()
        # pylint: enable=assignment-from-no-return
        self.assertEqual(test_thread.name, "Threaded: func_test")
        self.assertFalse(test_thread.daemon)
        self.assertFalse(test_thread.is_alive())

    def test_add_func(self):
        @threaded.threaded()
        def func_test():
            pass

        # pylint: disable=assignment-from-no-return
        test_thread = func_test()
        # pylint: enable=assignment-from-no-return
        self.assertEqual(test_thread.name, "Threaded: func_test")
        self.assertFalse(test_thread.daemon)
        self.assertFalse(test_thread.is_alive())

    def test_name(self):
        @threaded.threaded(name="test name")
        def func_test():
            pass

        # pylint: disable=assignment-from-no-return
        test_thread = func_test()
        # pylint: enable=assignment-from-no-return
        self.assertEqual(test_thread.name, "test name")
        self.assertFalse(test_thread.daemon)
        self.assertFalse(test_thread.is_alive())

    def test_daemon(self):
        @threaded.threaded(daemon=True)
        def func_test():
            pass

        # pylint: disable=assignment-from-no-return
        test_thread = func_test()
        # pylint: enable=assignment-from-no-return
        self.assertEqual(test_thread.name, "Threaded: func_test")
        self.assertTrue(test_thread.daemon)
        self.assertFalse(test_thread.is_alive())

    @mock.patch("threading.Thread", autospec=True)
    def test_started(self, thread):
        @threaded.threaded(started=True)
        def func_test():
            pass

        func_test()
        self.assertIn(mock.call().start(), thread.mock_calls)
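
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original test file): the tests above pin down
# the decorator's surface -- bare @threaded.threaded, a parameterized form
# taking name/daemon/started, and a returned *unstarted* threading.Thread.
# The worker body below is hypothetical.

@threaded.threaded(name="worker", daemon=True)
def crunch():
    total = sum(range(1000000))
    print(total)


if __name__ == "__main__":
    thread = crunch()  # returns an unstarted threading.Thread
    thread.start()
    thread.join()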
|
{
"content_hash": "00c1386be3572c0119c74d188662358f",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 65,
"avg_line_length": 30.53846153846154,
"alnum_prop": 0.6251889168765743,
"repo_name": "penguinolog/threaded",
"id": "926f6fb31ea8e8fdbc9d9afb028a82c2f90e8a62",
"size": "2632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_threaded.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "973"
},
{
"name": "Python",
"bytes": "67156"
},
{
"name": "Shell",
"bytes": "2607"
}
],
"symlink_target": ""
}
|
"""Test the locking library."""
from __future__ import print_function
import itertools
import multiprocessing
import os
import sys
import time
from chromite.lib import cros_test_lib
from chromite.lib import locking
from chromite.lib import parallel
from chromite.lib import osutils
from chromite.lib import process_util
from chromite.lib import timeout_util
LOCK_ACQUIRED = 5
LOCK_NOT_ACQUIRED = 6
class LockingTest(cros_test_lib.TempDirTestCase):
"""Test the Locking class."""
def setUp(self):
self.lock_file = os.path.join(self.tempdir, 'lockfile')
def _HelperSingleLockTest(self, blocking, shared, locktype):
"""Helper method that runs a basic test with/without blocking/sharing."""
self.assertNotExists(self.lock_file)
lock = locking.FileLock(
self.lock_file, blocking=blocking, locktype=locktype)
self.assertFalse(lock.IsLocked())
lock.write_lock()
self.assertTrue(lock.IsLocked())
self.assertExists(self.lock_file)
# Acquiring the lock again should be safe.
lock.lock(shared)
self.assertTrue(lock.IsLocked())
lock.close()
self.assertFalse(lock.IsLocked())
osutils.SafeUnlink(self.lock_file)
def _HelperInsideProcess(self, blocking, shared, locktype=locking.LOCKF):
"""Helper method that runs a basic test with/without blocking."""
try:
lock = locking.FileLock(
self.lock_file, blocking=blocking, locktype=locktype)
with lock.lock(shared):
pass
sys.exit(LOCK_ACQUIRED)
except locking.LockNotAcquiredError:
sys.exit(LOCK_NOT_ACQUIRED)
def _HelperStartProcess(self, blocking=False, shared=False):
"""Create a process and invoke _HelperInsideProcess in it."""
p = multiprocessing.Process(target=self._HelperInsideProcess,
args=(blocking, shared))
p.start()
# It's highly probably that p will have tried to grab the lock before the
# timer expired, but not certain.
time.sleep(0.1)
return p
def _HelperWithProcess(self, expected, blocking=False, shared=False,
locktype=locking.LOCKF):
"""Create a process and invoke _HelperInsideProcess in it."""
p = multiprocessing.Process(target=self._HelperInsideProcess,
args=(blocking, shared, locktype))
p.start()
p.join()
self.assertEqual(p.exitcode, expected)
def testSingleLock(self):
"""Just test getting releasing a lock with options."""
arg_list = [
[True, False], # blocking
[True, False], # shared
[locking.FLOCK, locking.LOCKF], # locking mechanism
]
for args in itertools.product(*arg_list):
self._HelperSingleLockTest(*args)
  def testDoubleLockWithFlock(self):
    """Tests that double locks do block with flock."""
    lock1 = locking.FileLock(
        self.lock_file, blocking=False, locktype=locking.FLOCK)
    lock2 = locking.FileLock(
        self.lock_file, blocking=False, locktype=locking.FLOCK)

    with lock1.write_lock():
      self.assertTrue(lock1.IsLocked())
      self.assertFalse(lock2.IsLocked())
      self.assertRaises(locking.LockNotAcquiredError, lock2.write_lock)
      self.assertTrue(lock1.IsLocked())
      self.assertFalse(lock2.IsLocked())

    self.assertFalse(lock1.IsLocked())
    self.assertFalse(lock2.IsLocked())

    lock2.unlock()
    self.assertFalse(lock1.IsLocked())
    self.assertFalse(lock2.IsLocked())

  def testDoubleLockWithLockf(self):
    """Tests that double locks don't block with lockf."""
    lock1 = locking.FileLock(
        self.lock_file, blocking=False, locktype=locking.LOCKF)
    lock2 = locking.FileLock(
        self.lock_file, blocking=False, locktype=locking.LOCKF)

    with lock1.write_lock():
      self.assertTrue(lock1.IsLocked())
      self.assertFalse(lock2.IsLocked())

      # With lockf, we can lock the same file twice in the same process.
      with lock2.write_lock():
        self.assertTrue(lock1.IsLocked())
        self.assertTrue(lock2.IsLocked())

    self.assertFalse(lock1.IsLocked())
    self.assertFalse(lock2.IsLocked())

  def testContextMgr(self):
    """Make sure we behave properly with 'with'."""
    # Create an instance, and use it in a with.
    prelock = locking.FileLock(self.lock_file)
    self._HelperWithProcess(expected=LOCK_ACQUIRED)
    with prelock.write_lock() as lock:
      # Assert the instance didn't change.
      self.assertIs(prelock, lock)
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED)
    self._HelperWithProcess(expected=LOCK_ACQUIRED)

    # Construct the instance in the with expression.
    with locking.FileLock(self.lock_file).write_lock() as lock:
      self.assertIsInstance(lock, locking.FileLock)
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED)
    self._HelperWithProcess(expected=LOCK_ACQUIRED)

  def testAcquireBeforeWith(self):
    """Sometimes you want to grab a lock and then return it into 'with'."""
    lock = locking.FileLock(self.lock_file, blocking=False)
    lock.write_lock()
    self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED)

    with lock:
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED)
    self._HelperWithProcess(expected=LOCK_ACQUIRED)

  def testSingleProcessLock(self):
    """Test grabbing the same lock in processes with no conflicts."""
    arg_list = [
        [LOCK_ACQUIRED],
        [True, False],  # blocking
        [True, False],  # shared
        [locking.FLOCK, locking.LOCKF],  # locking mechanism
    ]
    for args in itertools.product(*arg_list):
      self._HelperWithProcess(*args)

  def testNonBlockingConflicts(self):
    """Test that we get a lock conflict for non-blocking locks."""
    with locking.FileLock(self.lock_file).write_lock():
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED)
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED, shared=True)

    # Can grab it after it's released.
    self._HelperWithProcess(expected=LOCK_ACQUIRED)
  def testSharedLocks(self):
    """Test lock conflicts for shared locks."""
    # Initial lock is NOT shared.
    with locking.FileLock(self.lock_file).write_lock():
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED, shared=True)

    # Initial lock IS shared.
    with locking.FileLock(self.lock_file).read_lock():
      self._HelperWithProcess(expected=LOCK_ACQUIRED, shared=True)
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED,
                              shared=False)

  def testBlockingConflicts(self):
    """Test lock conflict for blocking locks."""
    # Initial lock is blocking, exclusive.
    with locking.FileLock(self.lock_file, blocking=True).write_lock():
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED, blocking=False)
      p = self._HelperStartProcess(blocking=True, shared=False)

    # When the with clause exits, p should unblock and get the lock, setting
    # its exit code to success now.
    p.join()
    self.assertEqual(p.exitcode, LOCK_ACQUIRED)

    # Initial lock is NON blocking.
    with locking.FileLock(self.lock_file, blocking=False).write_lock():
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED)
      p = self._HelperStartProcess(blocking=True, shared=False)

    # When the with clause exits, p should unblock and get the lock, setting
    # its exit code to success now.
    p.join()
    self.assertEqual(p.exitcode, LOCK_ACQUIRED)

    # Initial lock is shared, blocking lock is exclusive.
    with locking.FileLock(self.lock_file, blocking=False).read_lock():
      self._HelperWithProcess(expected=LOCK_NOT_ACQUIRED)
      self._HelperWithProcess(expected=LOCK_ACQUIRED, shared=True)
      p = self._HelperStartProcess(blocking=True, shared=False)
      q = self._HelperStartProcess(blocking=True, shared=False)

    # When the with clause exits, p and q should unblock and get the lock,
    # setting their exit codes to success now.
    p.join()
    self.assertEqual(p.exitcode, LOCK_ACQUIRED)
    q.join()
    self.assertEqual(q.exitcode, LOCK_ACQUIRED)
class PortableLinkLockTest(cros_test_lib.TempDirTestCase):
  """Test locking.PortableLinkLock class."""

  def tearDown(self):
    """Looks for leaked files from the locking process."""
    leaked_files = os.listdir(self.tempdir)
    self.assertFalse(leaked_files,
                     'Found unexpected leaked files from locking: %r' %
                     leaked_files)

  def testLockExclusivity(self):
    """Test that when we have a lock, someone else can't grab it."""
    lock_path = os.path.join(self.tempdir, 'locked_file')
    with locking.PortableLinkLock(lock_path, max_retry=0):
      with self.assertRaises(locking.LockNotAcquiredError):
        with locking.PortableLinkLock(lock_path, max_retry=5, sleep=0.1):
          self.fail('We acquired a lock twice?')

  def testCanUnlock(self):
    """Test that we release locks correctly."""
    lock_path = os.path.join(self.tempdir, 'locked_file')
    with locking.PortableLinkLock(lock_path, max_retry=0):
      pass
    with locking.PortableLinkLock(lock_path, max_retry=0):
      pass


class PipeLockTest(cros_test_lib.TestCase):
  """Test locking.PipeLock class."""

  def testFdLeakage(self):
    """Make sure we don't leak any fds."""
    fds_before = os.listdir('/proc/self/fd/')
    lock = locking.PipeLock()
    fds_after = os.listdir('/proc/self/fd/')
    self.assertEqual(len(fds_before), len(fds_after) - 2)
    del lock
    fds_finished = os.listdir('/proc/self/fd/')
    self.assertEqual(fds_before, fds_finished)

  def testSimple(self):
    """Test we can Wait/Post."""
    # If this fails, we'd just hang :).
    with timeout_util.Timeout(30):
      lock = locking.PipeLock()
      lock.Post()
      lock.Post()
      lock.Wait()
      lock.Wait()
      del lock
  def testParallel(self):
    """Test that interprocess synchronization actually works."""
    write_lock = locking.PipeLock()
    read_lock = locking.PipeLock()

    with osutils.TempDir() as tempdir:
      # Let the child create a file, but make sure the parent holds us off.
      # Then make the parent wait for the child to tell us it's done.
      flag_file = os.path.join(tempdir, 'foo')

      pid = os.fork()
      if pid == 0:
        # Child.
        # pylint: disable=protected-access
        try:
          write_lock.Wait()
          del write_lock
          osutils.Touch(flag_file)
          read_lock.Post()
          del read_lock
        except Exception:
          os._exit(1)
        finally:
          # No matter what happens, we must exit w/out running handlers.
          os._exit(0)
      else:
        # Parent.
        time.sleep(0.5)
        self.assertNotExists(flag_file)
        write_lock.Post()
        del write_lock
        read_lock.Wait()
        del read_lock
        self.assertExists(flag_file)
        status = os.waitpid(pid, 0)[1]
        self.assertEqual(process_util.GetExitStatus(status), 0)

  def testParallelMany(self):
    """Same as testParallel, but with many more processes for stressing."""
    parallel.RunParallelSteps([self.testParallel] * 40)
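
# ---------------------------------------------------------------------------
# Condensed sketch (not part of the original suite) of the flock/lockf
# contrast the two double-lock tests encode; the path is hypothetical.
# flock conflicts across FileLock instances even within one process, while
# lockf lets the same process re-lock the same file:
#
#   path = '/tmp/demo.lock'
#   with locking.FileLock(path, blocking=False,
#                         locktype=locking.FLOCK).write_lock():
#     locking.FileLock(path, blocking=False,
#                      locktype=locking.FLOCK).write_lock()
#     # -> raises locking.LockNotAcquiredError
#
#   with locking.FileLock(path, blocking=False,
#                         locktype=locking.LOCKF).write_lock():
#     with locking.FileLock(path, blocking=False,
#                           locktype=locking.LOCKF).write_lock():
#       pass  # no conflict: lockf locks are per-process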
|
{
"content_hash": "ca7aca3dec8ca832bce5aa07a4f6b964",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 77,
"avg_line_length": 33.58841463414634,
"alnum_prop": 0.6741399655078515,
"repo_name": "endlessm/chromium-browser",
"id": "70c24cc63ca84a45940ef2f5d04c3b748a7b9bb5",
"size": "11207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/lib/locking_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys

import inviwo_internal


class OutputRedirectStdout:
    def write(self, string):
        inviwo_internal.ivwPrint(string, 0)


class OutputRedirectStderr:
    def write(self, string):
        inviwo_internal.ivwPrint(string, 1)


sys.stdout = OutputRedirectStdout()
sys.stderr = OutputRedirectStderr()
|
{
"content_hash": "fc0c7b48d0271c94a983fd3908c92a51",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 22.5,
"alnum_prop": 0.7333333333333333,
"repo_name": "sarbi127/inviwo",
"id": "a99bcd935308a315170a7b64bd265c0ea2e1a2f4",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/python3/scripts/outputredirector.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "8314"
},
{
"name": "Batchfile",
"bytes": "11380"
},
{
"name": "C",
"bytes": "16542696"
},
{
"name": "C#",
"bytes": "713601"
},
{
"name": "C++",
"bytes": "20347995"
},
{
"name": "CMake",
"bytes": "473869"
},
{
"name": "COBOL",
"bytes": "2921725"
},
{
"name": "CSS",
"bytes": "26526"
},
{
"name": "D",
"bytes": "175403"
},
{
"name": "GLSL",
"bytes": "177518"
},
{
"name": "Groff",
"bytes": "6854"
},
{
"name": "HTML",
"bytes": "2691735"
},
{
"name": "Inno Setup",
"bytes": "8416"
},
{
"name": "Java",
"bytes": "287161"
},
{
"name": "JavaScript",
"bytes": "3140"
},
{
"name": "Logos",
"bytes": "2952312"
},
{
"name": "M",
"bytes": "9622"
},
{
"name": "Makefile",
"bytes": "797"
},
{
"name": "Mathematica",
"bytes": "30867"
},
{
"name": "Objective-C",
"bytes": "78973"
},
{
"name": "Objective-C++",
"bytes": "29141"
},
{
"name": "Pascal",
"bytes": "13054"
},
{
"name": "Python",
"bytes": "174077"
},
{
"name": "QMake",
"bytes": "1381"
},
{
"name": "Shell",
"bytes": "247978"
},
{
"name": "Smalltalk",
"bytes": "1501"
},
{
"name": "Smarty",
"bytes": "169"
},
{
"name": "UnrealScript",
"bytes": "1273"
},
{
"name": "XSLT",
"bytes": "3925"
}
],
"symlink_target": ""
}
|
from direct.distributed import DistributedObjectAI
from direct.fsm import FSM
from direct.directnotify import DirectNotifyGlobal
from toontown.coghq import FoodBeltBase


class DistributedFoodBeltAI(DistributedObjectAI.DistributedObjectAI, FSM.FSM, FoodBeltBase.FoodBeltBase):
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedFoodBeltAI')

    def __init__(self, air, boss, index):
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)
        FSM.FSM.__init__(self, 'DistributedFoodBeltAI')
        self.boss = boss
        self.index = index

    def delete(self):
        DistributedObjectAI.DistributedObjectAI.delete(self)

    def getBossCogId(self):
        return self.boss.doId

    def getIndex(self):
        return self.index

    def setState(self, state):
        self.request(state)

    def d_setState(self, state):
        # The wire protocol sends single-letter state codes to clients.
        newState = state
        if state == 'On':
            newState = 'N'
        elif state == 'Off':
            newState = 'F'
        elif state == 'Inactive':
            newState = 'I'
        elif state == 'Toonup':
            newState = 'T'
        self.sendUpdate('setState', [newState])

    def b_setState(self, state):
        self.request(state)
        self.d_setState(state)

    def turnOn(self):
        self.b_setState('On')

    def goInactive(self):
        self.b_setState('Inactive')

    def goToonup(self):
        self.b_setState('Toonup')

    def enterOn(self):
        pass

    def exitOn(self):
        pass

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def enterInactive(self):
        pass

    def exitInactive(self):
        pass
|
{
"content_hash": "04ecb63477b0203645b3c76e77fced88",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 105,
"avg_line_length": 24.529411764705884,
"alnum_prop": 0.6205035971223022,
"repo_name": "Spiderlover/Toontown",
"id": "7cd1cce7271886521ddbfc58c35db1c8963d7f55",
"size": "1668",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "toontown/coghq/DistributedFoodBeltAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
}
|
from swgpy.object import *


def create(kernel):
    result = Tangible()

    result.template = "object/tangible/component/droid/shared_le_repair_droid_chassis.iff"
    result.attribute_template_id = -1
    result.stfName("craft_droid_ingredients_n","le_repair_droid_chassis")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
|
{
"content_hash": "d56a0b38297583b03c19feddd2e272bb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 87,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.7146974063400576,
"repo_name": "anhstudios/swganh",
"id": "a3ae1d4ed6285953897381f43c6ba64e8d255398",
"size": "492",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/component/droid/shared_le_repair_droid_chassis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from ray.rllib.models.tf.layers.gru_gate import GRUGate
from ray.rllib.models.tf.layers.noisy_layer import NoisyLayer
from ray.rllib.models.tf.layers.relative_multi_head_attention import \
    RelativeMultiHeadAttention
from ray.rllib.models.tf.layers.skip_connection import SkipConnection
from ray.rllib.models.tf.layers.multi_head_attention import MultiHeadAttention

__all__ = [
    "GRUGate", "MultiHeadAttention", "NoisyLayer",
    "RelativeMultiHeadAttention", "SkipConnection"
]
|
{
"content_hash": "6c03787322f5cbab7ee3d7362a1ead64",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 44.18181818181818,
"alnum_prop": 0.7983539094650206,
"repo_name": "richardliaw/ray",
"id": "68ae2ea53c8b51eea9494b524d974a90c55aa553",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rllib/models/tf/layers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
}
|
import six

from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import consts
from rally import osclients
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import context

LOG = logging.getLogger(__name__)


@context.configure(name="lbaas", order=360)
class Lbaas(context.Context):
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "pool": {
                "type": "object"
            },
            "lbaas_version": {
                "type": "integer",
                "minimum": 1
            }
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "pool": {
            "lb_method": "ROUND_ROBIN",
            "protocol": "HTTP"
        },
        "lbaas_version": 1
    }

    @utils.log_task_wrapper(LOG.info, _("Enter context: `lbaas`"))
    def setup(self):
        net_wrapper = network_wrapper.wrap(
            osclients.Clients(self.context["admin"]["endpoint"]),
            self.context["task"],
            config=self.config)
        use_lb, msg = net_wrapper.supports_extension("lbaas")
        if not use_lb:
            LOG.info(msg)
            return
        # Creates a lb-pool for every subnet created in network context.
        for user, tenant_id in (utils.iterate_per_tenants(
                self.context.get("users", []))):
            for network in self.context["tenants"][tenant_id]["networks"]:
                for subnet in network.get("subnets", []):
                    if self.config["lbaas_version"] == 1:
                        network.setdefault("lb_pools", []).append(
                            net_wrapper.create_v1_pool(
                                tenant_id,
                                subnet,
                                **self.config["pool"]))
                    else:
                        raise NotImplementedError(
                            "Context for LBaaS version %s not implemented."
                            % self.config["lbaas_version"])

    @utils.log_task_wrapper(LOG.info, _("Exit context: `lbaas`"))
    def cleanup(self):
        net_wrapper = network_wrapper.wrap(
            osclients.Clients(self.context["admin"]["endpoint"]),
            self.context["task"],
            config=self.config)
        for tenant_id, tenant_ctx in six.iteritems(self.context["tenants"]):
            for network in tenant_ctx.get("networks", []):
                for pool in network.get("lb_pools", []):
                    with logging.ExceptionLogger(
                            LOG,
                            _("Failed to delete pool %(pool)s for tenant "
                              "%(tenant)s") % {"pool": pool["pool"]["id"],
                                               "tenant": tenant_id}):
                        if self.config["lbaas_version"] == 1:
                            net_wrapper.delete_v1_pool(pool["pool"]["id"])
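
# ----------------------------------------------------------------------
# Sketch (not from the original module): the task-level context section
# that CONFIG_SCHEMA accepts, expressed as a Python dict; the values
# simply mirror DEFAULT_CONFIG above.
context_config = {
    "lbaas": {
        "pool": {"lb_method": "ROUND_ROBIN", "protocol": "HTTP"},
        "lbaas_version": 1,
    }
}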
|
{
"content_hash": "d6256c742de4a1043c5919e305fc2a22",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 36.53012048192771,
"alnum_prop": 0.495712401055409,
"repo_name": "cernops/rally",
"id": "ab29baf09807055988abc15280c425421fd32d73",
"size": "3605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rally/plugins/openstack/context/neutron/lbaas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "48262"
},
{
"name": "Python",
"bytes": "2726491"
},
{
"name": "Shell",
"bytes": "43920"
}
],
"symlink_target": ""
}
|
"""This file is part of the Scribee project.
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.1'
import os
import settings
class Entity(object):
"""An Entity is a generic language-agnostic object.
"""
class Types:
Generic = ""
Namespace = "namespace"
Constant = "const"
Variable = "var"
Function = "func"
Class = "class"
class DuplicateEntityException(Exception):
pass
entities = []
def __init__(self, name="", type="", brief="", details="", sources=[], parent=None, **kwargs):
self.name = name
self.type = type
self.brief = brief
self.details = details
self.sources = sources
self.parent = parent
self.__dict__.update(kwargs)
uid = self.uid()
for e in Entity.entities:
if e.uid() == uid:
raise DuplicateEntityException
Entity.entities.append(self)
def __repr__(self):
string = "Entity: <%s>" % self.uid()
if self.brief != "":
string += "\n-- brief: %s" % self.brief
if self.details != "":
string += "\n-- details: %s" % self.details
return string
def __str__(self):
return self.__repr__()
def uid(self):
"""Returns the entity's UID.
"""
return Entity.get_uid(self.name, self.type, self.parent)
def is_valid(self):
return self.type.lower() != ""
def ancestors(self):
if isinstance(self.parent, Entity):
return self.parent.ancestors() + [self.parent]
return []
def children(self, types=None):
if not isinstance(types, (list, tuple)):
types = (types)
return [e for e in Entity.entities if e.parent == self and (not types or e.type in types)]
@classmethod
def get_uid(cls, name, type, parent):
uid = "%s" % name
if type:
uid = "%s_%s" % (type, name)
if parent:
uid = "%s__%s" % (parent.uid(), uid)
return uid
@classmethod
def nearest(cls, name, type=None, parent=None):
# TODO: allow to search only with name and parent (without type).
ref_uid = cls.get_uid(name, type, parent)
ref_length = len(ref_uid)
remaining_chars = -1
nearest_entity = None
for entity in Entity.entities:
uid = entity.uid()
if uid.endswith(ref_uid):
rc = len(uid) - ref_length
if remaining_chars < 0 or rc < remaining_chars:
remaining_chars = rc
nearest_entity = entity
if rc == 0:
break
return nearest_entity
@classmethod
def get_or_create(cls, name, type, sources=[], parent=None):
if not name and not type:
return None
nearest = cls.nearest(name, type, parent)
if not nearest:
nearest = Entity(name, type, sources=sources, parent=parent)
else:
for s in sources:
if s not in nearest.sources:
nearest.sources.append(s)
return nearest
class DocBlock(object):
"""A DocBlock is a generic piece of documentation for one or more entities.
"""
blocks = []
def __init__(self, content="", source="", parent=None):
all_filters = getattr(settings, "FILTERS", {})
root, ext = os.path.splitext(source)
self.source = source
self.parent = parent
self._original = content
self.filtered = content.strip()
if all_filters.has_key(ext):
for filter_list in all_filters[ext]:
if isinstance(filter_list, dict):
filter_list = filters_list.values()
elif not isinstance(filter_list, (list, tuple)):
filter_list = (filter_list,)
for f in filter_list:
f.filter(self)
if isinstance(parent, Entity):
self.parent.details = self.filtered
try:
self.parent.brief = [l for l in self.filtered.splitlines() if l][0]
except IndexError:
pass
# FIXME: does it work?
if self not in self.__class__.blocks:
self.__class__.blocks.append(self)
def __repr__(self):
string = "DocBlock:"
string += "\n-- content: %s" % self.filtered
if self.parent:
string += "\n-- parent: %s" % self.parent.uid()
return string
def __str__(self):
return self.__repr__()
def is_valid(self):
return (self._original or self.filtered)
@classmethod
def get_or_create(cls, content, source=None, parent=None):
for block in cls.blocks:
if block._original == content:
if source and block.source != source:
continue
if parent and block.parent != parent:
continue
return block
return DocBlock(content, source=source, parent=parent)
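
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of how the two
# registries interact; all names are hypothetical, and it assumes no
# filters are registered for ".py" in settings.FILTERS.
if __name__ == "__main__":
    cls_entity = Entity.get_or_create("Parser", Entity.Types.Class,
                                      sources=["parser.py"])
    func_entity = Entity.get_or_create("parse", Entity.Types.Function,
                                       sources=["parser.py"], parent=cls_entity)
    DocBlock("Parse a source file.\n\nReturns a list of entities.",
             source="parser.py", parent=func_entity)

    print(func_entity.uid())    # -> class_Parser__func_parse
    print(func_entity.brief)    # -> Parse a source file.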
|
{
"content_hash": "fceaa55b520f0c44dcfa43cb9dbb59c6",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 98,
"avg_line_length": 29.53370786516854,
"alnum_prop": 0.5261556020544037,
"repo_name": "zuck/scribee",
"id": "a7e6acaf984be4d89faccf44418f4252ffeaf7b7",
"size": "5299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entity.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "149"
},
{
"name": "C++",
"bytes": "997"
},
{
"name": "CSS",
"bytes": "9709"
},
{
"name": "HTML",
"bytes": "10333"
},
{
"name": "Python",
"bytes": "27891"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, render_template, redirect, url_for, current_app

from deployer import database as db
from deployer.routing.models import Route
from deployer.utils import xhr_form, allow_traffic, get_container_ip

from . import models, forms


views = Blueprint('apps', __name__, template_folder='templates')


def get_app(key):
    query = db.Session().query(models.Application).filter_by(key=key)
    return db.get_one_or_abort(query)


@views.route('/')
def index():
    apps = db.Session().query(models.Application).all()
    return render_template('applications.html', apps=apps)


@views.route('/<regex("[A-Z0-9]+"):app_key>/')
def app(app_key):
    return render_template('application.html', app=get_app(app_key))


@views.route('/new/', methods=['GET', 'POST'], endpoint='new')
@views.route('/<regex("[A-Z0-9]+"):app_key>/edit/', methods=['GET', 'POST'])
@xhr_form
def edit(app_key=None):
    create = app_key is None
    app = None if create else get_app(app_key)
    form = forms.ApplicationForm(obj=app)
    if form.validate_on_submit():
        session = db.Session()
        if create:
            app = models.Application()
        form.populate_obj(app)
        session.add(app)
        session.commit()
        return redirect(url_for('.app', app_key=app.key))
    return render_template('edit-application.html',
                           form=form, create=create, app=app)


@views.route('/<regex("[A-Z0-9]+"):app_key>/builds/<build>/')
def build(app_key, build):
    app = get_app(app_key)
    build = db.get_one_or_abort(app.builds.filter_by(tag=build))
    return render_template('build.html', app=app, build=build)


@views.route('/<regex("[A-Z0-9]+"):app_key>/templates/new/',
             methods=['GET', 'POST'], endpoint='new_template')
@views.route('/<regex("[A-Z0-9]+"):app_key>/templates/<int:template_id>/edit/',
             methods=['GET', 'POST'])
@xhr_form
def edit_template(app_key, template_id=None):
    create = template_id is None
    app = get_app(app_key)
    if create:
        template = None
    else:
        template = db.get_one_or_abort(app.templates.filter_by(id=template_id))
    form = forms.TemplateForm(obj=template)
    if form.validate_on_submit():
        session = db.Session()
        if create:
            template = models.DeploymentTemplate(application=app)
        form.populate_obj(template)
        session.add(template)
        session.commit()
        return redirect(url_for('.app', app_key=app.key))
    return render_template('edit-deployment-template.html',
                           form=form, create=create, app=app, tpl=template)


@views.route(
    '/<regex("[A-Z0-9]+"):app_key>/templates/<int:template_id>/delete/',
    methods=['GET', 'POST']
)
@xhr_form
def delete_template(app_key, template_id):
    app = get_app(app_key)
    template = db.get_one_or_abort(app.templates.filter_by(id=template_id))
    form = forms.ConfirmationForm()
    if form.validate_on_submit():
        session = db.Session()
        session.delete(template)
        session.commit()
        return redirect(url_for('.app', app_key=app.key))
    return render_template('confirm-delete-template.html', form=form,
                           tpl=template)


@views.route('/<regex("[A-Z0-9]+"):app_key>/builds/<build>/deploy/',
             methods=['GET', 'POST'])
@xhr_form
def deploy(app_key, build):
    app = get_app(app_key)
    build = db.get_one_or_abort(app.builds.filter_by(tag=build))
    form = forms.DeploymentSetupForm(app)
    if form.validate_on_submit():
        instance = build.deploy(form.data['host'],
                                form.data['template'].template)
        session = db.Session()
        session.add(instance)
        route = Route(instance=instance, routing_key=form.data['hostname'])
        session.add(route)
        client = instance.host.get_client()
        child_ip = get_container_ip(client, instance.container)
        parent_ip = get_container_ip(
            client,
            current_app.config['FRONTEND_NAME']
        )
        allow_traffic(parent_ip, child_ip, 5510)
        session.commit()
        route.update(current_app.config['FRONTEND_NAME'])
        return redirect(url_for('.instance', app_key=app.key,
                                container_id=instance.container[:10]))
    return render_template('deploy-setup.html', form=form, app=app,
                           build=build)


@views.route('/<regex("[A-Z0-9]+"):app_key>/instances/<container_id>/')
def instance(app_key, container_id):
    app = get_app(app_key)
    instance = db.get_one_or_abort(app.instances.filter(
        models.Instance.container.startswith(container_id)))
    return render_template('instance.html', app=app, instance=instance)


@views.route('/<regex("[A-Z0-9]+"):app_key>/instances/<container_id>/stop/',
             methods=['GET', 'POST'])
@xhr_form
def stop(app_key, container_id):
    app = get_app(app_key)
    instance = db.get_one_or_abort(app.instances.filter(
        models.Instance.container.startswith(container_id)))
    form = forms.ConfirmationForm()
    if form.validate_on_submit():
        session = db.Session()
        instance.stop()
        for route in instance.routes:
            route.update(current_app.config['FRONTEND_NAME'])
        session.commit()
        return redirect(url_for('.app', app_key=app.key))
    return render_template('confirm-stop-instance.html', form=form,
                           instance=instance)
|
{
"content_hash": "b056afebbc27661a0014df8a7640cf66",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 79,
"avg_line_length": 32.08187134502924,
"alnum_prop": 0.6208530805687204,
"repo_name": "GaretJax/docker-deployer",
"id": "4f10d1c66cd24cc3511c486c0a27e46e84095b80",
"size": "5486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deployer/applications/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "282507"
},
{
"name": "CoffeeScript",
"bytes": "312"
},
{
"name": "Python",
"bytes": "11474"
},
{
"name": "Ruby",
"bytes": "1203"
}
],
"symlink_target": ""
}
|
import logging
import os
import traceback

import fuel_health.common.ssh
from fuel_health.common.utils.data_utils import rand_name
import fuel_health.nmanager
import fuel_health.test

LOG = logging.getLogger(__name__)


class HeatBaseTest(fuel_health.nmanager.NovaNetworkScenarioTest):
    """Base class for Heat openstack sanity and smoke tests."""

    @classmethod
    def setUpClass(cls):
        super(HeatBaseTest, cls).setUpClass()
        cls.flavors = []
        if cls.manager.clients_initialized:
            if cls.heat_client is None:
                cls.fail('Heat is unavailable.')
            cls.wait_interval = cls.config.compute.build_interval
            cls.wait_timeout = cls.config.compute.build_timeout

    @classmethod
    def tearDownClass(cls):
        super(HeatBaseTest, cls).tearDownClass()
        if cls.flavors:
            try:
                [cls.compute_client.flavors.delete(flavor)
                 for flavor in cls.flavors]
            except Exception:
                LOG.debug(traceback.format_exc())

    def setUp(self):
        super(HeatBaseTest, self).setUp()
        self.check_clients_state()
        if not self.find_micro_flavor():
            self.fail('m1.micro flavor was not created.')

    @staticmethod
    def _list_stacks(client):
        return client.stacks.list()

    def _find_stack(self, client, key, value):
        for stack in self._list_stacks(client):
            if hasattr(stack, key) and getattr(stack, key) == value:
                return stack
        return None

    def _create_stack(self, client, template,
                      disable_rollback=True, parameters={}):
        stack_name = rand_name('ost1_test-')
        client.stacks.create(stack_name=stack_name,
                             template=template,
                             parameters=parameters,
                             disable_rollback=disable_rollback)
        self.addCleanup(self._delete_stack, stack_name)

        # heat client doesn't return stack details after creation
        # so need to request them:
        stack = self._find_stack(client, 'stack_name', stack_name)
        return stack

    def _delete_stack(self, stack_name):
        LOG.debug("Deleting stack: %s" % stack_name)
        stack = self._find_stack(self.heat_client, 'stack_name', stack_name)
        if stack is None:
            return
        try:
            self.heat_client.stacks.delete(stack.id)
        except Exception:
            LOG.debug(traceback.format_exc())
            self.fail("Cleanup: Failed to delete stack '%s'" % stack_name)
        self._wait_for_stack_deleted(stack.id)
        LOG.debug("Resource '%s' has been deleted." % stack_name)

    def _update_stack(self, client, stack_id, template, parameters={}):
        client.stacks.update(stack_id=stack_id,
                             template=template,
                             parameters=parameters)
        return self._find_stack(client, 'id', stack_id)

    def _wait_for_stack_status(self, stack_id, expected_status,
                               timeout=None, interval=None):
        """The method is a customization of test.status_timeout().

        It addresses `stack_status` instead of `status` field and
        checks for FAILED instead of ERROR status.
        The rest is the same.
        """
        if timeout is None:
            timeout = self.wait_timeout
        if interval is None:
            interval = self.wait_interval

        def check_status():
            stack = self.heat_client.stacks.get(stack_id)
            new_status = stack.stack_status
            if 'FAIL' in new_status:
                self.fail("Failed to get to expected status. "
                          "In %s state." % new_status)
            elif new_status == expected_status:
                return True  # All good.
            LOG.debug("Waiting for %s to get to %s status. "
                      "Currently in %s status",
                      stack, expected_status, new_status)

        if not fuel_health.test.call_until_true(check_status,
                                                timeout,
                                                interval):
            self.fail("Timed out waiting to become %s"
                      % expected_status)

    def _wait_for_stack_deleted(self, stack_id):
        f = lambda: self._find_stack(self.heat_client, 'id', stack_id) is None
        if not fuel_health.test.call_until_true(f,
                                                self.wait_timeout,
                                                self.wait_interval):
            self.fail("Timed out waiting for stack to be deleted.")

    def _wait_for_autoscaling(self, exp_count,
                              timeout, interval, reduced_stack_name):
        LOG.info('expected count is {0}'.format(exp_count))

        def count_instances(reduced_stack_name):
            res = []
            _list = self.compute_client.servers.list()
            for server in _list:
                LOG.info('instance name is {0}'.format(server.name))
                if server.name.startswith(reduced_stack_name):
                    res.append(server)
            LOG.info('!!! current res is {0}'.format(res))
            return len(res) == exp_count

        return fuel_health.test.call_until_true(
            count_instances, timeout, interval, reduced_stack_name)

    def _wait_for_vm_ready_for_load(self, conn_string, timeout, interval):
        """Wait for fake file to be created on the instance
        to make sure that vm is ready.
        """
        cmd = (conn_string +
               " 'touch /tmp/ostf-heat.txt; "
               "test -f /tmp/ostf-heat.txt && echo -ne YES || echo -ne NO'")

        def check():
            return self._run_ssh_cmd(cmd)[0] == "YES"

        return fuel_health.test.call_until_true(
            check, timeout, interval)

    def _save_key_to_file(self, key):
        return self._run_ssh_cmd(
            "KEY=`mktemp`; echo '%s' > $KEY; "
            "chmod 600 $KEY; echo -ne $KEY;" % key)[0]

    def _delete_key_file(self, filepath):
        self._run_ssh_cmd("rm -f %s" % filepath)

    def _load_vm_cpu(self, connection_string):
        self._run_ssh_cmd(connection_string + " 'rm -f /tmp/ostf-heat.txt'")
        return self._run_ssh_cmd(connection_string +
                                 " 'cat /dev/urandom |"
                                 " gzip -9 > /dev/null &'")[0]

    def _release_vm_cpu(self, connection_string):
        pid = self._run_ssh_cmd(connection_string +
                                ' ps -ef | grep \"cat /dev/urandom\" '
                                '| grep -v grep | awk \"{print $1}\"')[0]
        return self._run_ssh_cmd(connection_string +
                                 " kill -9 %s" % pid.strip())[0]

    @staticmethod
    def _load_template(file_name):
        """Load specified template file from etc directory."""
        filepath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "etc", file_name)
        with open(filepath) as f:
            return f.read()

    @staticmethod
    def _customize_template(template):
        """By default, heat templates expect neutron subnets to be available.

        But if nova-network is used instead of neutron then
        subnet usage should be removed from the template.
        """
        return '\n'.join(line for line in template.splitlines()
                         if 'Ref: Subnet' not in line)

    def _get_stack_instances(self, stack_id):
        servers = self.heat_client.stacks.get(stack_id).outputs
        server_ids = [server['output_value'] for server in servers]
        LOG.info('SERVERS {0}'.format(server_ids))
        return server_ids

    def _get_instances_by_name_mask(self, mask_name):
        self.instances = []

        # find just created instance
        instance_list = self.compute_client.servers.list()
        LOG.info('Instances list is {0}'.format(instance_list))
        LOG.info('Expected instance name includes {0}'.format(mask_name))
        for inst in instance_list:
            LOG.info('Instance name is {0}'.format(inst.name))
            if inst.name.startswith(mask_name):
                self.instances.append(inst)
        return self.instances
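
# ----------------------------------------------------------------------
# Usage sketch (not from the original module): how a test built on
# HeatBaseTest would typically drive the helpers above, inside a test
# method; the template filename is hypothetical.
#
#     template = self._load_template('some_template.yaml')
#     stack = self._create_stack(self.heat_client, template)
#     self._wait_for_stack_status(stack.id, 'CREATE_COMPLETE')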
|
{
"content_hash": "a6dc6638dc3c5af0cfca6b15a76bffa3",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 78,
"avg_line_length": 38.13302752293578,
"alnum_prop": 0.5575604474918802,
"repo_name": "mcloudv/fuel-ostf",
"id": "f7c69fd298d2c62de777e1dd16a4a64dbf1c79e1",
"size": "9020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel_health/heatmanager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "404"
},
{
"name": "Python",
"bytes": "594650"
},
{
"name": "Shell",
"bytes": "6024"
}
],
"symlink_target": ""
}
|
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
|
{
"content_hash": "34b0a689b18308abcf14a87f5f361b41",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 43,
"avg_line_length": 31,
"alnum_prop": 0.8064516129032258,
"repo_name": "cjmabry/PoliChart",
"id": "7d42edc2a0f344acbfbff7c6683262a2403bbdec",
"size": "87",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "polichart/extensions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9548"
},
{
"name": "HTML",
"bytes": "20198"
},
{
"name": "JavaScript",
"bytes": "18610"
},
{
"name": "Python",
"bytes": "28726"
}
],
"symlink_target": ""
}
|
from accounts.cqrs import app
from rest_framework.authentication import BaseAuthentication
from rest_framework.authentication import get_authorization_header
from rest_framework.exceptions import AuthenticationFailed

import re

TOKEN_RE = re.compile(r'^Token (\w+)$')


class TokenAuthentication(BaseAuthentication):
    def authenticate(self, request):
        header = get_authorization_header(request)
        if not header:
            return (None, None)
        try:
            header = header.decode()
        except UnicodeError:
            raise AuthenticationFailed('Authorization header contains bad characters')
        finds = TOKEN_RE.findall(header)
        if not finds:
            raise AuthenticationFailed('Authorization header should be in format: "Token TOKEN"')
        token = finds[0]
        return self.authenticate_token(token)

    def authenticate_token(self, token):
        user = app.get_user_by_token(token)
        if user is None:
            raise AuthenticationFailed('Invalid token')
        elif not user.is_active:
            raise AuthenticationFailed('User inactive or deleted')
        return user, token

    def authenticate_header(self, request):
        return 'Token'
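
# ----------------------------------------------------------------------
# Client-side sketch (not from the original module): the exact header
# shape TOKEN_RE accepts; the URL and token value are hypothetical.
#
#     import requests
#     requests.get("https://example.com/api/me/",
#                  headers={"Authorization": "Token abc123"})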
|
{
"content_hash": "032f1e55c64a7457c7c57772b5faeaa2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 97,
"avg_line_length": 33.94444444444444,
"alnum_prop": 0.679214402618658,
"repo_name": "lukaszb/cq",
"id": "7726df83e270d84e1a27b2cced04e9130032c78d",
"size": "1222",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "examples/djangoapp/accounts/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1693"
},
{
"name": "Python",
"bytes": "43837"
}
],
"symlink_target": ""
}
|
from django.shortcuts import get_object_or_404
from django.http import Http404
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Atom1Feed
from django.contrib.syndication.views import Feed
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType

from actstream.models import actor_stream, model_stream, user_stream


class ObjectActivityFeed(Feed):
    def get_object(self, request, content_type_id, object_id):
        return get_object_or_404(ContentType, pk=content_type_id)\
            .get_object_for_this_type(pk=object_id)

    def title(self, obj):
        return 'Activity feed from %s' % obj

    def link(self, obj):
        if hasattr(obj, 'get_absolute_url'):
            return obj.get_absolute_url()
        return reverse('actstream_actor', None,
                       (ContentType.objects.get_for_model(obj).pk, obj.pk))

    def description(self, obj):
        return 'Public activities of %s' % obj

    def items(self, obj):
        i = actor_stream(obj)
        if i:
            return i[:30]
        return []


class AtomObjectActivityFeed(ObjectActivityFeed):
    feed_type = Atom1Feed
    subtitle = ObjectActivityFeed.description


class ModelActivityFeed(Feed):
    def get_object(self, request, content_type_id):
        return get_object_or_404(ContentType, pk=content_type_id).model_class()

    def title(self, model):
        return 'Activity feed from %s' % model

    def link(self, model):
        return reverse('actstream_model', None,
                       (ContentType.objects.get_for_model(model).pk,))

    def description(self, model):
        return 'Public activities of %s' % model

    def items(self, model):
        i = model_stream(model)
        if i:
            return i[:30]
        return []


class AtomModelActivityFeed(ModelActivityFeed):
    feed_type = Atom1Feed
    subtitle = ModelActivityFeed.description


class UserActivityFeed(Feed):
    def get_object(self, request):
        if request.user.is_authenticated():
            return request.user

    def title(self, user):
        return 'Activity feed for your followed actors'

    def link(self, user):
        if not user:
            return reverse('actstream')
        if hasattr(user, 'get_absolute_url'):
            return user.get_absolute_url()
        return reverse('actstream_actor', None,
                       (ContentType.objects.get_for_model(user).pk, user.pk))

    def description(self, user):
        return 'Public activities of actors you follow'

    def items(self, user):
        i = user_stream(user)
        if i:
            return i[:30]
        return []


class AtomUserActivityFeed(UserActivityFeed):
    feed_type = Atom1Feed
    subtitle = UserActivityFeed.description
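
# ----------------------------------------------------------------------
# Illustrative URLconf wiring (not from the original module) for one of
# the feeds; the pattern and name are hypothetical, using the old-style
# URL API that matches the django.core.urlresolvers import above.
#
#     from django.conf.urls import url
#     from actstream.feeds import ObjectActivityFeed
#
#     urlpatterns = [
#         url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$',
#             ObjectActivityFeed(), name='object_feed'),
#     ]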
|
{
"content_hash": "7dbb18a931fd19cb47b9349639c78750",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 95,
"avg_line_length": 32.26966292134831,
"alnum_prop": 0.6340529247910863,
"repo_name": "netconstructor/django-activity-stream",
"id": "e2b9fd5f2368ce12d1600613bce6e8725d73b747",
"size": "2872",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "actstream/feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from .compat import unittest

import ucl
import json
import os.path
import glob
import re

TESTS_SCHEMA_FOLDER = '../tests/schema/*.json'

comment_re = re.compile('\/\*((?!\*\/).)*?\*\/', re.DOTALL | re.MULTILINE)


def json_remove_comments(content):
    return comment_re.sub('', content)


class ValidationTest(unittest.TestCase):
    def validate(self, jsonfile):
        def perform_test(schema, data, valid, description):
            msg = '%s (valid=%r)' % (description, valid)
            if valid:
                self.assertTrue(ucl.validate(schema, data), msg)
            else:
                with self.assertRaises(ucl.SchemaError):
                    ucl.validate(schema, data)
                    self.fail(msg)  # fail() will be called only if SchemaError is not raised

        with open(jsonfile) as f:
            try:
                # data = json.load(f)
                data = json.loads(json_remove_comments(f.read()))
            except ValueError as e:
                raise self.skipTest('Failed to load JSON: %s' % str(e))

        for testgroup in data:
            for test in testgroup['tests']:
                perform_test(testgroup['schema'], test['data'],
                             test['valid'], test['description'])

    @classmethod
    def setupValidationTests(cls):
        """Creates each test dynamically from a folder"""
        def test_gen(filename):
            def run_test(self):
                self.validate(filename)
            return run_test

        for jsonfile in glob.glob(TESTS_SCHEMA_FOLDER):
            testname = os.path.splitext(os.path.basename(jsonfile))[0]
            setattr(cls, 'test_%s' % testname, test_gen(jsonfile))


ValidationTest.setupValidationTests()
|
{
"content_hash": "34f4f98e1f3e257b965e742d99a3fe9a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 92,
"avg_line_length": 34.72,
"alnum_prop": 0.5754608294930875,
"repo_name": "fichtner/libucl",
"id": "f7c853ad69a75a4ec5afb71869a35d752d6b4a12",
"size": "1736",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/tests/test_validation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "590945"
},
{
"name": "C++",
"bytes": "15053"
},
{
"name": "CMake",
"bytes": "9014"
},
{
"name": "Haskell",
"bytes": "5338"
},
{
"name": "Lua",
"bytes": "881"
},
{
"name": "M4",
"bytes": "34366"
},
{
"name": "Makefile",
"bytes": "6379"
},
{
"name": "PHP",
"bytes": "392"
},
{
"name": "Pascal",
"bytes": "20"
},
{
"name": "Python",
"bytes": "7918"
},
{
"name": "Shell",
"bytes": "3859"
}
],
"symlink_target": ""
}
|
class Solution:
    def scoreOfParentheses(self, S):
        # Stack-based scoring: push '(' markers; on ')', pop and sum every
        # score accumulated inside the matching pair, then push twice that
        # sum (or 1 for an empty pair "()").
        st = []
        for x in S:
            if x == '(':
                st.append(x)
            else:
                temp = []
                while st[-1] != '(':
                    temp.append(st.pop())
                st.pop()  # discard the matching '('
                st.append(sum(temp) * 2 or 1)
        return sum(st)


print(Solution().scoreOfParentheses("()"))        # 1
print(Solution().scoreOfParentheses("(())"))      # 2
print(Solution().scoreOfParentheses("()()"))      # 2
print(Solution().scoreOfParentheses("(()(()))"))  # 6
|
{
"content_hash": "a5338a9069106fa169bee27413bd5a1d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 32.8125,
"alnum_prop": 0.47619047619047616,
"repo_name": "zuun77/givemegoogletshirts",
"id": "246ba00a3985b572c4ac18e651be49088c75f287",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/python/856_score-of-parentheses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "40768"
},
{
"name": "Python",
"bytes": "208749"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals

from sys import version_info

PY3 = version_info[0] == 3

if PY3:
    bytes = bytes
    unicode = str
else:
    bytes = str
    unicode = unicode
string_types = (bytes, unicode,)

try:
    import urllib2 as urllib
    URLError = urllib.URLError
except ImportError:
    import urllib.request as urllib
    from urllib.error import URLError

try:
    from contextlib import ignored
except ImportError:
    from contextlib import contextmanager

    @contextmanager
    def ignored(*exceptions):
        try:
            yield
        except tuple(exceptions):
            pass

# note that cgi.escape is deprecated and was removed in 3.8
try:
    from html import escape
except ImportError:
    from cgi import escape
|
{
"content_hash": "a034d6988acc4ebe3d53477fc496149e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 65,
"avg_line_length": 17.65909090909091,
"alnum_prop": 0.7232947232947233,
"repo_name": "miso-belica/jusText",
"id": "3f0242d314512e1d02f2dc4ec7df73e27c5c87a2",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "justext/_compat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1701"
},
{
"name": "JavaScript",
"bytes": "2415"
},
{
"name": "Python",
"bytes": "68521"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import os

from admin_tools import VERSION

# taken from django-registration
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
    os.chdir(root_dir)

for dirpath, dirnames, filenames in os.walk('admin_tools'):
    # Ignore dirnames that start with '.'
    for i, dirname in enumerate(dirnames):
        if dirname.startswith('.'):
            del dirnames[i]
    if '__init__.py' in filenames:
        pkg = dirpath.replace(os.path.sep, '.')
        if os.path.altsep:
            pkg = pkg.replace(os.path.altsep, '.')
        packages.append(pkg)
    elif filenames:
        prefix = dirpath[12:]  # Strip "admin_tools/" or "admin_tools\"
        for f in filenames:
            data_files.append(os.path.join(prefix, f))

bitbucket_url = 'http://www.bitbucket.org/izi/django-admin-tools/'

long_desc = '''
%s

%s
''' % (open('README.rst').read(), open('CHANGELOG').read())

setup(
    name='django-admin-tools',
    version=VERSION.replace(' ', '-'),
    description=('A collection of tools for the django administration '
                 'interface'),
    long_description=long_desc,
    author='David Jean Louis',
    author_email='izimobil@gmail.com',
    url=bitbucket_url,
    download_url='%sdownloads/django-admin-tools-%s.tar.gz' % (bitbucket_url, VERSION),
    package_dir={'admin_tools': 'admin_tools'},
    packages=packages,
    package_data={'admin_tools': data_files},
    license='MIT License',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)
|
{
"content_hash": "a04148be7264a1643492ab3bf9e0dd94",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 87,
"avg_line_length": 33.16949152542373,
"alnum_prop": 0.631578947368421,
"repo_name": "proft/django-admin-tools",
"id": "a8b65ed1d12ebdd1ad881975a0c53ab0078dfe86",
"size": "1979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "33680"
},
{
"name": "Python",
"bytes": "119043"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
}
|
'''
https://www.mozilla.org/en-US/projects/calendar/holidays/
'''

#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Bokeh imports
from ..util.dependencies import import_required
from ..util.sampledata import package_path

#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------

__all__ = (
    'us_holidays',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

def _read_data():
    '''
    '''
    ic = import_required('icalendar', "us_holidays data requires icalendar (http://icalendar.readthedocs.org) to be installed")
    with open(package_path("USHolidays.ics")) as f:
        data = ic.Calendar.from_ical(f.read())
        return sorted((comp.get("dtstart").dt, str(comp.get("summary"))) for comp in data.walk() if comp.name == "VEVENT")

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

us_holidays = _read_data()
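
#-----------------------------------------------------------------------------
# Usage sketch (not part of the original module): each entry in the resulting
# list is a (date, summary) tuple, sorted by date.
#
#     from bokeh.sampledata.us_holidays import us_holidays
#     for date, name in us_holidays[:3]:
#         print(date, name)
#-----------------------------------------------------------------------------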
|
{
"content_hash": "3ff1908be0241c47353511516281756b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 127,
"avg_line_length": 33.75,
"alnum_prop": 0.2767195767195767,
"repo_name": "ericmjl/bokeh",
"id": "2ec707f222f51b5a3cba1684a984ab19a52ce6be",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/sampledata/us_holidays.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
}
|
import os, requests, random, time
from selenium_test_case import SeleniumTestCase, slow, online, wd, host
import tests
from tests.pages import signup_page, accounts_page, profile_page
from nose.tools import assert_equals, assert_in, assert_greater_equal, assert_items_equal, raises
class TestSignupAndAccounts(SeleniumTestCase):
def create_user(self, given_name, surname):
# the email suffex @test-impactstory.org is used to decide what accounts to delete,
# so don't change without changing the DELETE /tests endpoint in webapp
test_email = "test" + str(random.randint(1000, 9999)) + "@test-impactstory.org"
self.page.fill_signup_form(given_name, surname, test_email, "pass123")
def connect_accounts(self, account_testing_data):
self.page.start_connected_accounts()
for account_name in account_testing_data:
print "Connecting account", account_name
assert_equals(self.page.is_account_connected(account_name), False)
self.page.fill_account_tile(account_name, account_testing_data[account_name]["username"])
# assert_equals(self.page.is_account_connected(account_name), True)
self.page.finish_connected_accounts()
def check_products(self, account_testing_data):
total_products = sum([account_testing_data[account]["number_products"] for account in account_testing_data])
# assert_equals(self.profile_page.number_products, total_products)
for account_name in account_testing_data:
print "Checking account", account_name
for product in account_testing_data[account_name]["products"]:
if "title" in product:
title = product["title"]
print title
assert_in(title, self.profile_page.product_titles)
if "awards" in product:
self.awards_equal(self.profile_page.awards(title), product["awards"])
# if "hover_stats" in product:
# self.hover_stats_equal(self.profile_page.hover_stats(title), product["hover_stats"])
def check_profile_page(self, full_name):
assert_equals(self.profile_page.name, full_name)
def awards_equal(self, list_a, list_b):
print list_a, list_b
list_a = [stat.replace("highly ", "") for stat in list_a]
list_b = [stat.replace("highly ", "") for stat in list_b]
assert_items_equal(list_a, list_b)
def hover_stats_equal(self, list_a, list_b):
print list_a, list_b
assert_items_equal([stat_a["metric_name"] for stat_a in list_a], [stat_b["metric_name"] for stat_b in list_b])
for stat_a in list_a:
relevent_metric_dict = [stat_b for stat_b in list_b if stat_b["metric_name"]==stat_a["metric_name"]]
assert_greater_equal(int(stat_a["stat"]), int(relevent_metric_dict[0]["stat"]))
def test_signup_and_accounts(self):
self.page = signup_page.SignupPage(self.wd, self.host)
given_name = "Clark"
surname = "Kent"
account_testing_data = {
"orcid": {
"username": "0000-0001-6187-6610",
"number_products": 4,
"products": [{
"title": "How and why scholars cite on Twitter",
"awards": [u'cited', u'saved', u'saved', u'discussed'],
"hover_stats": [{'stat': u'16', 'metric_name': u'Scopus citations'}, {'stat': u'129', 'metric_name': u'Mendeley readers'}, {'stat': u'1', 'metric_name': u'Delicious bookmark'}, {'stat': u'18', 'metric_name': u'Altmetric.com tweets'}]
}]
}
# ,
# "github": {
# "username": "tjv",
# "number_products": 4,
# "products": [{
# "title": "hapnotes",
# "awards": [u'recommended', u'cited'],
# "hover_stats": [{'stat': u'1', 'metric_name': u'GitHub star'}, {'stat': u'1', 'metric_name': u'GitHub fork'}]
# }]
# },
# "figshare": {
# "username": "http://figshare.com/authors/Jason_Priem/100944",
# "number_products": 2,
# "products": [{
# "title": "Toward a comprehensive impact report for every software project",
# "awards": [u'discussed', u'viewed', u'saved', u'discussed'],
# "hover_stats": [{'stat': u'16', 'metric_name': u'figshare shares'}, {'stat': u'344', 'metric_name': u'figshare views'}, {'stat': u'4', 'metric_name': u'Delicious bookmarks'}, {'stat': u'30', 'metric_name': u'Altmetric.com tweets'}]
# }]
# },
# "slideshare": {
# "username": "jaybhatt",
# "number_products": 9,
# "products": [{
# "title": "ENDNOTE presentation",
# "awards": [u'recommended', u'saved', u'discussed', u'viewed'],
# "hover_stats": [{'stat': u'3', 'metric_name': u'SlideShare favorites'}, {'stat': u'1', 'metric_name': u'Delicious bookmark'}, {'stat': u'1', 'metric_name': u'SlideShare comment'}, {'stat': u'7603', 'metric_name': u'SlideShare views'}]
# }]
# }
# ,
# "google_scholar": {
# "username": "http://scholar.google.ca/citations?user=AwwuwS0AAAAJ",
# "number_products": 0
# }
}
self.page.get()
self.wd.maximize_window()
self.create_user(given_name, surname)
self.page = accounts_page.AccountsPage(self.wd, self.host, self.page.url_slug)
self.connect_accounts(account_testing_data)
# self.page.url_slug = "ClarkKent" #comment
self.profile_page = profile_page.ProfilePage(self.wd, self.host, self.page.url_slug)
        # comment this out once the page-refresh code is in place
self.profile_page.get() #comment
print self.wd.current_url
print "waiting till done updating"
self.profile_page.wait_till_done_updating()
self.check_profile_page(given_name+" "+surname)
print "DONE updating"
self.check_products(account_testing_data)
|
{
"content_hash": "3a959db57f5c477535d2ff7c44aaa32d",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 256,
"avg_line_length": 43.97931034482759,
"alnum_prop": 0.5582562333385604,
"repo_name": "Impactstory/impactstory-tester",
"id": "8cd52031ecf0e0019d0669e5afec8b0666dd5ea2",
"size": "6377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_signup_and_accounts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38842"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('coopInfo', '0003_auto_20150129_1128'),
]
operations = [
migrations.AddField(
model_name='cooperative',
name='is990present',
field=models.BooleanField(default=False),
preserve_default=True,
),
migrations.AlterField(
model_name='cooperative',
name='streetAddress',
field=models.CharField(max_length=150),
preserve_default=True,
),
]
|
{
"content_hash": "afc154ebbbd46ea5cfc422de6d6ee094",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 53,
"avg_line_length": 25.08,
"alnum_prop": 0.580542264752791,
"repo_name": "antoineclaval/ruralpowerproject",
"id": "b654cb330f9a9fea7e78b26d5ba9677e30105a98",
"size": "651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ruralpowerproject/coopInfo/migrations/0004_auto_20150203_1258.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2614"
},
{
"name": "HTML",
"bytes": "23995"
},
{
"name": "JavaScript",
"bytes": "3257"
},
{
"name": "PLpgSQL",
"bytes": "75440"
},
{
"name": "Python",
"bytes": "58310"
}
],
"symlink_target": ""
}
|
"""
rohmu - timestamp handling
Copyright (c) 2017 Ohmu Ltd
See LICENSE for details
"""
import datetime
import dateutil.parser
import dateutil.tz
def parse_timestamp(ts, *, with_tz=True, assume_local=False):
"""Parse a given timestamp and return a datetime object with or without tzinfo.
If `with_tz` is False and we can't parse a timezone from the timestamp the datetime object is returned
as-is and we assume the timezone is whatever was requested. If `with_tz` is False and we can parse a
timezone, the timestamp is converted to either local or UTC time based on `assume_local` after which tzinfo
is stripped and the timestamp is returned.
When `with_tz` is True and there's a timezone in the timestamp we return it as-is. If `with_tz` is True
but we can't parse a timezone we add either local or UTC timezone to the datetime based on `assume_local`.
"""
parse_result = dateutil.parser.parse(ts)
# pylint thinks dateutil.parser.parse always returns a tuple even though we didn't request it.
# So this check is pointless but convinces pylint that we really have a datetime object now.
dt = parse_result[0] if isinstance(parse_result, tuple) else parse_result # pylint: disable=unsubscriptable-object
if with_tz is False:
if not dt.tzinfo:
return dt
tz = dateutil.tz.tzlocal() if assume_local else datetime.timezone.utc
return dt.astimezone(tz).replace(tzinfo=None)
if dt.tzinfo:
return dt
tz = dateutil.tz.tzlocal() if assume_local else datetime.timezone.utc
return dt.replace(tzinfo=tz)
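# A minimal usage sketch of the four timezone-handling cases described in the
# docstring above; the timestamps are illustrative, not from the original module.
if __name__ == "__main__":
    # tz-aware input is returned as-is when with_tz=True
    print(parse_timestamp("2017-01-02T03:04:05+02:00"))
    # naive input gets UTC attached when with_tz=True and assume_local=False
    print(parse_timestamp("2017-01-02T03:04:05"))
    # naive input is returned as-is when with_tz=False
    print(parse_timestamp("2017-01-02T03:04:05", with_tz=False))
    # tz-aware input is converted to UTC and stripped when with_tz=False
    print(parse_timestamp("2017-01-02T03:04:05+02:00", with_tz=False))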
|
{
"content_hash": "5743f977251b4ae1db7c9296e832c05d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 119,
"avg_line_length": 40.25,
"alnum_prop": 0.7186335403726708,
"repo_name": "saaros/pghoard",
"id": "5b114c92932b19363764ba9ee45754df55fd20d3",
"size": "1610",
"binary": false,
"copies": "2",
"ref": "refs/heads/basebackup_schedule",
"path": "pghoard/rohmu/dates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1692"
},
{
"name": "Python",
"bytes": "344087"
}
],
"symlink_target": ""
}
|
"""
Component to interface with various sensors that can be monitored.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sensor/
"""
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.components import (
wink, zwave, isy994, verisure, ecobee, tellduslive, mysensors,
bloomsky, vera)
DOMAIN = 'sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Maps discovered services to their platforms
DISCOVERY_PLATFORMS = {
bloomsky.DISCOVER_SENSORS: 'bloomsky',
wink.DISCOVER_SENSORS: 'wink',
zwave.DISCOVER_SENSORS: 'zwave',
isy994.DISCOVER_SENSORS: 'isy994',
verisure.DISCOVER_SENSORS: 'verisure',
ecobee.DISCOVER_SENSORS: 'ecobee',
tellduslive.DISCOVER_SENSORS: 'tellduslive',
mysensors.DISCOVER_SENSORS: 'mysensors',
vera.DISCOVER_SENSORS: 'vera',
}
def setup(hass, config):
"""Track states and offer events for sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL,
DISCOVERY_PLATFORMS)
component.setup(config)
return True
|
{
"content_hash": "ba5d9b17864a78722e68583bd7ebf279",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 28.5609756097561,
"alnum_prop": 0.7241673783091375,
"repo_name": "aoakeson/home-assistant",
"id": "48dee4e169b05e22102479efcfbf2d804ef9974b",
"size": "1171",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1510170"
},
{
"name": "Python",
"bytes": "1994353"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
class ExtractionModule1:
    @staticmethod
    def find_queries(html):
        """ Finds queries and extracts them from websites.
        Args:
            html: HTML response which contains HTML text
        Returns:
A list of queries in the form of strings.
"""
soup = BeautifulSoup(html.text, "html.parser")
queries = []
code_blocks = soup.find_all("pre", class_="codeblock")
for block in code_blocks:
queries += [block.contents[0]]
return queries
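# A minimal usage sketch. ``FakeResponse`` stands in for the requests-style
# response (anything with a ``.text`` attribute) that ``find_queries`` expects;
# it is illustrative and not part of the original module.
if __name__ == "__main__":
    class FakeResponse:
        text = '<html><pre class="codeblock">SELECT 1</pre></html>'
    print(ExtractionModule1.find_queries(FakeResponse()))  # ['SELECT 1']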
|
{
"content_hash": "67ad7c85f2adcba9856fd629325754bd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 26.90909090909091,
"alnum_prop": 0.5388513513513513,
"repo_name": "GoogleCloudPlatform/bigquery-utils",
"id": "f6ff6b3a0fa907f172a1358a52947ef3d9293b31",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/unsupervised_dataset/sql_crawler/extraction_modules/extraction_module_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "63548"
},
{
"name": "Dockerfile",
"bytes": "829"
},
{
"name": "HCL",
"bytes": "11174"
},
{
"name": "HTML",
"bytes": "2080"
},
{
"name": "Java",
"bytes": "413997"
},
{
"name": "JavaScript",
"bytes": "87659"
},
{
"name": "Kotlin",
"bytes": "77893"
},
{
"name": "Python",
"bytes": "254890"
},
{
"name": "Shell",
"bytes": "34086"
},
{
"name": "Starlark",
"bytes": "11353"
},
{
"name": "TypeScript",
"bytes": "34337"
}
],
"symlink_target": ""
}
|
import unittest
from airflow.contrib.operators.adls_list_operator import AzureDataLakeStorageListOperator
from tests.compat import mock
TASK_ID = 'test-adls-list-operator'
TEST_PATH = 'test/*'
MOCK_FILES = ["test/TEST1.csv", "test/TEST2.csv", "test/path/TEST3.csv",
"test/path/PARQUET.parquet", "test/path/PIC.png"]
class AzureDataLakeStorageListOperatorTest(unittest.TestCase):
@mock.patch('airflow.contrib.operators.adls_list_operator.AzureDataLakeHook')
def test_execute(self, mock_hook):
mock_hook.return_value.list.return_value = MOCK_FILES
operator = AzureDataLakeStorageListOperator(task_id=TASK_ID,
path=TEST_PATH)
files = operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(
path=TEST_PATH
)
self.assertEqual(sorted(files), sorted(MOCK_FILES))
|
{
"content_hash": "e00866150bc9f9d344cf617c1120301c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 89,
"avg_line_length": 36.68,
"alnum_prop": 0.6739367502726281,
"repo_name": "owlabs/incubator-airflow",
"id": "f668b55a5a80f25e1de3aef95cecd5df012c3a9d",
"size": "1729",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/contrib/operators/test_adls_list_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/droid_interface/shared_ddi_hk_mk1.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","ddi_hk_mk1_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "3a0649d50927061662a3918b08d0537e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7021276595744681,
"repo_name": "obi-two/Rebelion",
"id": "2989c094d7b224c88c80b2ae09b10cd7847d5353",
"size": "474",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/components/droid_interface/shared_ddi_hk_mk1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import sys
import time
import os
sys.path.insert(0, "python")
import libxml2
test_nr = 0
test_succeed = 0
test_failed = 0
test_error = 0
#
# the testsuite description
#
CONF="xml-test-suite/xmlconf/xmlconf.xml"
LOG="check-xml-test-suite.log"
log = open(LOG, "w")
#
# Error and warning handlers
#
error_nr = 0
error_msg = ''
def errorHandler(ctx, str):
global error_nr
global error_msg
error_nr = error_nr + 1
if len(error_msg) < 300:
if len(error_msg) == 0 or error_msg[-1] == '\n':
error_msg = error_msg + " >>" + str
else:
error_msg = error_msg + str
libxml2.registerErrorHandler(errorHandler, None)
#warning_nr = 0
#warning = ''
#def warningHandler(ctx, str):
# global warning_nr
# global warning
#
# warning_nr = warning_nr + 1
# warning = warning + str
#
#libxml2.registerWarningHandler(warningHandler, None)
#
# Used to load the XML testsuite description
#
def loadNoentDoc(filename):
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return None
ctxt.replaceEntities(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
    if ctxt.wellFormed() != 1:
        if doc is not None:
            doc.freeDoc()
        return None
return doc
#
# The conformance testing routines
#
def testNotWf(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return -1
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print("%s: error: Well Formedness error not detected" % (id))
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEnt(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return -1
ctxt.replaceEntities(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print("%s: error: Well Formedness error not detected" % (id))
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print("%s: error: Well Formedness error not detected" % (id))
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc is None or ret != 0 or ctxt.wellFormed() == 0:
print("%s: error: wrongly failed to parse the document" % (id))
log.write("%s: error: wrongly failed to parse the document\n" % (id))
if doc != None:
doc.freeDoc()
return 0
if error_nr != 0:
print("%s: warning: WF document generated an error msg" % (id))
log.write("%s: error: WF document generated an error msg\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testError(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ctxt.wellFormed() == 0:
print("%s: warning: failed to parse the document but accepted" % (id))
log.write("%s: warning: failed to parse the document but accepte\n" % (id))
return 2
if error_nr != 0:
print("%s: warning: WF document generated an error msg" % (id))
log.write("%s: error: WF document generated an error msg\n" % (id))
return 2
return 1
def testInvalid(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return -1
ctxt.validate(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc is None:
print("%s: error: wrongly failed to parse the document" % (id))
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid == 1:
print("%s: error: Validity error not detected" % (id))
log.write("%s: error: Validity error not detected\n" % (id))
doc.freeDoc()
return 0
if error_nr == 0:
print("%s: warning: Validity error not reported" % (id))
log.write("%s: warning: Validity error not reported\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testValid(filename, id):
global error_nr
global error_msg
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt is None:
return -1
ctxt.validate(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc is None:
print("%s: error: wrongly failed to parse the document" % (id))
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid != 1:
print("%s: error: Validity check failed" % (id))
log.write("%s: error: Validity check failed\n" % (id))
doc.freeDoc()
return 0
    if error_nr != 0:
print("%s: warning: valid document reported an error" % (id))
log.write("%s: warning: valid document reported an error\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def runTest(test):
    global test_nr
    global test_succeed
    global test_failed
    global test_error
    global error_msg
    global log
uri = test.prop('URI')
id = test.prop('ID')
    if uri is None:
        print("Test without URI:", id)
        return -1
    if id is None:
        print("Test without ID:", uri)
        return -1
base = test.getBase(None)
URI = libxml2.buildURI(uri, base)
if os.access(URI, os.R_OK) == 0:
print("Test %s missing: base %s uri %s" % (URI, base, uri))
return -1
type = test.prop('TYPE')
if type is None:
print("Test %s missing TYPE" % (id))
return -1
extra = None
if type == "invalid":
res = testInvalid(URI, id)
elif type == "valid":
res = testValid(URI, id)
elif type == "not-wf":
extra = test.prop('ENTITIES')
# print(URI)
#if extra is None:
# res = testNotWfEntDtd(URI, id)
#elif extra == 'none':
# res = testNotWf(URI, id)
#elif extra == 'general':
# res = testNotWfEnt(URI, id)
#elif extra == 'both' or extra == 'parameter':
res = testNotWfEntDtd(URI, id)
#else:
# print("Unknown value %s for an ENTITIES test value" % (extra))
# return -1
elif type == "error":
res = testError(URI, id)
else:
# TODO skipped for now
return -1
test_nr = test_nr + 1
if res > 0:
test_succeed = test_succeed + 1
elif res == 0:
test_failed = test_failed + 1
elif res < 0:
test_error = test_error + 1
    # Log the context
if res != 1:
log.write(" File: %s\n" % (URI))
        # strip() already removes trailing newlines
        content = test.content.strip()
if extra != None:
log.write(" %s:%s:%s\n" % (type, extra, content))
else:
log.write(" %s:%s\n\n" % (type, content))
if error_msg != '':
log.write(" ----\n%s ----\n" % (error_msg))
error_msg = ''
log.write("\n")
return 0
def runTestCases(case):
profile = case.prop('PROFILE')
if profile != None and \
profile.find("IBM XML Conformance Test Suite - Production") < 0:
print("=>", profile)
test = case.children
while test != None:
if test.name == 'TEST':
runTest(test)
if test.name == 'TESTCASES':
runTestCases(test)
test = test.next
conf = loadNoentDoc(CONF)
if conf is None:
print("Unable to load %s" % CONF)
sys.exit(1)
testsuite = conf.getRootElement()
if testsuite.name != 'TESTSUITE':
print("Expecting TESTSUITE root element: aborting")
sys.exit(1)
profile = testsuite.prop('PROFILE')
if profile != None:
print(profile)
start = time.time()
case = testsuite.children
while case != None:
if case.name == 'TESTCASES':
old_test_nr = test_nr
old_test_succeed = test_succeed
old_test_failed = test_failed
old_test_error = test_error
runTestCases(case)
print(" Ran %d tests: %d succeeded, %d failed and %d generated an error" % (
test_nr - old_test_nr, test_succeed - old_test_succeed,
test_failed - old_test_failed, test_error - old_test_error))
case = case.next
conf.freeDoc()
log.close()
print("Ran %d tests: %d succeeded, %d failed and %d generated an error in %.2f s." % (
test_nr, test_succeed, test_failed, test_error, time.time() - start))
|
{
"content_hash": "99f9d39a6c959c254f6ac5b7b1f9e2f7",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 86,
"avg_line_length": 25.167076167076168,
"alnum_prop": 0.5693644440105438,
"repo_name": "nwjs/chromium.src",
"id": "cecb59b77f24c0c3a37f3c33bbb3634e5a810fdf",
"size": "10265",
"binary": false,
"copies": "11",
"ref": "refs/heads/nw70",
"path": "third_party/libxml/src/check-xml-test-suite.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
URLconf for registration and activation, using django-registration's
one-step backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.simple.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead.
"""
from django.conf.urls import *
from django.views.generic import TemplateView
from registration.views import activate
from registration.views import register
urlpatterns = patterns('',
url(r'^register/$',
register,
{'backend': 'registration.backends.simple.SimpleBackend'},
name='registration_register'),
url(r'^register/closed/$',
TemplateView.as_view(template_name='registration/registration_closed.html'),
name='registration_disallowed'),
(r'', include('registration.auth_urls')),
)
|
{
"content_hash": "45740ef0e9ba931c368724cb4a82dc82",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 103,
"avg_line_length": 35.91891891891892,
"alnum_prop": 0.6380737396538751,
"repo_name": "jlovison/django-registration",
"id": "18e738cf2c9413c907745251d0aee65a200b8cda",
"size": "1329",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "registration/backends/simple/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "103131"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
__version__ = 2015.02
from PTMCMCSampler import *
def test():
# Run some tests here
print("{0} tests have passed".format(0))
|
{
"content_hash": "9e2c9f027fc9525b9e4489694abcc474",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 64,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.6865671641791045,
"repo_name": "vhaasteren/PTMCMCSampler",
"id": "1c91ff5a686420a8fbca63815a99fb90478e64ed",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PTMCMCSampler/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36577"
},
{
"name": "Shell",
"bytes": "407"
}
],
"symlink_target": ""
}
|
import os
import testscenarios
import nova.conf
from nova.tests import fixtures
from nova.tests.fixtures import api_paste as api_paste_fixture
from nova.tests.functional import api_samples_test_base
CONF = nova.conf.CONF
# API samples heavily uses testscenarios. This allows us to use the
# same tests, with slight variations in configuration to ensure our
# various ways of calling the API are compatible. Testscenarios works
# through the class level ``scenarios`` variable. It is an array of
# tuples where the first value in each tuple is an arbitrary name for
# the scenario (should be unique), and the second item is a dictionary
# of attributes to change in the class for the test.
#
# By default we're running scenarios for 2 situations
#
# - Hitting the default /v2 endpoint with the v2.1 Compatibility stack
#
# - Hitting the default /v2.1 endpoint
#
# Things we need to set:
#
# - api_major_version - what version of the API we should be hitting
#
# - microversion - what API microversion should be used
#
# - _additional_fixtures - any additional fixtures need
#
# NOTE(sdague): if you want to build a test that only tests specific
# microversions, then replace the ``scenarios`` class variable in that
# test class with something like:
#
# [("v2_11", {'api_major_version': 'v2.1', 'microversion': '2.11'})]
class ApiSampleTestBaseV21(testscenarios.WithScenarios,
api_samples_test_base.ApiSampleTestBase):
SUPPORTS_CELLS = False
api_major_version = 'v2'
# any additional fixtures needed for this scenario
_additional_fixtures = []
sample_dir = None
# Include the project ID in request URLs by default. This is overridden
# for certain `scenarios` and by certain subclasses.
# Note that API sample tests also use this in substitutions to validate
# that URLs in responses (e.g. location of a server just created) are
# correctly constructed.
USE_PROJECT_ID = True
# Availability zones for the API samples tests. Can be overridden by
# sub-classes. If set, the AvailabilityZoneFilter is not used.
availability_zones = ['us-west']
scenarios = [
# test v2 with the v2.1 compatibility stack
('v2', {
'api_major_version': 'v2'}),
# test v2.1 base microversion
('v2_1', {
'api_major_version': 'v2.1'}),
# test v2.18 code without project id
('v2_1_noproject_id', {
'api_major_version': 'v2.1',
'USE_PROJECT_ID': False,
'_additional_fixtures': [
api_paste_fixture.ApiPasteNoProjectId]})
]
def setUp(self):
self.flags(glance_link_prefix=self._get_glance_host(),
compute_link_prefix=self._get_host(),
group='api')
# load any additional fixtures specified by the scenario
for fix in self._additional_fixtures:
self.useFixture(fix())
if not self.SUPPORTS_CELLS:
# NOTE(danms): Disable base automatic DB (and cells) config
self.USES_DB = False
self.USES_DB_SELF = True
# super class call is delayed here so that we have the right
# paste and conf before loading all the services, as we can't
# change these later.
super(ApiSampleTestBaseV21, self).setUp()
if not self.SUPPORTS_CELLS:
self.useFixture(fixtures.Database())
self.useFixture(fixtures.Database(database='api'))
self.useFixture(fixtures.DefaultFlavorsFixture())
self.useFixture(fixtures.SingleCellSimple())
super(ApiSampleTestBaseV21, self)._setup_services()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
# this is used to generate sample docs
self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
if self.availability_zones:
self.useFixture(
fixtures.AvailabilityZoneFixture(self.availability_zones))
def _setup_services(self):
pass
def _setup_scheduler_service(self):
"""Overrides _IntegratedTestBase._setup_scheduler_service to filter
out the AvailabilityZoneFilter prior to starting the scheduler.
"""
if self.availability_zones:
# The test is using fake zones so disable the
# AvailabilityZoneFilter which is otherwise enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
if 'AvailabilityZoneFilter' in enabled_filters:
enabled_filters.remove('AvailabilityZoneFilter')
self.flags(enabled_filters=enabled_filters,
group='filter_scheduler')
return super(ApiSampleTestBaseV21, self)._setup_scheduler_service()
|
{
"content_hash": "ec637fa44ddd4e543d925ec518a81391",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 75,
"avg_line_length": 38.604838709677416,
"alnum_prop": 0.6634635471067475,
"repo_name": "mahak/nova",
"id": "be8e07a9411971195096af087cfe9a8acc7222c3",
"size": "5389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/api_sample_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
"""Script to load cnv data
To run: python load.py config_file
"""
import sys
from bigquery_etl.load import load_data_from_file
import json
import os
from bigquery_etl.utils.logging_manager import configure_logging
def load(config):
"""
Load the bigquery table
load_data_from_file accepts following params:
project_id, dataset_id, table_name, schema_file, data_path,
source_format, write_disposition, poll_interval, num_retries
"""
log = configure_logging('cnv_load', 'logs/cnv_load.log')
log.info('begin load of cnv into bigquery')
schemas_dir = os.environ.get('SCHEMA_DIR', 'schemas/')
#print "Loading Methylation 450K data into BigQuery.."
#load_data_from_file.run(
# config['project_id'],
# config['bq_dataset'],
# config['methylation']['bq_table'],
# schemas_dir + config['methylation']['schema_file'],
# 'gs://' + config['buckets']['open'] + '/' +\
# config['methylation']['output_dir'] + 'HumanMethylation450/*',
# 'CSV',
# 'WRITE_EMPTY'
#)
dir_prefix = config['cnv']['output_dir_prefix']
dir_suffixes = config['cnv']['output_dir_suffixes']
for dir_suffix in dir_suffixes:
log.info("\tLoading CNV data into BigQuery from %s..." % (dir_prefix + dir_suffix))
load_data_from_file.run(
config['project_id'],
config['bq_dataset'],
config['cnv']['bq_table'],
schemas_dir + config['cnv']['schema_file'],
'gs://' + config['buckets']['open'] + '/' +\
dir_prefix + dir_suffix + '*',
'NEWLINE_DELIMITED_JSON',
'WRITE_APPEND'
)
log.info("*"*30)
log.info('finished load of CNV into bigquery')
if __name__ == '__main__':
load(json.load(open(sys.argv[1])))
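# An illustrative shape for the JSON config this script expects, inferred from
# the keys read above; all values are placeholders, not real settings.
#
# {
#   "project_id": "my-project",
#   "bq_dataset": "my_dataset",
#   "buckets": {"open": "my-open-bucket"},
#   "cnv": {
#     "bq_table": "CNV",
#     "schema_file": "cnv_schema.json",
#     "output_dir_prefix": "cnv/",
#     "output_dir_suffixes": ["batch1/", "batch2/"]
#   }
# }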
|
{
"content_hash": "676325cdd33a4f6e79dc4b766dd31696",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 91,
"avg_line_length": 33.7962962962963,
"alnum_prop": 0.5863013698630137,
"repo_name": "isb-cgc/ISB-CGC-data-proc",
"id": "7b0dd81fce84d91608d9991b8cd1116be4f676b9",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tcga_etl_pipeline/cnv/load.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6576"
},
{
"name": "Python",
"bytes": "1169886"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
}
|
from google.cloud import dialogflow_v2
async def sample_list_session_entity_types():
# Create a client
client = dialogflow_v2.SessionEntityTypesAsyncClient()
# Initialize request argument(s)
request = dialogflow_v2.ListSessionEntityTypesRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_session_entity_types(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dialogflow_v2_generated_SessionEntityTypes_ListSessionEntityTypes_async]
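# To actually run the coroutine above (illustrative, not part of the generated
# sample):
#   import asyncio
#   asyncio.run(sample_list_session_entity_types())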
|
{
"content_hash": "39aaa33d397d8d5ef7eb825e394888a2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 28.25,
"alnum_prop": 0.7309734513274336,
"repo_name": "googleapis/python-dialogflow",
"id": "2d5baceb409a8d8c335817fa23673b7654e1ea01",
"size": "1980",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v2_generated_session_entity_types_list_session_entity_types_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
""" Live settings for 9ms starter """
from ninecms_starter.settings import *
DEBUG = False
ALLOWED_HOSTS = [
# ...
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# STATIC_ROOT = # ...
STATICFILES_DIRS = []
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'TIMEOUT': 3 * 60 * 60,
'KEY_PREFIX': 'ninecms_starter_',
'VERSION': 1,
}
}
|
{
"content_hash": "fefb809bb304b68de31c83990713af0c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 73,
"avg_line_length": 23.4390243902439,
"alnum_prop": 0.5348595213319459,
"repo_name": "Wtower/django-ninecms-starter",
"id": "714d95680141d5361f466bb61a601f938703f464",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ninecms_starter/settings_live.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8924"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ShowlineValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showline", parent_name="layout.smith.imaginaryaxis", **kwargs
):
super(ShowlineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
{
"content_hash": "c17b71388a606fb530856502f3722b11",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 33.38461538461539,
"alnum_prop": 0.6175115207373272,
"repo_name": "plotly/plotly.py",
"id": "89035731f390bb7de527c5eaaec064bc28155254",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/smith/imaginaryaxis/_showline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""Exports Redis storage controllers."""
from marconi_redis.queues.storage.redis import catalogue
from marconi_redis.queues.storage.redis import claims
from marconi_redis.queues.storage.redis import messages
from marconi_redis.queues.storage.redis import queues
from marconi_redis.queues.storage.redis import shards
ClaimController = claims.ClaimController
MessageController = messages.MessageController
QueueController = queues.QueueController
CatalogueController = catalogue.CatalogueController
ShardsController = shards.ShardsController
|
{
"content_hash": "bd860f85ec464d8a9342a5a89e92b360",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 41.69230769230769,
"alnum_prop": 0.8560885608856088,
"repo_name": "cabrera/marconi-redis",
"id": "d788305cbb1510f8ecb27581ffe4f3a8e316dbcf",
"size": "1134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marconi_redis/queues/storage/redis/controllers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31375"
}
],
"symlink_target": ""
}
|
"""
=========================================
Visualize channel over epochs as an image
=========================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
Two images are produced, one with a good channel and one with a channel
that does not show any evoked field.
It is also demonstrated how to reorder the epochs using a 1D spectral
embedding as described in:
Graph-based variability estimation in single-trial event-related neural
responses A. Gramfort, R. Keriven, M. Clerc, 2010,
Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
https://hal.inria.fr/inria-00497023
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event related fields images
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.cluster.spectral import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
|
{
"content_hash": "4e97c1a67e25fe02eeb632cd285b0b2a",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 35.41772151898734,
"alnum_prop": 0.6290207290922087,
"repo_name": "teonlamont/mne-python",
"id": "98557011676b3a863cf2f5dce5913b52a69a4321",
"size": "2798",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "examples/visualization/plot_channel_epochs_image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4354605"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
import uuid
import base64
import json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
class DistributionConfig:
def __init__(self, connection=None, origin=None, enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, default_root_object=None,
logging=None):
"""
:param origin: Origin information to associate with the
distribution. If your distribution will use
an Amazon S3 origin, then this should be an
S3Origin object. If your distribution will use
a custom origin (non Amazon S3), then this
should be a CustomOrigin object.
:type origin: :class:`boto.cloudfront.origin.S3Origin` or
:class:`boto.cloudfront.origin.CustomOrigin`
:param enabled: Whether the distribution is enabled to accept
end user requests for content.
:type enabled: bool
:param caller_reference: A unique number that ensures the
request can't be replayed. If no
caller_reference is provided, boto
will generate a type 4 UUID for use
as the caller reference.
        :type caller_reference: str
:param cnames: A CNAME alias you want to associate with this
distribution. You can have up to 10 CNAME aliases
per distribution.
        :type cnames: list of str
:param comment: Any comments you want to include about the
distribution.
:type comment: str
:param trusted_signers: Specifies any AWS accounts you want to
permit to create signed URLs for private
content. If you want the distribution to
use signed URLs, this should contain a
TrustedSigners object; if you want the
distribution to use basic URLs, leave
this None.
:type trusted_signers: :class`boto.cloudfront.signers.TrustedSigners`
:param default_root_object: Designates a default root object.
Only include a DefaultRootObject value
if you are going to assign a default
root object for the distribution.
        :type default_root_object: str
:param logging: Controls whether access logs are written for the
distribution. If you want to turn on access logs,
this should contain a LoggingInfo object; otherwise
it should contain None.
:type logging: :class`boto.cloudfront.logging.LoggingInfo`
"""
self.connection = connection
self.origin = origin
self.enabled = enabled
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.cnames = []
if cnames:
self.cnames = cnames
self.comment = comment
self.trusted_signers = trusted_signers
        self.logging = logging
self.default_root_object = default_root_object
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self></Self>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
if self.default_root_object:
dro = self.default_root_object
s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
s += '</DistributionConfig>\n'
return s
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'Logging':
self.logging = LoggingInfo()
return self.logging
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
else:
return None
def endElement(self, name, value, connection):
if name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'CallerReference':
self.caller_reference = value
elif name == 'DefaultRootObject':
self.default_root_object = value
else:
setattr(self, name, value)
class StreamingDistributionConfig(DistributionConfig):
def __init__(self, connection=None, origin='', enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, logging=None):
DistributionConfig.__init__(self, connection=connection,
origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers,
logging=logging)
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self/>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
s += '</StreamingDistributionConfig>\n'
return s
class DistributionSummary:
def __init__(self, connection=None, domain_name='', id='',
last_modified_time=None, status='', origin=None,
cname='', comment='', enabled=False):
self.connection = connection
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.origin = origin
self.enabled = enabled
self.cnames = []
if cname:
self.cnames.append(cname)
self.comment = comment
self.trusted_signers = None
self.etag = None
self.streaming = False
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'Status':
self.status = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'DomainName':
self.domain_name = value
elif name == 'Origin':
self.origin = value
elif name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'StreamingDistributionSummary':
self.streaming = True
else:
setattr(self, name, value)
def get_distribution(self):
return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
def get_distribution(self):
return self.connection.get_streaming_distribution_info(self.id)
class Distribution:
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
self.connection = connection
self.config = config
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.in_progress_invalidation_batches = 0
self.active_signers = None
self.etag = None
self._bucket = None
self._object_class = Object
def startElement(self, name, attrs, connection):
if name == 'DistributionConfig':
self.config = DistributionConfig()
return self.config
elif name == 'ActiveTrustedSigners':
self.active_signers = ActiveTrustedSigners()
return self.active_signers
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'Status':
self.status = value
elif name == 'InProgressInvalidationBatches':
self.in_progress_invalidation_batches = int(value)
elif name == 'DomainName':
self.domain_name = value
else:
setattr(self, name, value)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the Distribution. The only values
of the DistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set ``Distribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the Distribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = DistributionConfig(self.connection, self.config.origin,
self.config.enabled, self.config.caller_reference,
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
        if enabled is not None:
            new_config.enabled = enabled
        if cnames is not None:
            new_config.cnames = cnames
        if comment is not None:
            new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
self._object_class = Object
def enable(self):
"""
        Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
        Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
def delete(self):
"""
Delete this CloudFront Distribution. The content
associated with the Distribution is not deleted from
the underlying Origin bucket in S3.
"""
self.connection.delete_distribution(self.id, self.etag)
def _get_bucket(self):
if isinstance(self.config.origin, S3Origin):
if not self._bucket:
bucket_dns_name = self.config.origin.dns_name
bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '')
from boto.s3.connection import S3Connection
s3 = S3Connection(self.connection.aws_access_key_id,
self.connection.aws_secret_access_key,
proxy=self.connection.proxy,
proxy_port=self.connection.proxy_port,
proxy_user=self.connection.proxy_user,
proxy_pass=self.connection.proxy_pass)
self._bucket = s3.get_bucket(bucket_name)
self._bucket.distribution = self
self._bucket.set_key_class(self._object_class)
return self._bucket
else:
raise NotImplementedError('Unable to get_objects on CustomOrigin')
def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs
def set_permissions(self, object, replace=False):
"""
Sets the S3 ACL grants for the given object to the appropriate
value based on the type of Distribution. If the Distribution
is serving private content the ACL will be set to include the
Origin Access Identity associated with the Distribution. If
the Distribution is serving public content the content will
be set up with "public-read".
:type object: :class:`boto.cloudfront.object.Object`
        :param object: The Object whose ACL is being set
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
if isinstance(self.config.origin, S3Origin):
if self.config.origin.origin_access_identity:
id = self.config.origin.origin_access_identity.split('/')[-1]
oai = self.connection.get_origin_access_identity_info(id)
policy = object.get_acl()
if replace:
policy.acl = ACL()
policy.acl.add_user_grant('READ', oai.s3_user_id)
object.set_acl(policy)
else:
object.set_canned_acl('public-read')
def set_permissions_all(self, replace=False):
"""
Sets the S3 ACL grants for all objects in the Distribution
to the appropriate value based on the type of Distribution.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
bucket = self._get_bucket()
for key in bucket:
self.set_permissions(key, replace)
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object
def create_signed_url(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates a signed CloudFront URL that is only valid within the specified
parameters.
:type url: str
:param url: The URL of the protected object.
:type keypair_id: str
:param keypair_id: The keypair ID of the Amazon KeyPair used to sign
theURL. This ID MUST correspond to the private key
specified with private_key_file or private_key_string.
:type expire_time: int
:param expire_time: The expiry time of the URL. If provided, the URL
will expire after the time has passed. If not provided the URL will
never expire. Format is a unix epoch.
Use time.time() + duration_in_sec.
:type valid_after_time: int
:param valid_after_time: If provided, the URL will not be valid until
after valid_after_time. Format is a unix epoch.
Use time.time() + secs_until_valid.
:type ip_address: str
:param ip_address: If provided, only allows access from the specified
IP address. Use '192.168.0.10' for a single IP or
use '192.168.0.0/24' CIDR notation for a subnet.
:type policy_url: str
:param policy_url: If provided, allows the signature to contain
wildcard globs in the URL. For example, you could
provide: 'http://example.com/media/\*' and the policy
and signature would allow access to all contents of
the media subdirectory. If not specified, only
allow access to the exact url provided in 'url'.
:type private_key_file: str or file object.
:param private_key_file: If provided, contains the filename of the
private key file used for signing or an open
file object containing the private key
contents. Only one of private_key_file or
private_key_string can be provided.
:type private_key_string: str
:param private_key_string: If provided, contains the private key string
used for signing. Only one of private_key_file or
private_key_string can be provided.
:rtype: str
:return: The signed URL.
"""
# Get the required parameters
params = self._create_signing_params(
url=url, keypair_id=keypair_id, expire_time=expire_time,
valid_after_time=valid_after_time, ip_address=ip_address,
policy_url=policy_url, private_key_file=private_key_file,
private_key_string=private_key_string)
#combine these into a full url
if "?" in url:
sep = "&"
else:
sep = "?"
signed_url_params = []
for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
if key in params:
param = "%s=%s" % (key, params[key])
signed_url_params.append(param)
signed_url = url + sep + "&".join(signed_url_params)
return signed_url
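    # A minimal usage sketch for create_signed_url; the distribution object,
    # key-pair ID and key path below are hypothetical placeholders.
    #
    #   import time
    #   url = dist.create_signed_url(
    #       "http://d111111abcdef8.cloudfront.net/image.jpg",
    #       keypair_id="APKAEXAMPLE",
    #       expire_time=int(time.time()) + 3600,
    #       private_key_file="/path/to/private-key.pem")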
def _create_signing_params(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates the required URL parameters for a signed URL.
"""
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
# we manually construct this policy string to ensure formatting
# matches signature
policy = self._canned_policy(url, expire_time)
params["Expires"] = str(expire_time)
else:
# If no policy_url is specified, default to the full url.
if policy_url is None:
policy_url = url
# Can't use canned policy
policy = self._custom_policy(policy_url, expires=expire_time,
valid_after=valid_after_time,
ip_address=ip_address)
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
#sign the policy
signature = self._sign_string(policy, private_key_file, private_key_string)
#now base64 encode the signature (URL safe as well)
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params
@staticmethod
def _canned_policy(resource, expires):
"""
Creates a canned policy string.
"""
policy = ('{"Statement":[{"Resource":"%(resource)s",'
'"Condition":{"DateLessThan":{"AWS:EpochTime":'
'%(expires)s}}}]}' % locals())
return policy
@staticmethod
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
"""
Creates a custom policy string based on the supplied parameters.
"""
condition = {}
if expires:
condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
if '/' not in ip_address:
ip_address += "/32"
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
"Resource": resource,
"Condition": condition}]}
return json.dumps(policy, separators=(",", ":"))
@staticmethod
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront. Requires the M2Crypto
library be installed.
"""
try:
from M2Crypto import EVP
except ImportError:
raise NotImplementedError("Boto depends on the python M2Crypto "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# if private_key_file is a file object read the key string from there
if isinstance(private_key_file, file):
private_key_string = private_key_file.read()
# Now load key and calculate signature
if private_key_string:
key = EVP.load_key_string(private_key_string)
else:
key = EVP.load_key(private_key_file)
key.reset_context(md='sha1')
key.sign_init()
key.sign_update(str(message))
signature = key.sign_final()
return signature
@staticmethod
def _url_base64_encode(msg):
"""
Base64 encodes a string using the URL-safe characters specified by
Amazon.
"""
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64
class StreamingDistribution(Distribution):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
Distribution.__init__(self, connection, config, domain_name,
id, last_modified_time, status)
self._object_class = StreamingObject
def startElement(self, name, attrs, connection):
if name == 'StreamingDistributionConfig':
self.config = StreamingDistributionConfig()
return self.config
else:
return Distribution.startElement(self, name, attrs, connection)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the StreamingDistribution. The only values
of the StreamingDistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set
``StreamingDistribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the StreamingDistribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = StreamingDistributionConfig(self.connection,
self.config.origin,
self.config.enabled,
self.config.caller_reference,
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
new_config)
self.config = new_config
self._object_class = StreamingObject
def delete(self):
self.connection.delete_streaming_distribution(self.id, self.etag)
|
{
"content_hash": "bd69ddc67891a5feddac6ca263a66256",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 102,
"avg_line_length": 41.00970873786408,
"alnum_prop": 0.5584753787878788,
"repo_name": "yyuu/botornado",
"id": "a663cb769321c139b5013c4a1d118be9bb6ad9a8",
"size": "30673",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "boto/cloudfront/distribution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1974446"
}
],
"symlink_target": ""
}
|
import math
def inverse_clamp(value, liveband):
"""
Ensures value is in (-∞, -liveband] ∪ [liveband, ∞).
liveband must be a positive value. A value of exactly 0 is returned unchanged.
"""
if value > 0:
value = max(value, liveband)
if value < 0:
value = min(value, -liveband)
return value
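# Illustrative examples (hypothetical liveband of 0.5):
# inverse_clamp(0.2, 0.5) -> 0.5 (pushed out to the band edge)
# inverse_clamp(-0.1, 0.5) -> -0.5
# inverse_clamp(0.9, 0.5) -> 0.9 (already outside the dead band)
# inverse_clamp(0, 0.5) -> 0 (zero passes through unchanged)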
def rotate(vec, degrees):
"""
Rotates a 2D vector (x, y) counter-clockwise by the given angle in degrees
"""
x, y = vec
sin = math.sin(math.radians(degrees))
cos = math.cos(math.radians(degrees))
rot_x = cos * x + -sin * y
rot_y = sin * x + cos * y
return rot_x, rot_y
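# Illustrative example: rotating the unit x-vector by 90 degrees gives the
# unit y-vector:
# rotate((1, 0), 90) -> (cos(90)*1 - sin(90)*0, sin(90)*1 + cos(90)*0)
#                    ≈ (0.0, 1.0)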
|
{
"content_hash": "f27d4509cfbb6a66a30676e042761f75",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 22.28,
"alnum_prop": 0.6014362657091562,
"repo_name": "cuauv/software",
"id": "36b4da9a5be53ce912fac852b53a2f700a1c2eb0",
"size": "582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auv_math/math_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "271780"
},
{
"name": "C++",
"bytes": "2831785"
},
{
"name": "CMake",
"bytes": "5365"
},
{
"name": "CSS",
"bytes": "5082"
},
{
"name": "Dockerfile",
"bytes": "2758"
},
{
"name": "Emacs Lisp",
"bytes": "19028"
},
{
"name": "GLSL",
"bytes": "6783"
},
{
"name": "HTML",
"bytes": "3642"
},
{
"name": "Haskell",
"bytes": "4770"
},
{
"name": "JavaScript",
"bytes": "113413"
},
{
"name": "Makefile",
"bytes": "12887"
},
{
"name": "Nix",
"bytes": "16335"
},
{
"name": "OCaml",
"bytes": "3804"
},
{
"name": "PureBasic",
"bytes": "58"
},
{
"name": "Python",
"bytes": "2141765"
},
{
"name": "Scheme",
"bytes": "129544"
},
{
"name": "Shell",
"bytes": "68820"
},
{
"name": "TeX",
"bytes": "25243"
},
{
"name": "Vim script",
"bytes": "125505"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog_app.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "c28a44cd25ad603bef6278f123b0811b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7074235807860262,
"repo_name": "Satchitananda/django-simple-blog",
"id": "41bb07acef0828e7675a750700f315d8467fc3bc",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5717"
},
{
"name": "Python",
"bytes": "18713"
}
],
"symlink_target": ""
}
|
"""Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
compression type ('not compressed' for linear samples)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters (possibly excepting the
compression type) and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import builtins
__all__ = ["open", "openfp", "Error"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'i'
import struct
import sys
from chunk import Chunk
def _byteswap3(data):
ba = bytearray(data)
ba[::3] = data[2::3]
ba[2::3] = data[::3]
return bytes(ba)
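# Illustrative example: for 24-bit samples the bytes
# b'\x01\x02\x03\x04\x05\x06' become b'\x03\x02\x01\x06\x05\x04' -- each
# 3-byte sample is reversed, converting between little- and big-endian
# sample layouts.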
class Wave_read:
"""Variables used in this class:
These variables are available to the user through appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
available through the getcomptype() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != b'RIFF':
raise Error('file does not start with RIFF id')
if self._file.read(4) != b'WAVE':
raise Error('not a WAVE file')
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == b'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == b'data':
if not self._fmt_chunk_read:
raise Error('data chunk before fmt chunk')
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error('fmt chunk and/or data chunk missing')
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error('no marks')
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error('position not in range')
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return b''
if self._sampwidth in (2, 4) and sys.byteorder == 'big':
# unfortunately the fromfile() method does not take
# something that only looks like a file object, so
# we have to reach into the innards of the chunk object
import array
chunk = self._data_chunk
data = array.array(_array_fmts[self._sampwidth])
assert data.itemsize == self._sampwidth
nitems = nframes * self._nchannels
if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
nitems = (chunk.chunksize - chunk.size_read) // self._sampwidth
data.fromfile(chunk.file.file, nitems)
# "tell" data chunk how much was read
chunk.size_read = chunk.size_read + nitems * self._sampwidth
# do the same for the outermost chunk
chunk = chunk.file
chunk.size_read = chunk.size_read + nitems * self._sampwidth
data.byteswap()
data = data.tobytes()
else:
data = self._data_chunk.read(nframes * self._framesize)
if self._sampwidth == 3 and sys.byteorder == 'big':
data = _byteswap3(data)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack_from('<HHLLH', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
sampwidth = struct.unpack_from('<H', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error('unknown format: %r' % (wFormatTag,))
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, str):
f = builtins.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if nchannels < 1:
raise Error('bad # of channels')
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error('number of channels not set')
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if sampwidth < 1 or sampwidth > 4:
raise Error('bad sample width')
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error('sample width not set')
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if framerate <= 0:
raise Error('bad frame rate')
self._framerate = int(round(framerate))
def getframerate(self):
if not self._framerate:
raise Error('frame rate not set')
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error('cannot change parameters after starting to write')
if comptype not in ('NONE',):
raise Error('unsupported compression type')
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error('cannot change parameters after starting to write')
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error('not all parameters set')
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
raise Error('setmark() not supported')
def getmark(self, id):
raise Error('no marks')
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth in (2, 4) and sys.byteorder == 'big':
import array
a = array.array(_array_fmts[self._sampwidth])
a.frombytes(data)
data = a
assert data.itemsize == self._sampwidth
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
if self._sampwidth == 3 and sys.byteorder == 'big':
data = _byteswap3(data)
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
if self._file:
try:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
finally:
self._file = None
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error('# channels not specified')
if not self._sampwidth:
raise Error('sample width not specified')
if not self._framerate:
raise Error('sampling rate not specified')
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write(b'RIFF')
if not self._nframes:
self._nframes = initlength // (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
self._form_length_pos = self._file.tell()
self._file.write(struct.pack('<L4s4sLHHLLHH4s',
36 + self._datalength, b'WAVE', b'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, b'data'))
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<L', self._datalength))
self._headerwritten = True
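# The header written above is the canonical 44-byte PCM WAVE layout:
# 'RIFF' <riff size> 'WAVE' 'fmt ' <16> <format tag> <channels>
# <frame rate> <byte rate = channels*rate*sampwidth>
# <block align = channels*sampwidth> <bits per sample> 'data' <data size>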
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<L', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<L', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open # B/W compatibility
|
{
"content_hash": "5e5770877fc1145effeed96fa0b038b8",
"timestamp": "",
"source": "github",
"line_count": 515,
"max_line_length": 130,
"avg_line_length": 36.0757281553398,
"alnum_prop": 0.5900209914419506,
"repo_name": "timm/timmnix",
"id": "4a223451bf9c345df19c6f5ae9b3a4d81a51fdf4",
"size": "18579",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pypy3-v5.5.0-linux64/lib-python/3/wave.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1641"
},
{
"name": "Batchfile",
"bytes": "1234"
},
{
"name": "C",
"bytes": "436685"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Common Lisp",
"bytes": "4"
},
{
"name": "Emacs Lisp",
"bytes": "290698"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Makefile",
"bytes": "1681"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1540"
},
{
"name": "Prolog",
"bytes": "14301"
},
{
"name": "Python",
"bytes": "21267592"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "27687"
},
{
"name": "TeX",
"bytes": "3052861"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import os.path
import argparse
import numpy as np
import sympy as sp
from numpy.linalg.linalg import LinAlgError
import stats.accuracy as accuracy
import stats.estimators as estimators
import stats.methods as methods
import stats.utils as utils
################
# Declarations #
################
DESCRIPTION = 'Use this script to determine estimates accuracy'
SYM_PARAMS = sp.symbols('a b')
PRECISE_PARAMS = (0, 0.2)
SYM_X, SYM_Y = sp.symbols('x y')
# linear function
SYM_EXPR = sp.sympify('a + b*x')
SYM_EXPR_DELTA = sp.sympify('y - a - b*x')
# quadratic function
# SYM_EXPR = sp.sympify('a + b*x + c*(x**2)')
# SYM_EXPR_DELTA = sp.sympify('y - a - b*x - c*(x**2)')
# inverse function
# SYM_EXPR = sp.sympify('a + 1/(b + c*x)')
# SYM_EXPR_DELTA = sp.sympify('y - (a + 1/(b + c*x))')
# exponential function
# SYM_EXPR = sp.sympify('exp(a + b*x)')
# SYM_EXPR_DELTA = sp.sympify('y - exp(a + b*x)')
# logarithmic function
# SYM_EXPR = sp.sympify('b + c*ln(x+10)')
# SYM_EXPR_DELTA = sp.sympify('y - (b + c*ln(x+10))')
# logistic function
# SYM_EXPR = sp.sympify('1/(1+exp(-b*x))')
# SYM_EXPR_DELTA = sp.sympify('y - 1/(1+exp(-b*x))')
# sinusoidal function
# SYM_EXPR = sp.sympify('a + b*sin(2*x)')
# SYM_EXPR_DELTA = sp.sympify('y - (a + b*sin(2*x))')
MIN_X = 0
MAX_X = 10
NUM_VALS = 100 # number of source values
ERR_NUM_STD_ITER = 20 # number of std values per axis
ERR_MIN_STD_X = 0.001 # minimal std of X error values
ERR_MAX_STD_X = 2.001 # maximal std of X error values
ERR_STEP_STD_X = (ERR_MAX_STD_X - ERR_MIN_STD_X) / ERR_NUM_STD_ITER
ERR_MIN_STD_Y = 0.001 # minimal std of Y error values
ERR_MAX_STD_Y = 2.001 # maximal std of Y error values
ERR_STEP_STD_Y = (ERR_MAX_STD_Y - ERR_MIN_STD_Y) / ERR_NUM_STD_ITER
NUM_ITER = 100 # number of realizations
LSE_NUM_ITER = 1 # number of LSE iterations
################
# Program code #
################
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'-o', '--output', metavar='PATH',
type=str, required=True,
help='base path to write data')
args = parser.parse_args()
output_path, _ = os.path.splitext(args.output)
print('Expression: {}'.format(SYM_EXPR))
print('Symbolic parameters: {}'.format(SYM_PARAMS))
print('Precise parameter values: {}'.format(PRECISE_PARAMS))
print('Real X: {}..{}'.format(MIN_X, MAX_X))
print('STD X: {}..{}'.format(ERR_MIN_STD_X, ERR_MAX_STD_X))
print('STD X step: {}'.format(ERR_STEP_STD_X))
print('STD Y: {}..{}'.format(ERR_MIN_STD_Y, ERR_MAX_STD_Y))
print('STD Y step: {}'.format(ERR_STEP_STD_Y))
print('Number of iterations: {}'.format(NUM_ITER))
print('Output path: {}'.format(output_path))
# get precise param values
precise_params = np.vstack(PRECISE_PARAMS)
# build precise values
precise_expr = sp.lambdify(
SYM_X,
SYM_EXPR.subs(zip(SYM_PARAMS, PRECISE_PARAMS)),
'numpy')
precise_vectorized = np.vectorize(precise_expr)
# generate array of X error stds
err_stds_x = np.linspace(ERR_MIN_STD_X, ERR_MAX_STD_X, ERR_NUM_STD_ITER)
# generate array of Y error stds
err_stds_y = np.linspace(ERR_MIN_STD_Y, ERR_MAX_STD_Y, ERR_NUM_STD_ITER)
# create meshgrid
err_stds_x, err_stds_y = np.meshgrid(err_stds_x, err_stds_y)
# collect accuracies of estimates
basic_accs = np.zeros((ERR_NUM_STD_ITER, ERR_NUM_STD_ITER))
lse_accs = np.zeros((ERR_NUM_STD_ITER, ERR_NUM_STD_ITER))
mrt_accs = np.zeros((ERR_NUM_STD_ITER, ERR_NUM_STD_ITER))
num_std_iter = ERR_NUM_STD_ITER**2
# iterate over error standard deviation values
std_iter = 0
for std_i, err_std_row in enumerate(np.dstack((err_stds_x, err_stds_y))):
for std_j, (err_std_x, err_std_y) in enumerate(err_std_row):
std_iter += 1
print('Iteration {}/{}: std X: {:.2f}, std Y: {:.2f}'.format(
std_iter, num_std_iter, err_std_x, err_std_y),
end = ' -> ')
num_lse_success_attempts = 0
num_mrt_success_attempts = 0
# iterate over realizations
for iter_i in range(NUM_ITER):
measured_vals_x, measured_vals_y = estimators.uniform(
precise_vectorized, NUM_VALS,
MIN_X, MAX_X,
err_std_x, err_std_y)
################################
# Base values for basic search #
################################
# compute base values by averaging the measured values
base_values = utils.base_values_avg(
SYM_X, SYM_Y,
measured_vals_x, measured_vals_y,
len(SYM_PARAMS))
################
# Basic search #
################
# find params with basic method
basic_params = methods.search_basic(
delta_expression=SYM_EXPR_DELTA,
parameters=SYM_PARAMS,
values=base_values
)
# print('Basic params: {}'.format(basic_params))
# add distance between estimates and real values
basic_accs[std_i, std_j] += accuracy.avg_euclidean_dst(
precise_params,
np.vstack(basic_params))
##############
# LSE search #
##############
try:
lse_params = methods.search_lse(
expression=SYM_EXPR,
parameters=SYM_PARAMS,
values={SYM_X: measured_vals_x},
result_values={SYM_Y: measured_vals_y},
init_estimates=dict(zip(SYM_PARAMS, basic_params)),
num_iter=LSE_NUM_ITER)
except LinAlgError:
pass
else:
# print('LSE params: {}'.format(lse_params))
lse_accs[std_i, std_j] += accuracy.avg_euclidean_dst(
precise_params,
np.vstack(lse_params))
num_lse_success_attempts += 1
##############
# MRT search #
##############
# find params with mrt method
try:
mrt_params = methods.search_mrt(
delta_expression=SYM_EXPR_DELTA,
parameters=SYM_PARAMS,
values={SYM_X: measured_vals_x, SYM_Y: measured_vals_y},
err_stds={SYM_X: err_std_x, SYM_Y: err_std_y}
)
except LinAlgError:
pass
else:
# print('MRT params: {}'.format(mrt_params))
mrt_accs[std_i, std_j] += accuracy.avg_euclidean_dst(
precise_params,
np.vstack(mrt_params))
num_mrt_success_attempts += 1
print('LSE: {}, MRT: {}'.format(
num_lse_success_attempts, num_mrt_success_attempts))
if num_lse_success_attempts:
lse_accs[std_i, std_j] /= num_lse_success_attempts
if num_mrt_success_attempts:
mrt_accs[std_i, std_j] /= num_mrt_success_attempts
basic_accs /= NUM_ITER
np.save(
'{}_err-stds-x.npy'.format(output_path),
err_stds_x)
np.save(
'{}_err-stds-y.npy'.format(output_path),
err_stds_y)
np.save(
'{}_lse-accs.npy'.format(output_path),
lse_accs)
np.save(
'{}_mrt-accs.npy'.format(output_path),
mrt_accs)
|
{
"content_hash": "240f529d60970b88f1ea4b3b5da1e373",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 78,
"avg_line_length": 33.89351851851852,
"alnum_prop": 0.5480125665892638,
"repo_name": "budnyjj/NLRA",
"id": "ab88cfd43762a2c714ee8de9bb776be53322f1f6",
"size": "7345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accuracy_per_std_x-y.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109987"
}
],
"symlink_target": ""
}
|
def fuzz_it(check_sudoku=None, solve_sudoku=None, test="all", iters=10,
mutations=10, check_edges=None):
if test in ("all", "checker"):
from fuzz_checker import fuzz_checker
success = fuzz_checker(check_sudoku, check_edges)
if not success:
print "Failed fuzzing of sudoku checker"
return
else:
print "Sudoku checker passed all tests!"
if test in ("all", "solver"):
from fuzz_solver import fuzz_solver
success = fuzz_solver(check_sudoku, solve_sudoku, iters=iters,
mutates=mutations)
if not success:
print "Failed fuzzing of sudoku solver"
return
else:
print "Sudoku solver passed all tests!"
if __name__ == '__main__':
try:
from argparser import get_args
except ImportError:
try:
from optparser import get_args
except ImportError:
print """couldn't import an option parser. Run in interactive mode."""
args_dict = get_args()
import imp, os
filepath = os.path.abspath(args_dict["solver_file"])
mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
if file_ext.lower() == '.py':
user_mod = imp.load_source(mod_name, filepath)
elif file_ext.lower() == ".pyc":
user_mod = imp.load_compiled(mod_name, filepath)
else:
raise ValueError("solver_file must be a .py or .pyc file")
del args_dict["solver_file"]
try:
args_dict["check_sudoku"] = user_mod.check_sudoku
except AttributeError:
raise AttributeError("Module `{mod}` has no function"
"`check_sudoku`".format(mod=mod_name))
if args_dict["test"] in ("all", "solver"):
try:
args_dict["solve_sudoku"] = user_mod.solve_sudoku
except AttributeError:
raise AttributeError("Module {mod} has no function"
"`solve_sudoku`".format(mod=mod_name))
fuzz_it(**args_dict)
|
{
"content_hash": "2cf33bf3e5fb593aeae5ac5cb43eff74",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 82,
"avg_line_length": 38.6,
"alnum_prop": 0.5911917098445596,
"repo_name": "jtratner/sudoku-fuzzer-udacity",
"id": "7726377f6b340c008bc33c68549d5262a4d38742",
"size": "1930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_fuzzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14869"
}
],
"symlink_target": ""
}
|
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest # pylint: disable=import-error
else:
import unittest
|
{
"content_hash": "1d026db6a45d09ca27b61393f03174d2",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 64,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.706766917293233,
"repo_name": "gamechanger/kafka-python",
"id": "da1069f8d14fed9a487e3a1796a72abedb2ad488",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "559844"
},
{
"name": "Shell",
"bytes": "2646"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2018 Zuse Institute Berlin, www.zib.de
Permissions are granted as stated in the license file you have obtained
with this software. If you find the library useful for your purpose,
please refer to README.md for how to cite IPET.
@author: Gregor Hendel
"""
from .Observer import Observable
from .IPETMessageStream import Message
class Manager(Observable):
"""
manages all manageables of a certain type of which many objects might exist and need to be listed / browsed frequently
"""
def __init__(self, listofmanageables=None, activate=False):
"""
constructs a new Manager
constructs a new Manager by creating an empty dictionary. Fills the dictionary with a list
of manageables, if non-empty. All elements can be optionally activated.
"""
self.stringrepresentations = {}
self.activeset = set()
for manageable in (listofmanageables or []):
self.addManageable(manageable)
if activate:
self.activate([manageable])
def addManageable(self, manageable):
"""
add manageable to dictionary - ensures that only one manageable with that string representation is stored
"""
stringrepresentation = self.getStringRepresentation(manageable)
self.stringrepresentations[stringrepresentation] = manageable
def getStringRepresentation(self, manageable):
"""
return an manageable's string representation
"""
if type(manageable) is str:
return manageable
else:
try:
return manageable.getName()
except AttributeError:
return str(manageable)
def getManageable(self, stringrepresentation):
"""
returns the manageable belonging to a string representation, or None, if no such manageable is available
"""
return self.stringrepresentations.get(stringrepresentation, None)
def deleteManageable(self, manageable):
"""
delete an manageable from the manager
"""
for key, val in list(self.stringrepresentations.items()):
if val == manageable:
oldstringrepresentation = key
break
del self.stringrepresentations[oldstringrepresentation]
self.deactivate([manageable])
def reinsertManageable(self, manageable):
"""
reinserts a manageable after its name has changed, preserving its
activation state
"""
active = self.isActive(manageable)
self.deleteManageable(manageable)
self.addManageable(manageable)
if active:
self.activate([manageable])
def editObjectAttribute(self, manageable, attributename, newattribute):
"""
edit an objects attribute and ensure that a changed object representation is directly
processed
"""
oldname = self.getStringRepresentation(manageable)
manageable.editAttribute(attributename, newattribute)
newname = self.getStringRepresentation(manageable)
print(newname, newattribute)
if oldname != newname:
self.chgManageableName(manageable, oldname, newname)
self.notify(Message("Changed attribute %s of %s to %s" % (attributename, newname, newattribute), Message.MESSAGETYPE_INFO))
def chgManageableName(self, manageable, oldname, newname):
"""
changes a manageables name, if possible
"""
if newname != oldname:
if self.getManageable(newname) is not None:
raise KeyError("An element of name %s is already listed" % (newname))
del self.stringrepresentations[oldname]
self.stringrepresentations[newname] = manageable
def getManageables(self, onlyactive=False):
"""
returns all (or only active) manageables
"""
if onlyactive:
return list(self.activeset)
else:
return list(self.stringrepresentations.values())
def getAllRepresentations(self, onlyactive=False):
"""
returns a list of all string representations
"""
if not onlyactive:
return list(self.stringrepresentations.keys())
else:
return [self.getStringRepresentation(manageable) for manageable in self.activeset]
def activate(self, manageables):
"""
adds a manageable to the active set
"""
for manageable in manageables:
if self.getStringRepresentation(manageable) not in self.stringrepresentations:
raise KeyError("%s is not managed by this manager - call addManageable() first" % (self.getStringRepresentation(manageable)))
if manageable not in self.activeset:
self.activeset.add(manageable)
if manageables:
self.notify(Message("Activated %s" % ", ".join(map(self.getStringRepresentation, manageables)), messagetype=Message.MESSAGETYPE_INFO))
def addAndActivate(self, manageable):
"""
adds a manageable to the manager and activates it
"""
self.addManageable(manageable)
self.activate([manageable])
def getActiveSet(self):
"""
returns the set of active objects managed by the manager
"""
return self.activeset
def deactivate(self, manageables):
"""
removes a list of manageables from the active set of managed objects - elements stays present and can be activated again
"""
for manageable in manageables:
try:
self.activeset.remove(manageable)
except KeyError:
pass
self.notify(Message("Deactivated %s" % ", ".join(map(self.getStringRepresentation, manageables)), messagetype=Message.MESSAGETYPE_INFO))
def countManageables(self, onlyactive):
"""
count the number of (optionally only the active) manageables
"""
if onlyactive:
return len(self.activeset)
else:
return len(list(self.stringrepresentations.keys()))
def isActive(self, manageable):
"""
is the manageable part of the active set of this manager
"""
return manageable in self.activeset
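# Minimal usage sketch (illustrative; the names are hypothetical):
#
#     m = Manager(['default', 'fast'], activate=True)
#     m.getAllRepresentations()          # ['default', 'fast']
#     m.deactivate(['fast'])
#     m.getManageables(onlyactive=True)  # ['default']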
|
{
"content_hash": "89ec7d57b12a22ab428f90a0bca608fb",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 146,
"avg_line_length": 36.9364161849711,
"alnum_prop": 0.6405320813771518,
"repo_name": "GregorCH/ipet",
"id": "e86ecf950a988af895204279b1a2bc27e422e507",
"size": "6390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipet/concepts/Manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7755"
},
{
"name": "Python",
"bytes": "601288"
},
{
"name": "Shell",
"bytes": "5180"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import six
try:
from importlib import import_module
except ImportError: # pragma: no cover
from django.utils.importlib import import_module
class JSONFieldDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, obj, objtype):
cache_field = '_cached_jsonfield_%s' % self.field
if not hasattr(obj, cache_field):
try:
setattr(
obj,
cache_field,
json.loads(
getattr(obj, self.field),
parse_float=Decimal,
)
)
except (TypeError, ValueError):
setattr(obj, cache_field, {})
return getattr(obj, cache_field)
def __set__(self, obj, value):
setattr(obj, '_cached_jsonfield_%s' % self.field, value)
setattr(obj, self.field, json.dumps(value, cls=DjangoJSONEncoder))
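# Illustrative usage (hypothetical model): attach the descriptor next to a
# text field holding raw JSON, then read and write Python objects directly:
#
#     class Item(models.Model):
#         data_json = models.TextField(blank=True)
#         data = JSONFieldDescriptor('data_json')
#
#     item.data = {'price': Decimal('9.99')}  # stored via DjangoJSONEncoder
#     item.data                               # parsed back, floats as Decimal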
def get_object(path, fail_silently=False):
# Return early if path isn't a string (might already be a callable or
# a class or whatever)
if not isinstance(path, six.string_types): # XXX bytes?
return path
try:
return import_module(path)
except ImportError:
try:
dot = path.rindex('.')
mod, fn = path[:dot], path[dot + 1:]
return getattr(import_module(mod), fn)
except (AttributeError, ImportError):
if not fail_silently:
raise
|
{
"content_hash": "0333ce212167427e67d415d4e7588d7f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 30.60377358490566,
"alnum_prop": 0.5678175092478421,
"repo_name": "michaelkuty/leonardo-items",
"id": "5b8e3844c676efb72be789d5353a6db27d03ca03",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leonardo_items/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "905"
},
{
"name": "Python",
"bytes": "15532"
}
],
"symlink_target": ""
}
|
"""
Enums transformer
"""
import json
import logging
import re
from collections import namedtuple, OrderedDict
from model.enum import Enum
from model.enum_element import EnumElement
from transformers.common_producer import InterfaceProducerCommon
class EnumsProducer(InterfaceProducerCommon):
"""
Enums transformer
"""
def __init__(self, enum_class, key_words):
super(EnumsProducer, self).__init__(key_words=key_words)
self._container_name = 'elements'
self.enum_class = enum_class
self.logger = logging.getLogger(self.__class__.__name__)
self.param_named = namedtuple('param_named', 'origin description name since deprecated history')
self._item_name = None
@property
def container_name(self):
return self._container_name
def transform(self, item: Enum, render: dict = None) -> dict:
"""
Main entry point for transforming each Enum into an output dictionary,
which is going to be applied to the Jinja2 template
:param item: instance of Enum
:param render: empty dictionary, present as a parameter for code consistency
:return: dictionary which is going to be applied to the Jinja2 template
"""
item.name = self._replace_sync(item.name)
name = 'SDL{}{}'.format(item.name[:1].upper(), item.name[1:])
tmp = {self.enum_class}
imports = {'.h': tmp, '.m': tmp}
if not render:
render = OrderedDict()
render['origin'] = item.name
render['name'] = name
render['imports'] = imports
render['history'] = item.history
super(EnumsProducer, self).transform(item, render)
return render
def extract_param(self, param: EnumElement, item_name: str):
"""
Prepares a param_named tuple from the given EnumElement
:param param: EnumElement from the initial Model
:param item_name: name of the enclosing enum, used to detect keyword collisions
:return: self.param_named populated with the prepared params
"""
data = {'origin': param.name,
'description': self.extract_description(param.description),
'since': param.since,
'history': param.history,
'deprecated': json.loads(param.deprecated.lower()) if param.deprecated else False}
name = None
if re.match(r'^[A-Z]{1,2}\d|\d[A-Z]{1,2}$', param.name):
name = param.name
elif re.match(r'(^[a-z\d]+$|^[A-Z\d]+$)', param.name):
name = param.name.title()
elif re.match(r'^(?=\w*[a-z])(?=\w*[A-Z])\w+$', param.name):
if param.name.endswith('ID'):
name = param.name[:-2]
else:
name = param.name[:1].upper() + param.name[1:]
elif re.match(r'^(?=\w*?[a-zA-Z])(?=\w*?[_-])(?=[0-9])?.*$', param.name):
name = []
for item in re.split('[_-]', param.name):
if re.match(r'^[A-Z\d]+$', item):
name.append(item.title())
name = ''.join(name)
if any(re.search(r'^(sdl)?({}){}$'.format(item_name.casefold(), name.casefold()), k) for k in self.key_words):
name = self._replace_keywords(name)
data['name'] = name
return self.param_named(**data)
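# Illustrative examples of the naming rules above (hypothetical inputs):
# 'H264' -> 'H264' (short letter/digit codes kept verbatim)
# 'status' -> 'Status' (single-case names title-cased)
# 'ambientLightStatus' -> 'AmbientLightStatus' (camelCase capitalized)
# 'appID' -> 'app' (trailing 'ID' stripped from mixed-case names)
# 'AUDIO_FILE' -> 'AudioFile' (underscore/dash parts title-cased and joined)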
|
{
"content_hash": "3db59b4b402787bfb0a082219a632a8a",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 118,
"avg_line_length": 39.2289156626506,
"alnum_prop": 0.5737100737100738,
"repo_name": "smartdevicelink/sdl_ios",
"id": "748f6cbe6fe111197bc65a6a794b36b39eef1dd6",
"size": "3256",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "generator/transformers/enums_producer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1311"
},
{
"name": "Jinja",
"bytes": "9097"
},
{
"name": "Mustache",
"bytes": "1587"
},
{
"name": "Objective-C",
"bytes": "6802591"
},
{
"name": "Python",
"bytes": "95846"
},
{
"name": "Ruby",
"bytes": "1875"
},
{
"name": "Shell",
"bytes": "25862"
},
{
"name": "Swift",
"bytes": "112020"
}
],
"symlink_target": ""
}
|
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one neither wants to
consider this box a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from object_detection.utils import ops
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results, use_matmul_gather=False):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
use_matmul_gather: Use matrix multiplication based gather instead of
standard tf.gather. (Default: False).
Raises:
ValueError: if match_results does not have rank 1 or is not an
int32 tensor
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype != tf.int32:
raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
self._gather_op = tf.gather
if use_matmul_gather:
self._gather_op = ops.matmul_gather_on_zeroth_axis
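# Illustrative encoding (hypothetical values): match_results = [1, -1, -2, 0]
# means column 0 matched row 1, column 1 is unmatched, column 2 is ignored,
# and column 3 matched row 0.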
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(self.matched_column_indices())
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.size(self.unmatched_column_indices())
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the colum is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(self.ignored_column_indices())
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
self._gather_op(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]),
input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = self._gather_op(input_tensor, gather_indices)
return gathered_tensor
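# Illustrative example (hypothetical values): with
# match_results = [-2, -1, 1], input_tensor = [[a], [b]],
# unmatched_value = [u] and ignored_value = [i], gather_indices is
# [0, 1, 3] into the stacked tensor [[i], [u], [a], [b]], so the result
# is [[i], [u], [b]].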
class Matcher(object):
"""Abstract base class for matcher.
"""
__metaclass__ = ABCMeta
def __init__(self, use_matmul_gather=False):
"""Constructs a Matcher.
Args:
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
"""
self._use_matmul_gather = use_matmul_gather
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
with tf.name_scope(scope, 'Match', [similarity_matrix, params]) as scope:
return Match(self._match(similarity_matrix, **params),
self._use_matmul_gather)
@abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
|
{
"content_hash": "25ae42b80fc4333ac11820c95c6f840f",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 80,
"avg_line_length": 36.22540983606557,
"alnum_prop": 0.693517366217898,
"repo_name": "jiaphuan/models",
"id": "4c0a9c811957bb06c883b249eee5cee01258efe3",
"size": "9529",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "research/object_detection/core/matcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1224262"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "71060"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5957505"
},
{
"name": "Shell",
"bytes": "76858"
}
],
"symlink_target": ""
}
|
"""Test getting the total resource volume.
"""
import datetime
import testscenarios
from ceilometer.publisher import rpc
from ceilometer import sample
from ceilometer.tests import api as tests_api
from ceilometer.tests import db as tests_db
load_tests = testscenarios.load_tests_apply_scenarios
class TestSumResourceVolume(tests_api.TestBase,
tests_db.MixinTestsWithBackendScenarios):
def setUp(self):
super(TestSumResourceVolume, self).setUp()
for i in range(3):
s = sample.Sample(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id',
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.sample',
},
source='source1',
)
msg = rpc.meter_message_from_counter(
s,
self.CONF.publisher_rpc.metering_secret,
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum')
expected = {'volume': 5 + 6 + 7}
self.assertEqual(data, expected)
def test_no_time_bounds_non_admin(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum',
headers={"X-Roles": "Member",
"X-Project-Id": "project1"})
self.assertEqual(data, {'volume': 5 + 6 + 7})
def test_no_time_bounds_wrong_tenant(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum',
headers={"X-Roles": "Member",
"X-Project-Id": "?"})
self.assertEqual(data, {'volume': None})
def test_start_timestamp(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum',
start_timestamp='2012-09-25T11:30:00')
expected = {'volume': 6 + 7}
self.assertEqual(data, expected)
def test_start_timestamp_after(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum',
start_timestamp='2012-09-25T12:34:00')
expected = {'volume': None}
self.assertEqual(data, expected)
def test_end_timestamp(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum',
end_timestamp='2012-09-25T11:30:00')
expected = {'volume': 5}
self.assertEqual(data, expected)
def test_end_timestamp_before(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum',
end_timestamp='2012-09-25T09:54:00')
expected = {'volume': None}
self.assertEqual(data, expected)
def test_start_end_timestamp(self):
data = self.get('/resources/resource-id/meters/volume.size/volume/sum',
start_timestamp='2012-09-25T11:30:00',
end_timestamp='2012-09-25T11:32:00')
expected = {'volume': 6}
self.assertEqual(data, expected)
|
{
"content_hash": "678b26d9b14f2a73c45db53ee37207c7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 37.95454545454545,
"alnum_prop": 0.5529940119760479,
"repo_name": "rackerlabs/instrumented-ceilometer",
"id": "952e21a1f7793e1bab677f0fdc04505d70fe9bb3",
"size": "4071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/api/v1/test_sum_resource_volume_scenarios.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149656"
},
{
"name": "JavaScript",
"bytes": "361114"
},
{
"name": "Python",
"bytes": "1897887"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
}
|
print type([])  # same as [].__class__
print [].__class__
print list
print isinstance([], list)  # True
print isinstance([], dict)  # False
print isinstance([], object)  # True
print isinstance([], (dict, object))  # True, checks whether [] is a dict or an object
print dir([])  # list all method names of list
print help([])  # print the documentation explaining how to use list
a = ['abc', 'def']
print list.__len__(a) #same as len(a)
list.append(a, "another") # same as a.append("another")
print a
|
{
"content_hash": "abe459cd503e2e90eb5f4ebb50e71edb",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 72,
"avg_line_length": 31.266666666666666,
"alnum_prop": 0.6417910447761194,
"repo_name": "williamHuang5468/ExpertPython",
"id": "b56953be1f8fd1bc87584a38f41f45969f0ffd1a",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Classes/SubClass/Introspecting instances of built-in types/Type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3112"
}
],
"symlink_target": ""
}
|
"""ManagementSystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
url(r'^$', 'system.controllers.topView.redirectToTop'),
url(r'^system/', include('system.urls', namespace='system')),
url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', 'django.contrib.auth.views.login',
{'template_name': 'login.html'}, name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout',
{'template_name': 'logout.html'}, name='logout'),
]
urlpatterns += staticfiles_urlpatterns()
|
{
"content_hash": "31ff0a5e12c97a014365286938634ef8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 41.892857142857146,
"alnum_prop": 0.690537084398977,
"repo_name": "XMLPro/ManagementSystem",
"id": "c1f54086181c04fb361afb3213a84b303618fdc4",
"size": "1173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ManagementSystem/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132456"
},
{
"name": "HTML",
"bytes": "19215"
},
{
"name": "JavaScript",
"bytes": "4097"
},
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "36673"
}
],
"symlink_target": ""
}
|
"""
GUI APP to convert currency 1 to currency 2's value given exchange rate
#Author : Satheesh Gopalan
"""
import Tkinter
from Tkinter import *
import tkMessageBox
def OnClick():
"""
Handles the button click
Gets input
Verifies that input is valid
"""
currency1 = 0
exchange_rate = 0
state = 0
try:
currency1 = float(E1.get())
exchange_rate = float(E2.get())
except ValueError:
print "Bad Input!"
state = 1
E1.delete(0, END)
E2.delete(0,END)
result(currency1,exchange_rate,state)
def result(x,y,z):
""" Print the Result in message box
Variable z is 1 when the input was invalid, 0 otherwise
Variables x and y are the inputs
"""
if z ==1 :
tkMessageBox.showinfo("RESULTS", "Invalid Input !! \n Please Try Again !! \n\n Press \"OK\" to quit.")
root.quit()
else :
r = x * y
tkMessageBox.showinfo("RESULTS", "Value in Currency 2 is : " + str(r) + "\n Rounded off Value :" + str(round(r,2)))
# GUI
root = Tk()
root.title("Currency Converter")
#Labels & Enteries
L1 = Label(root, text="Currency 1 Amount ")
L1.pack()
L1.grid_location(0, 0)  # note: grid_location() only queries a grid cell; pack() above handles layout
E1 = Entry(root , bd =5 )
E1.pack()
L1.grid_location(20,0)
L2 = Label(root, text="Exchange Rate ")
L2.pack()
L2.grid_location(0,20)
E2 = Entry(root , bd =5 )
E2.pack()
L2.grid_location(20,30)
#BUTTON
B = Tkinter.Button(root, text ="Convert!",command = OnClick )
B.pack()
B.grid_location(30,30)
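# A minimal sketch (illustrative; defined but never called by the app) of the
# same layout using the grid geometry manager, which places widgets in
# explicit rows and columns. Note pack() and grid() must not be mixed inside
# the same container.
def grid_layout_demo():
    demo = Toplevel(root)
    Label(demo, text="Currency 1 Amount ").grid(row=0, column=0)
    Entry(demo, bd=5).grid(row=0, column=1)
    Label(demo, text="Exchange Rate ").grid(row=1, column=0)
    Entry(demo, bd=5).grid(row=1, column=1)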
#LOOP IT TILL YOU MAKE IT
root.mainloop()
|
{
"content_hash": "a9cfcf19457405fb6f9014f7a0aa1d54",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 123,
"avg_line_length": 20.358974358974358,
"alnum_prop": 0.6001259445843828,
"repo_name": "satheeshgopalan/python",
"id": "db5abb589ee749d5582531c4bd4f8fd24ac5524d",
"size": "1588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CurrencyConverter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8764"
}
],
"symlink_target": ""
}
|
import subprocess
from subprocess import check_output, CalledProcessError
from flask import Flask, render_template, request, redirect, flash, url_for, jsonify
app = Flask(__name__)
illegal_chars = " '\"/\:;|&`()$<>*?{}[]"
illegal_chars_t = tuple(illegal_chars)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/run', methods=['POST'])
def run():
    name = request.args.get("name")
    out = None
    err = None
    if not name:
        # Guard against a missing query parameter; `name` would be None here.
        err = "Missing required query parameter: name"
    else:
        for i in illegal_chars_t:
            if i in name:
                err = "Illegal characters: " + illegal_chars + ", found illegal character in: " + name
                break
    if err is None:
try:
out = check_output(["/bin/sh", "scripts/run.sh", name], stderr=subprocess.STDOUT)
except CalledProcessError as e:
out = e.returncode
err = e.output
result = {
"Result":{
"out": out,
"err": err,
}
}
return jsonify(result)
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
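def _example_client():
    """A minimal client sketch (illustrative; assumes the server above runs on
    localhost:5000 and that the third-party `requests` package is installed).
    Note that `name` travels as a query-string parameter even though the
    route only accepts POST."""
    import requests
    resp = requests.post("http://localhost:5000/run", params={"name": "job1"})
    return resp.json()  # {"Result": {"out": ..., "err": ...}}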
|
{
"content_hash": "1a078990f1458609c6f86169c0501058",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 98,
"avg_line_length": 27.05128205128205,
"alnum_prop": 0.5620853080568721,
"repo_name": "shrkw/run-your-command-via-web-ui-py",
"id": "81af2b602fcdc851495f310ef51d1aca768f5c94",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90"
},
{
"name": "JavaScript",
"bytes": "2"
},
{
"name": "Python",
"bytes": "492"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous helper utils for Tensorflow."""
import os
import numpy as np
import tensorflow as tf
from typing import Any, Iterable, List, Union
TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
"""A type that represents a valid Tensorflow expression."""
TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
"""A type that can be converted to a valid Tensorflow expression."""
def run(*args, **kwargs) -> Any:
"""Run the specified ops in the default session."""
assert_tf_initialized()
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x: Any) -> bool:
"""Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
"""Convert a Tensorflow shape to a list of ints."""
return [dim.value for dim in shape]
def flatten(x: TfExpressionEx) -> TfExpression:
"""Shortcut function for flattening a tensor."""
with tf.name_scope("Flatten"):
return tf.reshape(x, [-1])
def log2(x: TfExpressionEx) -> TfExpression:
"""Logarithm in base 2."""
with tf.name_scope("Log2"):
return tf.log(x) * np.float32(1.0 / np.log(2.0))
def exp2(x: TfExpressionEx) -> TfExpression:
"""Exponent in base 2."""
with tf.name_scope("Exp2"):
return tf.exp(x * np.float32(np.log(2.0)))
def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
"""Linear interpolation."""
with tf.name_scope("Lerp"):
return a + (b - a) * t
def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
"""Linear interpolation with clip."""
with tf.name_scope("LerpClip"):
return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def absolute_name_scope(scope: str) -> tf.name_scope:
"""Forcefully enter the specified name scope, ignoring any surrounding scopes."""
return tf.name_scope(scope + "/")
def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
"""Forcefully enter the specified variable scope, ignoring any surrounding scopes."""
return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False)
def _sanitize_tf_config(config_dict: dict = None) -> dict:
# Defaults.
cfg = dict()
cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is.
cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is.
cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
# User overrides.
if config_dict is not None:
cfg.update(config_dict)
return cfg
def init_tf(config_dict: dict = None) -> None:
"""Initialize TensorFlow session using good default settings."""
# Skip if already initialized.
if tf.get_default_session() is not None:
return
# Setup config dict and random seeds.
cfg = _sanitize_tf_config(config_dict)
np_random_seed = cfg["rnd.np_random_seed"]
if np_random_seed is not None:
np.random.seed(np_random_seed)
tf_random_seed = cfg["rnd.tf_random_seed"]
if tf_random_seed == "auto":
tf_random_seed = np.random.randint(1 << 31)
if tf_random_seed is not None:
tf.set_random_seed(tf_random_seed)
# Setup environment variables.
for key, value in list(cfg.items()):
fields = key.split(".")
if fields[0] == "env":
assert len(fields) == 2
os.environ[fields[1]] = str(value)
# Create default TensorFlow session.
create_session(cfg, force_as_default=True)
def assert_tf_initialized():
"""Check that TensorFlow session has been initialized."""
if tf.get_default_session() is None:
raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().")
def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
"""Create tf.Session based on config dict."""
# Setup TensorFlow config proto.
cfg = _sanitize_tf_config(config_dict)
config_proto = tf.ConfigProto()
for key, value in cfg.items():
fields = key.split(".")
if fields[0] not in ["rnd", "env"]:
obj = config_proto
for field in fields[:-1]:
obj = getattr(obj, field)
setattr(obj, fields[-1], value)
# Create session.
session = tf.Session(config=config_proto)
if force_as_default:
# pylint: disable=protected-access
session._default_session = session.as_default()
session._default_session.enforce_nesting = False
session._default_session.__enter__() # pylint: disable=no-member
return session
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
"""Initialize all tf.Variables that have not already been initialized.
Equivalent to the following, but more efficient and does not bloat the tf graph:
tf.variables_initializer(tf.report_uninitialized_variables()).run()
"""
assert_tf_initialized()
if target_vars is None:
target_vars = tf.global_variables()
test_vars = []
test_ops = []
with tf.control_dependencies(None): # ignore surrounding control_dependencies
for var in target_vars:
assert is_tf_expression(var)
try:
tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
except KeyError:
# Op does not exist => variable may be uninitialized.
test_vars.append(var)
with absolute_name_scope(var.name.split(":")[0]):
test_ops.append(tf.is_variable_initialized(var))
init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
run([var.initializer for var in init_vars])
def set_vars(var_to_value_dict: dict) -> None:
"""Set the values of given tf.Variables.
Equivalent to the following, but more efficient and does not bloat the tf graph:
    tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
"""
assert_tf_initialized()
ops = []
feed_dict = {}
for var, value in var_to_value_dict.items():
assert is_tf_expression(var)
try:
setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op
except KeyError:
with absolute_name_scope(var.name.split(":")[0]):
with tf.control_dependencies(None): # ignore surrounding control_dependencies
setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter
ops.append(setter)
feed_dict[setter.op.inputs[1]] = value
run(ops, feed_dict)
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
"""Create tf.Variable with large initial value without bloating the tf graph."""
assert_tf_initialized()
assert isinstance(initial_value, np.ndarray)
zeros = tf.zeros(initial_value.shape, initial_value.dtype)
var = tf.Variable(zeros, *args, **kwargs)
set_vars({var: initial_value})
return var
def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):
"""Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
Can be used as an input transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if nhwc_to_nchw:
images = tf.transpose(images, [0, 3, 1, 2])
return (images - drange[0]) * ((drange[1] - drange[0]) / 255)
def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1):
"""Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
Can be used as an output transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if shrink > 1:
ksize = [1, 1, shrink, shrink]
images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
if nchw_to_nhwc:
images = tf.transpose(images, [0, 2, 3, 1])
scale = 255 / (drange[1] - drange[0])
images = images * scale + (0.5 - drange[0] * scale)
return tf.saturate_cast(images, tf.uint8)
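# A minimal usage sketch (illustrative; assumes TensorFlow 1.x and that this
# module is importable as dnnlib.tflib.tfutil, matching the docstrings above;
# the array shape is arbitrary):
#
#   import numpy as np
#   from dnnlib.tflib import tfutil
#
#   tfutil.init_tf({"rnd.np_random_seed": 123})
#   fake = np.random.uniform(-1, 1, size=(4, 3, 64, 64)).astype(np.float32)
#   images_uint8 = tfutil.run(tfutil.convert_images_to_uint8(fake))
#   assert images_uint8.dtype == np.uint8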
|
{
"content_hash": "42e4c1edf9acd5707e13b3724820c7bd",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 173,
"avg_line_length": 38.604166666666664,
"alnum_prop": 0.6499730167296276,
"repo_name": "microsoft/DiscoFaceGAN",
"id": "a431a4d4d18a32c9cd44a14ce89f35e038dc312c",
"size": "9267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dnnlib/tflib/tfutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "380445"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import ipyvuetify as v
import ipywidgets as widgets
import traitlets
from traitlets import * # noqa
from . import traitlets as vt
import os
import vaex.jupyter
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
vaex_components = {}
def component(name):
def wrapper(cls):
vaex_components[name] = cls
return cls
return wrapper
# mixin class
class UsesVaexComponents(traitlets.HasTraits):
@traitlets.default('components')
def _components(self):
return vaex_components
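# A minimal sketch of how the registry above is used (the component name and
# class below are illustrative, not part of vaex):
#
#   @component('vaex-hello')
#   class Hello(v.VuetifyTemplate):
#       template = traitlets.Unicode('<span>hello</span>').tag(sync=True)
#
# Any template-based widget whose `components` dict defaults to
# vaex_components (e.g. via UsesVaexComponents) can then reference
# <vaex-hello/> inside its own Vue template.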
class PlotTemplate(v.VuetifyTemplate):
show_output = traitlets.Bool(False).tag(sync=True)
new_output = traitlets.Bool(False).tag(sync=True)
title = traitlets.Unicode('Vaex').tag(sync=True)
drawer = traitlets.Bool(True).tag(sync=True)
clipped = traitlets.Bool(False).tag(sync=True)
model = traitlets.Any(True).tag(sync=True)
floating = traitlets.Bool(False).tag(sync=True)
dark = traitlets.Bool(False).tag(sync=True)
mini = traitlets.Bool(False).tag(sync=True)
components = traitlets.Dict(None, allow_none=True).tag(sync=True, **widgets.widget.widget_serialization)
type = traitlets.Unicode('temporary').tag(sync=True)
items = traitlets.List(['red', 'green', 'purple']).tag(sync=True)
button_text = traitlets.Unicode('menu').tag(sync=True)
drawers = traitlets.Any(['Default (no property)', 'Permanent', 'Temporary']).tag(sync=True)
template = traitlets.Unicode('''
<v-app>
<v-navigation-drawer
v-model="model"
:permanent="type === 'permanent'"
:temporary="type === 'temporary'"
:clipped="clipped"
:floating="floating"
:mini-variant="mini"
absolute
overflow
>
<control-widget/>
</v-navigation-drawer>
<v-navigation-drawer
v-model="show_output"
:temporary="type === 'temporary'"
clipped
right
absolute
overflow
>
<h3>Output</h3>
<output-widget />
</v-navigation-drawer>
<v-app-bar :clipped-left="clipped" absolute dense>
<v-app-bar-nav-icon
v-if="type !== 'permanent'"
@click.stop="model = !model"
></v-app-bar-nav-icon>
<v-toolbar-title>{{title}} </v-toolbar-title>
<v-spacer></v-spacer>
<v-btn icon @click.stop="show_output = !show_output; new_output=false">
<v-badge color="red" overlap>
<template v-slot:badge v-if="new_output">
<span>!</span>
</template>
<v-icon>error_outline</v-icon>
</v-badge>
</v-btn>
</v-app-bar>
<v-content>
<main-widget/>
</v-content>
</v-app>
''').tag(sync=True)
@component('vaex-counter')
class Counter(v.VuetifyTemplate):
characters = traitlets.List(traitlets.Unicode()).tag(sync=True)
value = traitlets.Integer(None, allow_none=True)
format = traitlets.Unicode('{: 14,d}')
prefix = traitlets.Unicode('').tag(sync=True)
postfix = traitlets.Unicode('').tag(sync=True)
@traitlets.observe('value')
def _value(self, change):
text = self.format.format(self.value)
        self.characters = [k.replace(' ', '&nbsp;') for k in text]
template = traitlets.Unicode('''
<div>
{{ prefix }}
<v-slide-y-transition :key=index v-for="(character, index) in characters" leave-absolute>
<span :key="character" v-html='character'></span>
</v-slide-y-transition>
{{ postfix }}
</div>
''').tag(sync=True)
@component('vaex-status')
class Status(v.VuetifyTemplate):
value = traitlets.Unicode().tag(sync=True)
template = traitlets.Unicode('''
<v-slide-y-transition leave-absolute>
<span :key="value" v-html='value'></span>
</v-slide-y-transition>
''').tag(sync=True)
@component('vaex-progress-circular')
class ProgressCircularNoAnimation(v.VuetifyTemplate):
"""v-progress-circular that avoids animations"""
parts = traitlets.List(traitlets.Unicode()).tag(sync=True)
width = traitlets.Integer().tag(sync=True)
size = traitlets.Integer().tag(sync=True)
value = traitlets.Float().tag(sync=True)
    color = traitlets.Unicode('primary').tag(sync=True)
    text = traitlets.Unicode('').tag(sync=True)
hidden = traitlets.Bool(False).tag(sync=True)
template = traitlets.Unicode('''
<v-progress-circular v-if="!hidden" :key="value" :size="size" :width="width" :value="value" :color="color">{{ text }}</v-progress-circular>
<v-progress-circular v-else style="visibility: hidden" :key="value" :size="size" :width="width" :value="value" :color="color">{{ text }}</v-progress-circular>
''').tag(sync=True)
@component('vaex-expression')
class Expression(v.TextField):
df = traitlets.Any()
valid = traitlets.Bool(True)
value = vt.Expression(None, allow_none=True)
@traitlets.default('v_model')
def _v_model(self):
columns = self.df.get_column_names(strings=False)
if columns:
if len(columns) >= 2:
return columns[0] + " + " + columns[1]
else:
return columns[0]
columns = self.df.get_column_names()
return columns[0]
    @traitlets.default('value')
    def _value(self):
        # A @traitlets.default handler must return the default value.
        return None if self.v_model is None else self.df[self.v_model]
@traitlets.default('label')
def _label(self):
return "Custom expression"
@traitlets.default('placeholder')
def _placeholder(self):
return "Enter a custom expression"
@traitlets.default('prepend_icon')
def _prepend_icon(self):
return 'functions'
@traitlets.observe('v_model')
def _on_update_v_model(self, change):
self.check_expression()
@traitlets.observe('value')
def _on_update_value(self, change):
self.v_model = None if self.value is None else str(self.value)
def check_expression(self):
try:
self.df.validate_expression(self.v_model)
except Exception as e:
self.success_messages = None
self.error_messages = str(e)
self.valid = False
return
self.error_messages = None
self.success_messages = "Looking good"
self.valid = True
self.value = self.v_model
        self._clear_success()
return True
@vaex.jupyter.debounced(delay_seconds=1.5, skip_gather=True)
    def _clear_success(self):
self.success_messages = None
ExpressionTextArea = Expression
class ExpressionSelectionTextArea(ExpressionTextArea):
# selection is v_model
selection_name = traitlets.Any('default')
def __init__(self, **kwargs):
super().__init__(**kwargs)
# self.update_selection()
@traitlets.default('v_model')
def _v_model(self):
columns = self.df.get_column_names(strings=False)
return columns[0] + ' == 0'
@traitlets.default('label')
def _label(self):
return "Filter by custom expression"
@traitlets.default('placeholder')
def _placeholder(self):
return "Enter a custom (boolean) expression"
@traitlets.default('prepend_icon')
def _prepend_icon(self):
return 'filter_list'
@traitlets.observe('v_model')
def update_custom_selection(self, change):
if self.check_expression():
self.update_selection()
def update_selection(self):
self.df.select(self.v_model, name=self.selection_name)
class ColumnPicker(v.VuetifyTemplate):
df = traitlets.Any()
items = traitlets.List(['foo', 'bar']).tag(sync=True)
tooltip = traitlets.Unicode('Add example expression based on column...').tag(sync=True)
template = traitlets.Unicode('''
<v-layout>
<v-menu offset-y>
<template v-slot:activator="{ on: menu }">
<v-tooltip bottom>
{{ tooltip }}
<template v-slot:activator="{ on: tooltip}">
<v-btn
v-on="{...menu, ...tooltip}" fab color='primary' small>
<v-icon>add</v-icon>
</v-btn>
</template>
</v-tooltip>
</template>
<v-list>
<v-list-item
v-for="(item, index) in items"
:key="index"
@click="menu_click(index)">
<v-list-item-title>{{ item }}</v-list-item-title>
</v-list-item>
</v-list>
</v-menu>
</v-layout>''').tag(sync=True)
@traitlets.default('items')
def _items(self):
return self.df.get_column_names()
def vue_menu_click(self, data):
pass
class ColumnExpressionAdder(ColumnPicker):
component = traitlets.Any()
target = traitlets.Unicode('v_model')
def vue_menu_click(self, data):
value = getattr(self.component, self.target)
setattr(self.component, self.target, value + ' + ' + str(self.items[data]))
class ColumnSelectionAdder(ColumnPicker):
component = traitlets.Any()
target = traitlets.Unicode('v_model')
def vue_menu_click(self, data):
value = getattr(self.component, self.target)
setattr(self.component, self.target, value + ' & ({} == 0)'.format(self.items[data]))
@component('vaex-selection-editor')
class SelectionEditor(v.VuetifyTemplate):
df = traitlets.Any()
input = traitlets.Any()
adder = traitlets.Any()
on_close = traitlets.Any()
components = traitlets.Dict(None, allow_none=True).tag(sync=True, **widgets.widget.widget_serialization)
@traitlets.default('components')
def _components(self):
return {'component-input': self.input, 'adder': self.adder}
@traitlets.default('input')
def _input(self):
return ExpressionSelectionTextArea(df=self.df)
@traitlets.default('adder')
def _adder(self):
return ColumnSelectionAdder(df=self.df, component=self.input)
template = traitlets.Unicode('''
<v-layout column>
<component-input></component-input>
<v-layout pa-4>
<adder></adder>
</v-layout>
</v-layout>''').tag(sync=True)
class Selection(v.VuetifyTemplate):
df = traitlets.Any().tag(sync_ref=True)
name = traitlets.Unicode('default').tag(sync=True)
value = traitlets.Unicode(None, allow_none=True).tag(sync=True)
@traitlets.default('template')
def _template(self):
return load_template('vue/selection.vue')
@traitlets.default('components')
def _components(self):
return vaex_components
class SelectionToggleList(v.VuetifyTemplate):
df = traitlets.Any().tag(sync_ref=True)
title = traitlets.Unicode('Choose selections').tag(sync=True)
selection_names = traitlets.List(traitlets.Unicode()).tag(sync=True)
value = traitlets.List(traitlets.Unicode()).tag(sync=True)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.df.signal_selection_changed.connect(self._on_change_selection)
def _on_change_selection(self, df, name):
new_names = [name for name in self.df.selection_histories.keys() if not name.startswith('__') and df.has_selection(name)]
self.selection_names = new_names
self.value = [v for v in self.value if v in self.selection_names]
@traitlets.default('selection_names')
def _selection_names(self):
return [name for name in self.df.selection_histories.keys() if not name.startswith('__')]
@traitlets.default('template')
def _template(self):
return load_template('vue/selection_toggle_list.vue')
@traitlets.default('components')
def _components(self):
return vaex_components
class VirtualColumnEditor(v.VuetifyTemplate):
df = traitlets.Any()
editor = traitlets.Any()
adder = traitlets.Any()
on_close = traitlets.Any()
column_name = traitlets.Unicode('mycolumn').tag(sync=True)
components = traitlets.Dict(None, allow_none=True).tag(sync=True, **widgets.widget.widget_serialization)
@traitlets.default('components')
def _components(self):
return {'editor': self.editor, 'adder': self.adder}
@traitlets.default('editor')
def _editor(self):
return ExpressionTextArea(df=self.df, rows=1)
@traitlets.default('adder')
def _adder(self):
return ColumnExpressionAdder(df=self.df, component=self.editor)
template = traitlets.Unicode('''
<v-layout column style="position: relative">
<v-text-field placeholder="e.g. mycolumn" label="Column name" v-model='column_name' prepend-icon='edit'>test</v-text-field>
<editor></editor>
<div style="position: absolute; right: 20px; bottom: 30px; opacity: 0.8">
<adder></adder>
</div>
</v-layout>''').tag(sync=True)
def save_column(self):
if self.editor.valid:
self.df[self.column_name] = self.editor.v_model
if self.on_close:
self.on_close()
class ColumnList(v.VuetifyTemplate, vt.ColumnsMixin):
_metadata = traitlets.Dict(default_value=None, allow_none=True).tag(sync=True)
column_filter = traitlets.Unicode('').tag(sync=True)
valid_expression = traitlets.Bool(False).tag(sync=True)
dialog_open = traitlets.Bool(False).tag(sync=True)
editor = traitlets.Any()
editor_open = traitlets.Bool(False).tag(sync=True)
tooltip = traitlets.Unicode('Add example expression based on column...').tag(sync=True)
template = traitlets.Unicode(load_template('vue/columnlist.vue')).tag(sync=True)
def __init__(self, df, **kwargs):
super(ColumnList, self).__init__(df=df, **kwargs)
traitlets.dlink((self.editor.editor, 'valid'), (self, 'valid_expression'))
self.editor.editor.on_event('keypress.enter', self._on_enter)
@traitlets.default('editor')
def _editor(self):
editor = VirtualColumnEditor(df=self.df)
return editor
@traitlets.default('components')
def _components(self):
return {'content-editor': self.editor}
def _on_enter(self, *ignore):
if self.valid_expression:
self.editor.save_column()
self.dialog_open = False
def vue_add_virtual_column(self, data):
self.dialog_open = True
def vue_save_column(self, data):
self.editor.save_column()
self.dialog_open = False
def vue_column_click(self, data):
name = data['name']
if name in self.df.virtual_columns:
self.editor.editor.v_model = self.df.virtual_columns[name]
self.editor.column_name = name
self.dialog_open = True
class ColumnPicker(v.VuetifyTemplate, vt.ColumnsMixin):
template = traitlets.Unicode(load_template('vue/column-select.vue')).tag(sync=True)
label = traitlets.Unicode('Column').tag(sync=True)
value = vt.Expression(None, allow_none=True).tag(sync=True)
tools_items_default = [
{'value': 'pan-zoom', 'icon': 'pan_tool', 'tooltip': "Pan & zoom"},
{'value': 'select-rect', 'icon': 'mdi-selection-drag', 'tooltip': "Rectangle selection"},
{'value': 'select-x', 'icon': 'mdi-drag-vertical', 'tooltip': "X-Range selection"},
]
selection_items_default = [
{'value': 'replace', 'icon': 'mdi-circle-medium', 'tooltip': "Replace mode"},
{'value': 'and', 'icon': 'mdi-set-center', 'tooltip': "And mode"},
{'value': 'or', 'icon': 'mdi-set-all', 'tooltip': "Or mode"},
{'value': 'subtract', 'icon': 'mdi-set-left', 'tooltip': "Subtract mode"},
]
transform_items_default = ['identity', 'log', 'log10', 'log1p']
class ToolsSpeedDial(v.VuetifyTemplate):
expand = traitlets.Bool(False).tag(sync=True)
value = traitlets.Unicode(tools_items_default[0]['value'], allow_none=True).tag(sync=True)
items = traitlets.Any(tools_items_default).tag(sync=True)
template = traitlets.Unicode(load_template('vue/tools-speed-dial.vue')).tag(sync=True)
children = traitlets.List().tag(sync=True, **widgets.widget_serialization)
def vue_action(self, data):
self.value = data['value']
class ToolsToolbar(v.VuetifyTemplate):
interact_value = traitlets.Unicode(tools_items_default[0]['value'], allow_none=True).tag(sync=True)
interact_items = traitlets.Any(tools_items_default).tag(sync=True)
transform_value = traitlets.Unicode(transform_items_default[0]).tag(sync=True)
transform_items = traitlets.List(traitlets.Unicode(), default_value=transform_items_default).tag(sync=True)
supports_transforms = traitlets.Bool(True).tag(sync=True)
supports_normalize = traitlets.Bool(True).tag(sync=True)
z_normalize = traitlets.Bool(False, allow_none=True).tag(sync=True)
normalize = traitlets.Bool(False).tag(sync=True)
selection_mode_items = traitlets.Any(selection_items_default).tag(sync=True)
selection_mode = traitlets.Unicode('replace').tag(sync=True)
@traitlets.default('template')
def _template(self):
return load_template('vue/tools-toolbar.vue')
@observe('z_normalize')
def _observe_normalize(self, change):
self.normalize = bool(self.z_normalize)
class VuetifyTemplate(v.VuetifyTemplate):
_metadata = traitlets.Dict(default_value=None, allow_none=True).tag(sync=True)
class ContainerCard(v.VuetifyTemplate):
_metadata = Dict(default_value=None, allow_none=True).tag(sync=True)
@traitlets.default('template')
def _template(self):
return load_template('vue/card.vue')
title = traitlets.Unicode(None, allow_none=True).tag(sync=True)
subtitle = traitlets.Unicode(None, allow_none=True).tag(sync=True)
text = traitlets.Unicode(None, allow_none=True).tag(sync=True)
main = traitlets.Any().tag(sync=True, **widgets.widget_serialization)
controls = traitlets.List().tag(sync=True, **widgets.widget_serialization)
card_props = traitlets.Dict().tag(sync=True)
main_props = traitlets.Dict().tag(sync=True)
show_controls = traitlets.Bool(False).tag(sync=True)
class Html(v.Html):
_metadata = traitlets.Dict(default_value=None, allow_none=True).tag(sync=True)
class LinkList(VuetifyTemplate):
items = traitlets.List(
[
{'title': 'Vaex (data aggregation)', 'url': "https://github.com/vaexio/vaex", 'img': 'https://vaex.io/img/logos/logo-grey.svg', },
{'icon': "dashboard", 'title': "Voila (dashboard)", 'url': "https://github.com/voila-dashboards/voila"},
{'icon': "mdi-database", 'title': "DataFrame server", 'url': "http://dataframe.vaex.io/"},
{'title': 'ipyvolume (3d viz)', 'url': "https://github.com/maartenbreddels/ipyvolume", 'img': 'https://raw.githubusercontent.com/maartenbreddels/ipyvolume/master/misc/icon.svg', },
{'title': 'GitHub Repo', 'url': 'https://github.com/vaexio/vaex', 'img': 'https://github.githubassets.com/pinned-octocat.svg'},
{'icon': "widgets", 'title': "jupyter widgets", 'url': "https://github.com/jupyter-widgets/ipywidgets"},
],
).tag(sync=True)
@traitlets.default('template')
def _template(self):
return load_template('vue/link-list.vue')
|
{
"content_hash": "2426fa60c5debf6a44edaf61e15ee40b",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 192,
"avg_line_length": 35.07027027027027,
"alnum_prop": 0.6266954377311961,
"repo_name": "maartenbreddels/vaex",
"id": "eae9766bde688ba3273336b8a21569c3c8f56c0f",
"size": "19464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/vaex-jupyter/vaex/jupyter/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1888"
},
{
"name": "C++",
"bytes": "81166"
},
{
"name": "CSS",
"bytes": "6604"
},
{
"name": "GLSL",
"bytes": "6204"
},
{
"name": "HTML",
"bytes": "177613"
},
{
"name": "JavaScript",
"bytes": "1489136"
},
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "PHP",
"bytes": "33807"
},
{
"name": "Python",
"bytes": "1893232"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from time import gmtime, strftime
import pylogging
import unittest
import os
import datetime
class TestPyLoggingMethods(unittest.TestCase):
def test_filters(self):
""" Test Logger Class """
now = datetime.datetime.now()
log_path = os.path.dirname(os.path.abspath(__file__)) + '/' + now.strftime('%Y-%m-%d') + '.log'
if os.path.isfile(log_path):
os.remove(log_path)
self._logger = pylogging.PyLogging(LOG_FILE_PATH = os.path.dirname(os.path.abspath(__file__)) + '/')
filterAdded = self._logger.addFilter(self._filterAdded)
filterRemoved = self._logger.addFilter(self._filterRemoved)
self._logger.removeFilter(filterRemoved)
self._logger.info("Line1.")
self._logger.warning("Line2.")
self._logger.error("Line3.")
self._logger.critical("Line4.")
self._logger.log("Line5.")
with open(log_path, 'r') as LogFile:
data = LogFile.readlines()
data = [item.rstrip() for item in data if item != '\n']
        self.assertEqual(data[0], 'INFO: <' + now.strftime('%Y-%m-%d %H:%M') + '> Line1.info')
        self.assertEqual(data[1], 'WARNING: <' + now.strftime('%Y-%m-%d %H:%M') + '> Line2.warning')
        self.assertEqual(data[2], 'ERROR: <' + now.strftime('%Y-%m-%d %H:%M') + '> Line3.error')
        self.assertEqual(data[3], 'CRITICAL: <' + now.strftime('%Y-%m-%d %H:%M') + '> Line4.critical')
        self.assertEqual(data[4], 'LOG: <' + now.strftime('%Y-%m-%d %H:%M') + '> Line5.log')
if os.path.isfile(log_path):
os.remove(log_path)
def _filterAdded(self, type, msg):
return msg + type
def _filterRemoved(self, type, msg):
return msg + type
if __name__ == '__main__':
unittest.main()
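# A minimal sketch of the filter contract exercised above (illustrative; it
# relies only on what the test shows: addFilter() returns a handle accepted
# by removeFilter(), and each filter is called as filter(type, msg) and must
# return the possibly-modified message):
#
#   logger = pylogging.PyLogging()
#
#   def mask_secrets(type, msg):
#       return msg.replace("password", "*****")
#
#   handle = logger.addFilter(mask_secrets)
#   logger.info("user password accepted")  # logged with "*****" in place
#   logger.removeFilter(handle)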
|
{
"content_hash": "b4e7c1635320608aed79f1aa8bc783a5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 104,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6481700118063755,
"repo_name": "Clivern/PyLogging",
"id": "f61d487b381127e9fa1f3873357d6adbaa0b389d",
"size": "1694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17147"
}
],
"symlink_target": ""
}
|
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from students.models import Child_family_detail
class Child_family_detailView(object):
model = Child_family_detail
def get_template_names(self):
"""Nest templates within child_family_detail directory."""
tpl = super(Child_family_detailView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'child_family_detail'
self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
return [self.template_name]
class Child_family_detailDateView(Child_family_detailView):
date_field = 'created_date'
month_format = '%m'
class Child_family_detailBaseListView(Child_family_detailView):
paginate_by = 10
class Child_family_detailArchiveIndexView(
Child_family_detailDateView, Child_family_detailBaseListView, ArchiveIndexView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailCreateView(Child_family_detailView, CreateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailDateDetailView(Child_family_detailDateView, DateDetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailDayArchiveView(
Child_family_detailDateView, Child_family_detailBaseListView, DayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailDeleteView(Child_family_detailView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailDetailView(Child_family_detailView, DetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailListView(Child_family_detailBaseListView, ListView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailMonthArchiveView(
Child_family_detailDateView, Child_family_detailBaseListView, MonthArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailTodayArchiveView(
Child_family_detailDateView, Child_family_detailBaseListView, TodayArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailUpdateView(Child_family_detailView, UpdateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailWeekArchiveView(
Child_family_detailDateView, Child_family_detailBaseListView, WeekArchiveView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('students_child_family_detail_list')
class Child_family_detailYearArchiveView(
Child_family_detailDateView, Child_family_detailBaseListView, YearArchiveView):
make_object_list = True
|
{
"content_hash": "a665def56b47073df75a2f2b87ce9e5d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 85,
"avg_line_length": 32.891666666666666,
"alnum_prop": 0.7137066126171776,
"repo_name": "mravikumar281/staging-server",
"id": "c1ffff7f36c4a92c75b19b9f31e4be845ef78775",
"size": "3947",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "students_old/views/child_family_detail_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "805986"
},
{
"name": "HTML",
"bytes": "3648803"
},
{
"name": "JavaScript",
"bytes": "3804321"
},
{
"name": "Makefile",
"bytes": "3152"
},
{
"name": "PHP",
"bytes": "5016"
},
{
"name": "Python",
"bytes": "2107084"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
}
|
import re
import unittest
from unittest import mock
import pytest
from google.cloud.bigquery import DEFAULT_RETRY, DatasetReference, Table, TableReference
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem
from google.cloud.exceptions import NotFound
from parameterized import parameterized
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.bigquery import (
BigQueryCursor,
BigQueryHook,
_api_resource_configs_duplication_check,
_cleanse_time_partitioning,
_split_tablename,
_validate_src_fmt_configs,
_validate_value,
)
PROJECT_ID = "bq-project"
CREDENTIALS = "bq-credentials"
DATASET_ID = "bq_dataset"
TABLE_ID = "bq_table"
PARTITION_ID = "20200101"
VIEW_ID = 'bq_view'
JOB_ID = "1234"
LOCATION = 'europe-north1'
TABLE_REFERENCE_REPR = {
'tableId': TABLE_ID,
'datasetId': DATASET_ID,
'projectId': PROJECT_ID,
}
TABLE_REFERENCE = TableReference.from_api_repr(TABLE_REFERENCE_REPR)
class _BigQueryBaseTestClass(unittest.TestCase):
def setUp(self) -> None:
class MockedBigQueryHook(BigQueryHook):
def _get_credentials_and_project_id(self):
return CREDENTIALS, PROJECT_ID
self.hook = MockedBigQueryHook()
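# The base class above stubs out credential discovery so no real GCP auth is
# needed. A minimal sketch of the same pattern for another Google hook
# (illustrative; GCSHook is assumed to inherit the same GoogleBaseHook
# method):
#
#   from airflow.providers.google.cloud.hooks.gcs import GCSHook
#
#   class MockedGCSHook(GCSHook):
#       def _get_credentials_and_project_id(self):
#           return "fake-credentials", "fake-project"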
class TestBigQueryHookMethods(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryConnection")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.build")
def test_bigquery_client_creation(self, mock_build, mock_authorize, mock_bigquery_connection):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'bigquery', 'v2', http=mock_authorize.return_value, cache_discovery=False
)
mock_bigquery_connection.assert_called_once_with(
service=mock_build.return_value,
project_id=PROJECT_ID,
hook=self.hook,
use_legacy_sql=self.hook.use_legacy_sql,
location=self.hook.location,
num_retries=self.hook.num_retries,
)
assert mock_bigquery_connection.return_value == result
@mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__")
def test_bigquery_bigquery_conn_id_deprecation_warning(
self,
mock_base_hook_init,
):
bigquery_conn_id = "bigquery conn id"
warning_message = (
"The bigquery_conn_id parameter has been deprecated. "
"You should pass the gcp_conn_id parameter."
)
with pytest.warns(DeprecationWarning) as warnings:
BigQueryHook(bigquery_conn_id=bigquery_conn_id)
mock_base_hook_init.assert_called_once_with(
delegate_to=None,
gcp_conn_id='bigquery conn id',
impersonation_chain=None,
)
assert warning_message == str(warnings[0].message)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_location_propagates_properly(self, run_with_config, _):
# TODO: this creates side effect
assert self.hook.location is None
self.hook.run_query(sql='select 1', location='US')
assert run_with_config.call_count == 1
assert self.hook.location == 'US'
def test_bigquery_insert_rows_not_implemented(self):
with pytest.raises(NotImplementedError):
self.hook.insert_rows(table="table", rows=[1, 2])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_true(self, mock_client):
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_false(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_true(self, mock_client):
mock_client.return_value.list_partitions.return_value = [PARTITION_ID]
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_table(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_partition(self, mock_client):
mock_client.return_value.list_partitions.return_value = []
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.read_gbq')
def test_get_pandas_df(self, mock_read_gbq):
self.hook.get_pandas_df('select 1')
mock_read_gbq.assert_called_once_with(
'select 1', credentials=CREDENTIALS, dialect='legacy', project_id=PROJECT_ID, verbose=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_options(self, mock_get_service):
with pytest.raises(
Exception,
match=(
r"\['THIS IS NOT VALID'\] contains invalid schema update options."
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]"
),
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_and_write_disposition(self, mock_get_service):
with pytest.raises(
Exception,
match="schema_update_options is only allowed if"
" write_disposition is 'WRITE_APPEND' or 'WRITE_TRUNCATE'.",
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY',
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete",
side_effect=[False, True],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_cancel_queries(self, mock_client, mock_poll_job_complete):
running_job_id = 3
self.hook.running_job_id = running_job_id
self.hook.cancel_query()
        mock_poll_job_complete.assert_has_calls([mock.call(running_job_id), mock.call(running_job_id)])
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=None)
mock_client.return_value.cancel_job.assert_called_once_with(job_id=running_job_id)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_default(
self,
mock_insert,
_,
):
self.hook.run_query('query')
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect(self, mock_insert, _):
self.hook.run_query('query', use_legacy_sql=False)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_legacy_with_query_params(self, mock_insert, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
self.hook.run_query('query', use_legacy_sql=False, query_params=params)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_sql_dialect_legacy_with_query_params_fails(self, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
with pytest.raises(ValueError, match="Query parameters are not allowed when using legacy SQL"):
self.hook.run_query('query', use_legacy_sql=True, query_params=params)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_without_sql_fails(self, _):
with pytest.raises(
TypeError, match=r"`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`"
):
self.hook.run_query(sql=None)
@parameterized.expand(
[
(['ALLOW_FIELD_ADDITION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_schema_update_options(
self,
schema_update_options,
write_disposition,
mock_insert,
mock_get_service,
):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['schemaUpdateOptions'] == schema_update_options
assert kwargs['configuration']['query']['writeDisposition'] == write_disposition
@parameterized.expand(
[
(
['INCORRECT_OPTION'],
None,
r"\['INCORRECT_OPTION'\] contains invalid schema update options\. "
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'],
None,
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'\] contains invalid "
r"schema update options\. Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION'],
None,
r"schema_update_options is only allowed if write_disposition is "
r"'WRITE_APPEND' or 'WRITE_TRUNCATE'",
),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_schema_update_options_incorrect(
self,
schema_update_options,
write_disposition,
expected_regex,
mock_get_service,
):
with pytest.raises(ValueError, match=expected_regex):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
@parameterized.expand([(True,), (False,)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_api_resource_configs(
self,
bool_val,
mock_insert,
_,
):
self.hook.run_query('query', api_resource_configs={'query': {'useQueryCache': bool_val}})
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useQueryCache'] is bool_val
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_api_resource_configs_duplication_warning(self, mock_get_service):
with pytest.raises(
ValueError,
match=(
r"Values of useLegacySql param are duplicated\. api_resource_configs "
r"contained useLegacySql param in `query` config and useLegacySql was "
r"also provided with arg to run_query\(\) method\. Please remove duplicates\."
),
):
self.hook.run_query(
'query', use_legacy_sql=True, api_resource_configs={'query': {'useLegacySql': False}}
)
def test_validate_value(self):
with pytest.raises(
TypeError, match="case_1 argument must have a type <class 'dict'> not <class 'str'>"
):
_validate_value("case_1", "a", dict)
assert _validate_value("case_2", 0, int) is None
def test_duplication_check(self):
with pytest.raises(
ValueError,
match=r"Values of key_one param are duplicated. api_resource_configs contained key_one param in"
r" `query` config and key_one was also provided with arg to run_query\(\) method. "
r"Please remove duplicates.",
):
key_one = True
_api_resource_configs_duplication_check("key_one", key_one, {"key_one": False})
assert _api_resource_configs_duplication_check("key_one", key_one, {"key_one": True}) is None
def test_validate_src_fmt_configs(self):
source_format = "test_format"
valid_configs = ["test_config_known", "compatibility_val"]
backward_compatibility_configs = {"compatibility_val": "val"}
with pytest.raises(
ValueError, match="test_config_unknown is not a valid src_fmt_configs for type test_format."
):
# This config should raise a value error.
src_fmt_configs = {"test_config_unknown": "val"}
_validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
src_fmt_configs = {"test_config_known": "val"}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
assert (
"test_config_known" in src_fmt_configs
        ), "src_fmt_configs should contain all known src_fmt_configs"
assert (
"compatibility_val" in src_fmt_configs
), "_validate_src_fmt_configs should add backward_compatibility config"
@parameterized.expand([("AVRO",), ("PARQUET",), ("NEWLINE_DELIMITED_JSON",), ("DATASTORE_BACKUP",)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_non_csv_as_src_fmt(self, fmt, _):
try:
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
source_uris=[],
source_format=fmt,
autodetect=True,
)
except ValueError:
self.fail("run_load() raised ValueError unexpectedly!")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_extract(self, mock_insert):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
destination_cloud_storage_uris = ["gs://bucket/file.csv"]
expected_configuration = {
"extract": {
"sourceTable": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"compression": "NONE",
"destinationUris": destination_cloud_storage_uris,
"destinationFormat": "CSV",
"fieldDelimiter": ",",
"printHeader": True,
}
}
self.hook.run_extract(
source_project_dataset_table=source_project_dataset_table,
destination_cloud_storage_uris=destination_cloud_storage_uris,
)
mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.SchemaField")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows(self, mock_client, mock_schema, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
selected_fields=["field_1", "field_2"],
page_token="page123",
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
        mock_schema.assert_has_calls([mock.call(x, "") for x in ["field_1", "field_2"]])
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
selected_fields=mock.ANY,
page_token='page123',
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows_with_empty_selected_fields(self, mock_client, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
page_token="page123",
selected_fields=[],
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
page_token='page123',
selected_fields=None,
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_run_table_delete(self, mock_client, mock_table):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
self.hook.run_table_delete(source_project_dataset_table, ignore_if_missing=False)
mock_table.from_string.assert_called_once_with(source_project_dataset_table)
mock_client.return_value.delete_table.assert_called_once_with(
table=mock_table.from_string.return_value, not_found_ok=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_create_new_table(self, mock_get, mock_create):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = []
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_create.assert_called_once_with(table_resource=table_resource, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_already_exists(self, mock_get, mock_update):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = [{"tableId": TABLE_ID}]
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_update.assert_called_once_with(table_resource=table_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_granting(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = []
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert view_access in dataset.access_entries
mock_update.assert_called_once_with(
fields=["access"],
dataset_resource=dataset.to_api_repr(),
project_id=PROJECT_ID,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_already_granted(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = [view_access]
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert len(mock_update.calls) == 0
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset_tables_list(self, mock_client):
table_list = [
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-2"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-2"},
]
table_list_response = [Table.from_api_repr({"tableReference": t}) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables_list(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference, max_results=None
)
assert table_list == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_poll_job_complete(self, mock_client):
self.hook.poll_job_complete(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
mock_client.assert_called_once_with(location=LOCATION, project_id=PROJECT_ID)
mock_client.return_value.get_job.assert_called_once_with(job_id=JOB_ID)
mock_client.return_value.get_job.return_value.done.assert_called_once_with(retry=DEFAULT_RETRY)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("logging.Logger.info")
def test_cancel_query_jobs_to_cancel(
self,
mock_logger_info,
poll_job_complete,
):
poll_job_complete.return_value = True
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
poll_job_complete.assert_called_once_with(job_id=JOB_ID)
mock_logger_info.has_call(mock.call("No running BigQuery jobs to cancel."))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_timeout(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 13
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
        mock_logger_info.assert_any_call(
            f"Stopping polling due to timeout. Job with id {JOB_ID} "
            "has not completed cancel and may or may not finish."
        )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_completed(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 12 + [True]
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(mock.call(f"Job successfully canceled: {PROJECT_ID}, {PROJECT_ID}"))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_schema(self, mock_client):
table = {
"tableReference": TABLE_REFERENCE_REPR,
"schema": {
"fields": [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
]
},
}
mock_client.return_value.get_table.return_value = Table.from_api_repr(table)
result = self.hook.get_schema(dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
assert "fields" in result
assert len(result["fields"]) == 2
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_with_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee', 'policyTags': {'names': ['sensitive']}},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{
'name': 'emp_name',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Name of employee',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
'policyTags': {'names': ['sensitive']},
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=True,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_without_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'salary', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{'name': 'field_1', 'type': 'STRING', 'mode': 'REQUIRED'},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee'},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED', 'description': 'Name of employee'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=False,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_source_format(self, mock_get_service):
with pytest.raises(
Exception,
match=r"JSON is not a valid source format. Please use one of the following types: \['CSV', "
r"'NEWLINE_DELIMITED_JSON', 'AVRO', 'GOOGLE_SHEETS', 'DATASTORE_BACKUP', 'PARQUET'\]",
):
self.hook.run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_succeed(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
self.hook.insert_all(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.return_value.insert_rows.assert_called_once_with(
table=mock_client.return_value.get_table.return_value,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_fail(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
mock_client.return_value.insert_rows.return_value = ["some", "errors"]
with pytest.raises(AirflowException, match="insert error"):
self.hook.insert_all(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, rows=rows, fail_on_error=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['labels'] == {'label1': 'test1', 'label2': 'test2'}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.QueryJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_insert_job(self, mock_client, mock_query_job):
job_conf = {
"query": {
"query": "SELECT * FROM test",
"useLegacySql": "False",
}
}
mock_query_job._JOB_TYPE = "query"
self.hook.insert_job(
configuration=job_conf,
job_id=JOB_ID,
project_id=PROJECT_ID,
location=LOCATION,
)
mock_client.assert_called_once_with(
project_id=PROJECT_ID,
location=LOCATION,
)
mock_query_job.from_api_repr.assert_called_once_with(
{
'configuration': job_conf,
'jobReference': {'jobId': JOB_ID, 'projectId': PROJECT_ID, 'location': LOCATION},
},
mock_client.return_value,
)
mock_query_job.from_api_repr.return_value.result.assert_called_once_with()
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with pytest.raises(Exception, match="INTERNAL: No default project is specified"):
_split_tablename("dataset.table", None)
@parameterized.expand(
[
("project", "dataset", "table", "dataset.table"),
("alternative", "dataset", "table", "alternative:dataset.table"),
("alternative", "dataset", "table", "alternative.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt:dataset.table"),
]
)
def test_split_tablename(self, project_expected, dataset_expected, table_expected, table_input):
default_project_id = "project"
project, dataset, table = _split_tablename(table_input, default_project_id)
assert project_expected == project
assert dataset_expected == dataset
assert table_expected == table
@parameterized.expand(
[
("alt1:alt2:alt3:dataset.table", None, "Use either : or . to specify project got {}"),
(
"alt1.alt.dataset.table",
None,
r"Expect format of \(<project\.\|<project\:\)<dataset>\.<table>, got {}",
),
(
"alt1:alt2:alt.dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1:alt2:alt:dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1.alt.dataset.table",
"var_x",
r"Format exception for var_x: Expect format of "
r"\(<project\.\|<project:\)<dataset>.<table>, got {}",
),
]
)
def test_invalid_syntax(self, table_input, var_name, exception_message):
default_project_id = "project"
with pytest.raises(Exception, match=exception_message.format(table_input)):
_split_tablename(table_input, default_project_id, var_name)
class TestTableOperations(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_view(self, mock_bq_client, mock_table):
view = {
'query': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
"useLegacySql": False,
}
self.hook.create_empty_table(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, view=view, retry=DEFAULT_RETRY
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'view': view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_patch_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
self.hook.patch_table(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
description=description_patched,
expiration_time=expiration_time_patched,
friendly_name=friendly_name_patched,
labels=labels_patched,
schema=schema_patched,
time_partitioning=time_partitioning_patched,
require_partition_filter=require_partition_filter_patched,
view=view_patched,
)
body = {
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
body["tableReference"] = TABLE_REFERENCE_REPR
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_succeed(self, mock_bq_client, mock_table):
self.hook.create_empty_table(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
}
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_extras_succeed(self, mock_bq_client, mock_table):
schema_fields = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'created', 'type': 'DATE', 'mode': 'REQUIRED'},
]
time_partitioning = {"field": "created", "type": "DAY"}
cluster_fields = ['name']
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
time_partitioning=time_partitioning,
cluster_fields=cluster_fields,
)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
},
'schema': {'fields': schema_fields},
'timePartitioning': time_partitioning,
'clustering': {'fields': cluster_fields},
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_tables_list(self, mock_client):
table_list = [
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table1",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table1",
},
"type": "TABLE",
"creationTime": "1565781859261",
},
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table2",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table2",
},
"type": "TABLE",
"creationTime": "1565782713480",
},
]
table_list_response = [Table.from_api_repr(t) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference,
max_results=None,
retry=DEFAULT_RETRY,
)
for res, exp in zip(result, table_list):
assert res["tableId"] == exp["tableReference"]["tableId"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_materialized_view(self, mock_bq_client, mock_table):
query = """
SELECT product, SUM(amount)
FROM `test-project-id.test_dataset_id.test_table_prefix*`
GROUP BY product
"""
materialized_view = {
'query': query,
'enableRefresh': True,
'refreshIntervalMs': 2000000,
}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
materialized_view=materialized_view,
retry=DEFAULT_RETRY,
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'materializedView': materialized_view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
class TestBigQueryCursor(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_with_parameters(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
conf = {
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
}
mock_insert.assert_called_once_with(configuration=conf, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_many(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.executemany("SELECT %(foo)s", [{"foo": "bar"}, {"foo": "baz"}])
assert mock_insert.call_count == 2
        mock_insert.assert_has_calls(
            [
                mock.call(
                    configuration={
                        'query': {
                            'query': "SELECT 'bar'",
                            'priority': 'INTERACTIVE',
                            'useLegacySql': True,
                            'schemaUpdateOptions': [],
                        }
                    },
                    project_id=PROJECT_ID,
                ),
                mock.call(
                    configuration={
                        'query': {
                            'query': "SELECT 'baz'",
                            'priority': 'INTERACTIVE',
                            'useLegacySql': True,
                            'schemaUpdateOptions': [],
                        }
                    },
                    project_id=PROJECT_ID,
                ),
            ]
        )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_description(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
with pytest.raises(NotImplementedError):
bq_cursor.description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_close(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.close()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_rowcount(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.rowcount
assert -1 == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.next")
def test_fetchone(self, mock_next, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchone()
        assert mock_next.call_count == 1
assert mock_next.return_value == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone", side_effect=[1, 2, 3, None]
)
def test_fetchall(self, mock_fetchone, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchall()
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone")
def test_fetchmany(self, mock_fetchone, mock_get_service):
side_effect_values = [1, 2, 3, None]
bq_cursor = self.hook.get_cursor()
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany()
assert [1] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(2)
assert [1, 2] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(5)
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_no_jobid(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = None
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_buffer(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.buffer = [1, 2]
result = bq_cursor.next()
assert 1 == result
result = bq_cursor.next()
assert 2 == result
bq_cursor.all_pages_loaded = True
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next(self, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {
"rows": [
{"f": [{"v": "one"}, {"v": 1}]},
{"f": [{"v": "two"}, {"v": 2}]},
],
"pageToken": None,
"schema": {
"fields": [
{"name": "field_1", "type": "STRING"},
{"name": "field_2", "type": "INTEGER"},
]
},
}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.location = LOCATION
result = bq_cursor.next()
assert ['one', 1] == result
result = bq_cursor.next()
assert ['two', 2] == result
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=LOCATION, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_next_no_rows(self, mock_flush_results, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
result = bq_cursor.next()
assert result is None
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=None, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
assert mock_flush_results.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_flush_cursor_in_execute(self, _, mock_insert, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
assert mock_insert.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_flush_cursor(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.page_token = '456dcea9-fcbf-4f02-b570-83f5297c685e'
bq_cursor.job_id = 'c0a79ae4-0e72-4593-a0d0-7dbbf726f193'
bq_cursor.all_pages_loaded = True
bq_cursor.buffer = [('a', 100, 200), ('b', 200, 300)]
bq_cursor.flush_results()
assert bq_cursor.page_token is None
assert bq_cursor.job_id is None
assert not bq_cursor.all_pages_loaded
assert bq_cursor.buffer == []
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_arraysize(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
assert bq_cursor.buffersize is None
assert bq_cursor.arraysize == 1
bq_cursor.set_arraysize(10)
assert bq_cursor.buffersize == 10
assert bq_cursor.arraysize == 10
class TestDatasetsOperations(_BigQueryBaseTestClass):
def test_create_empty_dataset_no_dataset_id_err(self):
with pytest.raises(ValueError, match=r"Please specify `datasetId`"):
self.hook.create_empty_dataset(dataset_id=None, project_id=None)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_params(self, mock_client, mock_dataset):
self.hook.create_empty_dataset(project_id=PROJECT_ID, dataset_id=DATASET_ID, location=LOCATION)
expected_body = {
"location": LOCATION,
"datasetReference": {"datasetId": DATASET_ID, "projectId": PROJECT_ID},
}
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(expected_body)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(dataset_reference=dataset)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_use_values_from_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(
dataset_reference=dataset,
location="Unknown location",
dataset_id="Fashionable Dataset",
project_id="Amazing Project",
)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset(self, mock_client):
_expected_result = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
expected_result = Dataset.from_api_repr(_expected_result)
mock_client.return_value.get_dataset.return_value = expected_result
result = self.hook.get_dataset(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.get_dataset.assert_called_once_with(
dataset_ref=DatasetReference(PROJECT_ID, DATASET_ID)
)
assert result == expected_result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_datasets_list(self, mock_client):
datasets = [
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
},
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_1_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_1_test"},
},
]
return_value = [DatasetListItem(d) for d in datasets]
mock_client.return_value.list_datasets.return_value = return_value
result = self.hook.get_datasets_list(project_id=PROJECT_ID)
mock_client.return_value.list_datasets.assert_called_once_with(
project=PROJECT_ID,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
)
for exp, res in zip(datasets, result):
assert res.full_dataset_id == exp["id"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_delete_dataset(self, mock_client):
delete_contents = True
self.hook.delete_dataset(
project_id=PROJECT_ID, dataset_id=DATASET_ID, delete_contents=delete_contents
)
mock_client.return_value.delete_dataset.assert_called_once_with(
dataset=DatasetReference(PROJECT_ID, DATASET_ID),
delete_contents=delete_contents,
retry=DEFAULT_RETRY,
not_found_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_patch_dataset(self, mock_get_service):
dataset_resource = {"access": [{"role": "WRITER", "groupByEmail": "cloud-logs@google.com"}]}
method = mock_get_service.return_value.datasets.return_value.patch
self.hook.patch_dataset(
dataset_id=DATASET_ID, project_id=PROJECT_ID, dataset_resource=dataset_resource
)
method.assert_called_once_with(projectId=PROJECT_ID, datasetId=DATASET_ID, body=dataset_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_dataset(self, mock_client, mock_dataset):
dataset_resource = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
method = mock_client.return_value.update_dataset
dataset = Dataset.from_api_repr(dataset_resource)
mock_dataset.from_api_repr.return_value = dataset
method.return_value = dataset
result = self.hook.update_dataset(
dataset_id=DATASET_ID,
project_id=PROJECT_ID,
dataset_resource=dataset_resource,
fields=["location"],
)
mock_dataset.from_api_repr.assert_called_once_with(dataset_resource)
method.assert_called_once_with(
dataset=dataset,
fields=["location"],
retry=DEFAULT_RETRY,
)
assert result == dataset
class TestTimePartitioningInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('timePartitioning') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_with_auto_detect(self, mock_insert):
destination_project_dataset_table = "autodetect.table"
self.hook.run_load(destination_project_dataset_table, [], [], autodetect=True)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['autodetect'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'load': {
'autodetect': False,
'createDisposition': 'CREATE_IF_NEEDED',
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'sourceFormat': 'CSV',
'sourceUris': [],
'writeDisposition': 'WRITE_EMPTY',
'ignoreUnknownValues': False,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'skipLeadingRows': 0,
'fieldDelimiter': ',',
'quote': None,
'allowQuotedNewlines': False,
'encoding': 'UTF-8',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'query': {
'query': 'select 1',
'priority': 'INTERACTIVE',
'useLegacySql': True,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'schemaUpdateOptions': [],
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'allowLargeResults': False,
'flattenResults': None,
'writeDisposition': 'WRITE_EMPTY',
'createDisposition': 'CREATE_IF_NEEDED',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {'type': 'DAY'}
assert tp_out == expect
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast', {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
assert tp_out == expect
class TestClusteringInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['clustering'] == {'fields': ['field1', 'field2']}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_default(self, mock_insert):
self.hook.run_query(sql='select 1')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['clustering'] == {'fields': ['field1', 'field2']}
class TestBigQueryHookLegacySql(_BigQueryBaseTestClass):
"""Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_hook_uses_legacy_sql_by_default(self, mock_insert, _):
self.hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_legacy_sql_override_propagates_properly(
self, mock_insert, mock_get_service, mock_get_creds_and_proj_id
):
bq_hook = BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is False
class TestBigQueryHookRunWithConfiguration(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.LoadJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_run_with_configuration_location(self, mock_client, mock_job):
running_job_id = 'job_vjdi28vskdui2onru23'
location = 'asia-east1'
mock_job._JOB_TYPE = "load"
conf = {"load": {}}
self.hook.running_job_id = running_job_id
self.hook.location = location
self.hook.run_with_configuration(conf)
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=location)
mock_job.from_api_repr.assert_called_once_with(
{
"configuration": conf,
"jobReference": {"jobId": mock.ANY, "projectId": PROJECT_ID, "location": location},
},
mock_client.return_value,
)
class TestBigQueryWithKMS(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_kms(self, mock_bq_client, mock_table):
schema_fields = [{"name": "id", "type": "STRING", "mode": "REQUIRED"}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
"tableReference": {"tableId": TABLE_ID, 'projectId': PROJECT_ID, 'datasetId': DATASET_ID},
"schema": {"fields": schema_fields},
"encryptionConfiguration": encryption_configuration,
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_with_kms(self, mock_create):
external_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
source_uris = ['test_data.csv']
source_format = 'CSV'
autodetect = False
compression = 'NONE'
ignore_unknown_values = False
max_bad_records = 10
skip_leading_rows = 1
field_delimiter = ','
quote_character = None
allow_quoted_newlines = False
allow_jagged_rows = False
encoding = "UTF-8"
labels = {'label1': 'test1', 'label2': 'test2'}
schema_fields = [
{
'mode': 'REQUIRED',
'name': 'id',
'type': 'STRING',
'description': None,
'policyTags': {'names': []},
}
]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_external_table(
external_project_dataset_table=external_project_dataset_table,
source_uris=source_uris,
source_format=source_format,
autodetect=autodetect,
compression=compression,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
skip_leading_rows=skip_leading_rows,
field_delimiter=field_delimiter,
quote_character=quote_character,
allow_jagged_rows=allow_jagged_rows,
encoding=encoding,
allow_quoted_newlines=allow_quoted_newlines,
labels=labels,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values,
'schema': {'fields': schema_fields},
'maxBadRecords': max_bad_records,
'csvOptions': {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'allowJaggedRows': allow_jagged_rows,
'encoding': encoding,
},
},
'tableReference': {
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
'tableId': TABLE_ID,
},
'labels': labels,
"encryptionConfiguration": encryption_configuration,
}
mock_create.assert_called_once_with(
table_resource=body,
project_id=PROJECT_ID,
location=None,
exists_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
body = {
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
self.hook.update_table(
table_resource=body,
fields=fields,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
)
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_query(sql='query', encryption_configuration=encryption_configuration)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['query']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_copy_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_copy(
source_project_dataset_tables='p.d.st',
destination_project_dataset_table='p.d.dt',
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['copy']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_load(
destination_project_dataset_table='p.d.dt',
source_uris=['abc.csv'],
autodetect=True,
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['load']['destinationEncryptionConfiguration'] is encryption_configuration
)
class TestBigQueryBaseCursorMethodsDeprecationWarning(unittest.TestCase):
@parameterized.expand(
[
("create_empty_table",),
("create_empty_dataset",),
("get_dataset_tables",),
("delete_dataset",),
("create_external_table",),
("patch_table",),
("insert_all",),
("update_dataset",),
("patch_dataset",),
("get_dataset_tables_list",),
("get_datasets_list",),
("get_dataset",),
("run_grant_dataset_view_access",),
("run_table_upsert",),
("run_table_delete",),
("get_tabledata",),
("get_schema",),
("poll_job_complete",),
("cancel_query",),
("run_with_configuration",),
("run_load",),
("run_copy",),
("run_extract",),
("run_query",),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook")
def test_deprecation_warning(self, func_name, mock_bq_hook):
args, kwargs = [1], {"param1": "val1"}
new_path = re.escape(f"`airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.{func_name}`")
message_pattern = fr"This method is deprecated\.\s+Please use {new_path}"
message_regex = re.compile(message_pattern, re.MULTILINE)
mocked_func = getattr(mock_bq_hook, func_name)
bq_cursor = BigQueryCursor(mock.MagicMock(), PROJECT_ID, mock_bq_hook)
func = getattr(bq_cursor, func_name)
with pytest.warns(DeprecationWarning, match=message_regex):
_ = func(*args, **kwargs)
mocked_func.assert_called_once_with(*args, **kwargs)
assert re.search(f".*{new_path}.*", func.__doc__)
class TestBigQueryWithLabelsAndDescription(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_labels(self, mock_insert):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['labels'] is labels
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_description(self, mock_insert):
description = "Test Description"
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['description'] is description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_labels(self, mock_create):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_create.call_args
        assert kwargs['table_resource']['labels'] == labels
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_description(self, mock_create):
description = "Test Description"
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_create.call_args
assert kwargs['table_resource']['description'] is description
|
{
"content_hash": "908665f9899dfcae9ff800f0245cfa04",
"timestamp": "",
"source": "github",
"line_count": 1999,
"max_line_length": 110,
"avg_line_length": 42.29964982491246,
"alnum_prop": 0.5936587154227326,
"repo_name": "dhuang/incubator-airflow",
"id": "66d0003cac79dd4d610d2c76fe0400a338dd587f",
"size": "85346",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/hooks/test_bigquery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
}
|
"""Lower-level utilities, including some git helpers."""
from fabric.api import env, local, require, settings
from fabric.colors import green
import os
def compare_versions(x, y):
"""
Expects 2 strings in the format of 'X.Y.Z' where X, Y and Z are
integers. It will compare the items which will organize things
properly by their major, minor and bugfix version.
::
>>> my_list = ['v1.13', 'v1.14.2', 'v1.14.1', 'v1.9', 'v1.1']
>>> sorted(my_list, cmp=compare_versions)
['v1.1', 'v1.9', 'v1.13', 'v1.14.1', 'v1.14.2']
"""
def version_to_tuple(version):
# Trim off the leading v
version_list = version[1:].split('.', 2)
        if len(version_list) < 3:
            version_list.extend([0] * (3 - len(version_list)))
try:
return tuple((int(version) for version in version_list))
except ValueError: # not an integer, so it goes to the bottom
return (0, 0, 0)
x_major, x_minor, x_bugfix = version_to_tuple(x)
y_major, y_minor, y_bugfix = version_to_tuple(y)
return (cmp(x_major, y_major) or cmp(x_minor, y_minor)
or cmp(x_bugfix, y_bugfix))
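# Hedged usage sketch, not part of the original module. compare_versions() is
# a Python 2 cmp-style comparator (it leans on the builtin cmp(), which was
# removed in Python 3), so a Python 3 caller would need a cmp() shim plus
# functools.cmp_to_key, roughly:
#
#     from functools import cmp_to_key
#     tags = ['v1.13', 'v1.14.2', 'v1.14.1', 'v1.9', 'v1.1']
#     sorted(tags, key=cmp_to_key(compare_versions))
#     # -> ['v1.1', 'v1.9', 'v1.13', 'v1.14.1', 'v1.14.2']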
def store_deployed_version():
if env.sha_url_template:
env.deployed_version = None
with settings(warn_only=True):
            env.deployed_version = local(
                'curl -s %s' % sha_url(), capture=True).strip('"')
        if env.deployed_version and len(env.deployed_version) > 10:
env.deployed_version = None
else:
print(green("The currently deployed version is %(deployed_version)s"
% env))
def sha_url():
require('sha_url_template')
if env.deployment_type == 'PRODUCTION':
subdomain = 'www.'
else:
subdomain = env.deployment_type.lower() + '.'
return env.sha_url_template % subdomain
def absolute_release_path():
require('path')
require('current_release_path')
return os.path.join(env.path, env.current_release_path)
def branch(ref=None):
"""Return the name of the current git branch."""
ref = ref or "HEAD"
return local("git symbolic-ref %s 2>/dev/null | awk -F/ {'print $NF'}"
% ref, capture=True)
def sha_for_file(input_file, block_size=2**20):
import hashlib
sha = hashlib.sha256()
with open(input_file, 'rb') as f:
for chunk in iter(lambda: f.read(block_size), ''):
sha.update(chunk)
return sha.hexdigest()
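# Hedged usage sketch, not part of the original module (the path below is
# hypothetical):
#
#     digest = sha_for_file('/tmp/release.tar.gz')
#
# Portability note: iter(..., '') compares each chunk to the str sentinel '',
# which matches Python 2's 'rb' reads; under Python 3 the sentinel would have
# to be b'' or the loop never terminates.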
|
{
"content_hash": "0ece9fe4cfdb7a19b81a88602c835ceb",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 80,
"avg_line_length": 35.628571428571426,
"alnum_prop": 0.5966319165998396,
"repo_name": "alexmerser/ops",
"id": "08b2c0df5d108332dc3eb57e46248c5ea391c7ec",
"size": "2494",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "buedafab/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57988"
}
],
"symlink_target": ""
}
|
"""
Created on Thu Feb 19 10:14:34 2015
@author: wirkert
"""
from randomForest import randomForest
from calculateWeightsForDomainAdaption import calculateWeights
import random
import numpy as np
import time
import matplotlib.pyplot as plt
def estimateParametersRealImage(trainingParameters, trainingReflectances, shape, image, trainsegmentation, testsegmentation, activateDA):
sourceReflectancesDA = image[np.nonzero(trainsegmentation)[0], :]
# choose m reflectances for training DA
m = trainingReflectances.shape[0]
sourceReflectancesDA = np.matrix(random.sample(sourceReflectancesDA, m))
#%% 2. determine domain adaptation weights
trainingWeights = np.ones(trainingReflectances.shape[0])
if (activateDA):
trainingWeights = calculateWeights(trainingReflectances, sourceReflectancesDA)
#%% 3. train forest
rf = randomForest(trainingParameters, trainingReflectances, trainingWeights)
#%% 4. estimate the parameters for the image
print "starting to estimate the tissue parameters"
start = time.time()
estimatedParameters = rf.predict(image)
# set to zero if not in segmentation mask
#estimatedParameters[np.where(0 == testsegmentation), :] = 0
end = time.time()
print "time necessary to estimate parameters for image [s]: " + str((end - start))
#%% save the parametric images TODO delete after everything works
# import Image
#
# for i in np.arange(0,estimatedParameters.shape[1]):
# parameterImage_i = np.reshape(estimatedParameters[:,i], shape)
# im = Image.fromarray(parameterImage_i)
# im.save("data/output/" + "parameterImage_" + str(i) + ".tiff")
#%% 6. evaluate data
# for this, create monte carlo simulation for each
# parameter estimate. The resulted reflectance estimate can then be compared to
# the measured reflectance.
from setup import systemPaths
from setup import simulation
import helper.monteCarloHelper as mch
infileString, outfolderMC, outfolderRS, gpumcmlDirectory, gpumcmlExecutable = systemPaths.initPaths()
infile = open(infileString)
BVFs, Vss, ds, SaO2s, rs, nrSamples, photons, wavelengths, FWHM, eHbO2, eHb, nrSimulations = simulation.noisy()
# the estimated parameters within the segmentation
estimatedParametersOnlySegmented = estimatedParameters[np.nonzero(testsegmentation)[0], :]
# the image reflectances from which these parameters where estimated
inputReflectancesOnlySegmented = image[np.nonzero(testsegmentation)[0], :]
# index vector for selecting n samples from this data
indices = np.arange(0, estimatedParametersOnlySegmented.shape[0], 1)
# choose n
n = 20
nSamples = random.sample(indices, n)
estimatedParametersOnlySegmented = estimatedParametersOnlySegmented[nSamples]
inputReflectancesOnlySegmented = inputReflectancesOnlySegmented[nSamples]
# placeholder for the reflectance computed from MC with the estimated parameters
reflectancesFromEstimatedParameters = np.zeros((inputReflectancesOnlySegmented.shape[0], inputReflectancesOnlySegmented.shape[1]+1))
#wavelengths = np.delete(wavelengths, [2, 7])
for i, (BVF, Vs, d) in enumerate(estimatedParametersOnlySegmented):
print('starting simulation ' + str(i) + ' of ' + str(estimatedParametersOnlySegmented.shape[0]))
for j, wavelength in enumerate(wavelengths):
reflectanceValue = mch.runOneSimulation(
wavelength, eHbO2, eHb,
infile, outfolderMC, gpumcmlDirectory, gpumcmlExecutable,
BVF, Vs, d,
# np.mean(rs), SaO2,
# submucosa_BVF=sm_BVF, submucosa_Vs=sm_Vs, submucosa_SaO2=SaO2,
Fwhm = FWHM, nrPhotons=photons)
#print((BVF, Vs, d, wavelength))
reflectancesFromEstimatedParameters[i, j] = reflectanceValue
# correct these reflectances by image quotient
reflectancesFromEstimatedParameters = mch.normalizeImageQuotient(reflectancesFromEstimatedParameters)
wavelengths = mch.removeIqWavelength(wavelengths)
#%% plot data for nicer inspection
from sklearn.metrics import r2_score
r2Score = r2_score(reflectancesFromEstimatedParameters.T, inputReflectancesOnlySegmented.T)
print("r2Score for random forest estimatation of:", str(r2Score))
#%% sort by wavelength:
for plot_i in range(n):
sortedIndices = sorted(range(len(wavelengths)), key=lambda k: wavelengths[k])
plt.figure()
plt.plot(wavelengths[sortedIndices], reflectancesFromEstimatedParameters[plot_i,sortedIndices], 'g-o')
plt.plot(wavelengths[sortedIndices], inputReflectancesOnlySegmented[plot_i,sortedIndices], 'b-o')
print(str(r2_score(reflectancesFromEstimatedParameters[plot_i, :], inputReflectancesOnlySegmented[plot_i, :])))
plt.legend(["estimated", "measurement"])
plt.xlabel("wavelength [m]")
plt.ylabel("normalized reflectance")
plt.savefig("data/output/example_fit_" + str(plot_i) + '.png')
return estimatedParameters, r2Score, reflectancesFromEstimatedParameters, inputReflectancesOnlySegmented
|
{
"content_hash": "b6fb2f068cce9b4e948acdeea42b4950",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 137,
"avg_line_length": 35.11486486486486,
"alnum_prop": 0.7186838560708101,
"repo_name": "NifTK/MITK",
"id": "8635ce03a1a2673c42c2387928ea0805902aa7ca",
"size": "5221",
"binary": false,
"copies": "3",
"ref": "refs/heads/niftk",
"path": "Modules/Biophotonics/python/inverseMonteCarlo/estimateParametersRealImage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2343401"
},
{
"name": "C++",
"bytes": "28797565"
},
{
"name": "CMake",
"bytes": "864932"
},
{
"name": "CSS",
"bytes": "117579"
},
{
"name": "HTML",
"bytes": "101835"
},
{
"name": "JavaScript",
"bytes": "167070"
},
{
"name": "Makefile",
"bytes": "25077"
},
{
"name": "Objective-C",
"bytes": "476388"
},
{
"name": "Python",
"bytes": "62916"
},
{
"name": "QMake",
"bytes": "5583"
},
{
"name": "Shell",
"bytes": "1261"
}
],
"symlink_target": ""
}
|
from django import template
register = template.Library()
def product_upsell(product):
"""
Display the list of products that are upsell candidates for currently viewed product.
"""
goals = None
if product.upselltargets.count() > 0:
goals = product.upselltargets.all()
    return {'goals': goals}
register.inclusion_tag("upsell/product_upsell.html", takes_context=False)(product_upsell)
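# Hedged usage sketch, not part of the original module; the tag and template
# names are taken only from the register call above:
#
#     {% load satchmo_upsell %}
#     {% product_upsell product %}
#
# renders upsell/product_upsell.html with `goals` in its context, i.e. the
# product's upsell targets queryset, or None when there are none.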
|
{
"content_hash": "b7be973cfd440102a87a1af1450da64c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 89,
"avg_line_length": 30.5,
"alnum_prop": 0.6932084309133489,
"repo_name": "ringemup/satchmo",
"id": "bcc6bcd0b9652e66d670701f0133326de777dcfa",
"size": "427",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "satchmo/apps/satchmo_ext/upsell/templatetags/satchmo_upsell.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "73898"
},
{
"name": "Python",
"bytes": "1759077"
}
],
"symlink_target": ""
}
|
""" Functioned to introspect a model """
from django.contrib.contenttypes.models import ContentType
from django.db.models.fields import FieldDoesNotExist
from django.conf import settings
import inspect
def isprop(v):
return isinstance(v, property)
def get_properties_from_model(model_class):
""" Show properties from a model """
properties = []
attr_names = [name for (name, value) in inspect.getmembers(model_class, isprop)]
    for attr_name in attr_names:
        # Filter instead of calling attr_names.remove() mid-iteration, which
        # would silently skip the element that follows each removed name.
        if attr_name.endswith('pk'):
            continue
        properties.append(dict(label=attr_name, name=attr_name.strip('_').replace('_', ' ')))
return sorted(properties, key=lambda k: k['label'])
def get_relation_fields_from_model(model_class):
""" Get related fields (m2m, FK, and reverse FK) """
relation_fields = []
all_fields_names = model_class._meta.get_all_field_names()
for field_name in all_fields_names:
field = model_class._meta.get_field_by_name(field_name)
# get_all_field_names will return the same field
# both with and without _id. Ignore the duplicate.
if field_name[-3:] == '_id' and field_name[:-3] in all_fields_names:
continue
if field[3] or not field[2] or hasattr(field[0], 'related'):
field[0].field_name = field_name
relation_fields += [field[0]]
return relation_fields
def get_direct_fields_from_model(model_class):
""" Direct, not m2m, not FK """
direct_fields = []
all_fields_names = model_class._meta.get_all_field_names()
for field_name in all_fields_names:
field = model_class._meta.get_field_by_name(field_name)
if field[2] and not field[3] and not hasattr(field[0], 'related'):
direct_fields += [field[0]]
return direct_fields
def get_custom_fields_from_model(model_class):
""" django-custom-fields support """
if 'custom_field' in settings.INSTALLED_APPS:
from custom_field.models import CustomField
try:
content_type = ContentType.objects.get(
model=model_class._meta.model_name,
app_label=model_class._meta.app_label)
except ContentType.DoesNotExist:
content_type = None
custom_fields = CustomField.objects.filter(content_type=content_type)
return custom_fields
def get_model_from_path_string(root_model, path):
""" Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
"""
for path_section in path.split('__'):
if path_section:
try:
field = root_model._meta.get_field_by_name(path_section)
except FieldDoesNotExist:
return root_model
if field[2]:
if hasattr(field[0], 'related'):
try:
root_model = field[0].related.parent_model()
except AttributeError:
root_model = field[0].related.model
else:
if hasattr(field[0], 'related_model'):
root_model = field[0].related_model
else:
root_model = field[0].model
return root_model
|
{
"content_hash": "89b55b8ebe6d3f4373f49a4f4fd14f2c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 96,
"avg_line_length": 38.95294117647059,
"alnum_prop": 0.6088794926004228,
"repo_name": "burke-software/django-report-utils",
"id": "f4f826f35bda9db16ec64c45aa102037bcaa1923",
"size": "3311",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "report_utils/model_introspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28576"
}
],
"symlink_target": ""
}
|
import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
DEBUG_TOOLBAR = DEBUG
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Pablo Recio', 'me@pablorecio.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'casterly', # Or path to database file if using sqlite3.
'USER': 'casterly', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASEDIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_ROOT = BASEDIR
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASEDIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'o(zm0jm)i(7m(pakw4)%thnoww-0f^h6qk6ek$fhbbd=x437m4'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'casterly.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'casterly.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(BASEDIR, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'debug_toolbar',
'south',
'gunicorn',
'money',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
try:
from local_settings import *
except ImportError:
pass
|
{
"content_hash": "2156fe8f3536d26af453071e187a4903",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 120,
"avg_line_length": 31.11038961038961,
"alnum_prop": 0.6756418284283031,
"repo_name": "shakaran/casterly",
"id": "2ffa289c0ec1c0dcfb3e417dcc7eab9be43c4f6c",
"size": "4832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "casterly/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "298"
},
{
"name": "Python",
"bytes": "61437"
}
],
"symlink_target": ""
}
|
import os
from examples.core.transforms import *
from examples.core.transform import TransformListFactory, TransformationList
class Pipeline(Transform):
"""Tile a linalg op with `tile_sizes`.
This transform can be configured as follows:
* `ms_unroll`: Level of unrolling of the given loop
* `ms_distance`: Distance between a load and a compute operation
"""
variables = {
"unroll": (IntVariable, []),
"distance": (IntVariable, []),
}
def __init__(self, fun_name: str, op_name: str, **kwargs):
self._parse_variables_in_kwargs(kwargs)
unrolling_str = f"unrolling={self.unroll}"
distance_str = f"distance={self.distance}"
pipeline = (f"alp-modulo-scheduling{{"
f" {unrolling_str} "
f" {distance_str}}},"
f"canonicalize,"
f"cse")
self.pipeline = f"func.func({pipeline})"
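# e.g. with unroll=4 and distance=2 this builds:
# "func.func(alp-modulo-scheduling{ unrolling=4  distance=2},canonicalize,cse)"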
class ExtractKernel(Transform):
"""Tile a linalg op with `tile_sizes`.
This transform can be configured as follows:
* `ms_unroll`: Level of unrolling of the given loop
* `ms_distance`: Distance between a load and a compute operation
"""
def __init__(self, fun_name: str, op_name: str, **kwargs):
self.pipeline = f"alp-extract-kernel," f"canonicalize," f"cse"
class ConvertLoops(Transform):
"""Tile a linalg op with `tile_sizes`.
This transform can be configured as follows:
* `ms_unroll`: Level of unrolling of the given loop
* `ms_distance`: Distance between a load and a compute operation
"""
def __init__(self, fun_name: str, op_name: str, **kwargs):
self._parse_variables_in_kwargs(kwargs)
pipeline = (f"alp-for-to-dowhile{{"
f" anchor-func={fun_name}}},"
f"canonicalize,"
f"cse")
self.pipeline = f"func.func({pipeline})"
|
{
"content_hash": "28eef5fea05f22a4035feff8fb66bb88",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 32.31578947368421,
"alnum_prop": 0.6248642779587406,
"repo_name": "iree-org/iree-llvm-sandbox",
"id": "80d90ebde70d60881820b15a6d51450a7c2ea5f0",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "experimental/alp/python/alp/backend/transforms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3782"
},
{
"name": "C++",
"bytes": "245631"
},
{
"name": "CMake",
"bytes": "21081"
},
{
"name": "MLIR",
"bytes": "81484"
},
{
"name": "Python",
"bytes": "670324"
},
{
"name": "Shell",
"bytes": "101058"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from .grains.rich_text import *
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'wagtaildraftail',
'wagtail.wagtailforms',
'wagtail.wagtailredirects',
'wagtail.wagtailembeds',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailimages',
'wagtail.wagtailsearch',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'testapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_DIR, 'var', 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'var', 'media')
MEDIA_URL = '/media/'
# Wagtail settings
WAGTAIL_SITE_NAME = "testapp"
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'http://example.com'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4*5e^@2%(h#$*b4=ze_kcdw46-$0z#rrf3661c5(&+x^oj=4)+'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
try:
from .local import *
except ImportError:
pass
|
{
"content_hash": "e313b2c7045f979c9cd4053f57a91049",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 26.02097902097902,
"alnum_prop": 0.6885245901639344,
"repo_name": "gasman/wagtaildraftail",
"id": "5ebd5ce1f51964dca053ae9b63d1472c9ac5e93c",
"size": "3721",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/testapp/testapp/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "974"
},
{
"name": "HTML",
"bytes": "2548"
},
{
"name": "JavaScript",
"bytes": "21637"
},
{
"name": "Makefile",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "48232"
},
{
"name": "Shell",
"bytes": "1585"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
__version__ = '0.5'
|
{
"content_hash": "3fe034e9f3dcbcc43b14831b4e2bbe29",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.6557377049180327,
"repo_name": "vmonaco/pohmm",
"id": "6f9eeeb196c95af8e13d7d6b93b42809b9d07662",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pohmm/version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "779"
},
{
"name": "Python",
"bytes": "70448"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0122_cobaltio_product'),
]
operations = [
migrations.AddField(
model_name='test',
name='scan_type',
field=models.TextField(null=True),
),
]
|
{
"content_hash": "031797303b893d023ef8a1faaf13b05c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 46,
"avg_line_length": 20.4375,
"alnum_prop": 0.5596330275229358,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "bc660162d4fa2c0b9144e440b3c64ecf51a09d16",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/db_migrations/0123_scan_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
}
|
"""
Problem 19: Counting Sundays
You are given the following information, but you may prefer to do some research
for yourself.
- 1 Jan 1900 was a Monday.
- Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
- A leap year occurs on any year evenly divisible by 4, but not on a century
unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century
(1 Jan 1901 to 31 Dec 2000)?
"""
from time import time
start = time()
# If you start every year in March, then in each year exactly two Sundays
# fall on the first day of a month, except when such a Sunday falls on
# 1 August or 1 October.
# If 1 October is a Sunday, then 1 August is a Tuesday. So it suffices to
# check in which years 1 August falls on a Sunday or a Tuesday. Those years
# have one such Sunday; all other years have two. At the end of the
# computation you still need to correct for January or February starting on
# a Sunday (in which case 1 August of the previous year would fall on a
# Monday or a Friday, respectively).
aug = 3  # in 1900, 1 August falls on a Wednesday
teller = 0  # in 1901, neither January nor February starts on a Sunday
for x in range(1901, 2001):
aug += 1
if x % 4 == 0:
aug += 1
teller += 2
if aug % 7 in [0, 2]:
teller -= 1
if aug in [1, 5]:
teller -= 1
print teller
print 'Time: ', time() - start
|
{
"content_hash": "14a42fabe8f25775a8db68c717216a61",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 82,
"avg_line_length": 30.84313725490196,
"alnum_prop": 0.7082008900190718,
"repo_name": "hendrikjeb/Euler",
"id": "d0f84f6b981777b73f601127d75fbecf51cf7ac7",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "19.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41446"
}
],
"symlink_target": ""
}
|
from .base_renderer import *
import os
import re
import docutils.writers.html4css1
from docutils.core import publish_parts
from docutils.writers.html4css1 import Writer, HTMLTranslator
docutils_dir = os.path.dirname(docutils.writers.html4css1.__file__)
class GitHubHTMLTranslator(HTMLTranslator):
def visit_literal_block(self, node):
classes = node.attributes['classes']
if len(classes) >= 2 and classes[0] == 'code':
language = classes[1]
del classes[:]
self.body.append(self.starttag(node, 'pre', lang=language, CLASS='codehilite'))
else:
self.body.append(self.starttag(node, 'pre', CLASS='codehilite'))
@renderer
class RstRenderer(MarkupRenderer):
FILENAME_PATTERN_RE = re.compile(r'\.re?st$')
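# Matches filenames ending in '.rst' or '.rest' (e.g. 'README.rst').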
@classmethod
def is_enabled(cls, filename, syntax):
if syntax == "text.restructuredtext":
return True
return cls.FILENAME_PATTERN_RE.search(filename) is not None
def render(self, text, **kwargs):
settings_overrides = {
'cloak_email_addresses': True,
'file_insertion_enabled': False,
'raw_enabled': False,
'strip_comments': True,
'doctitle_xform': False,
'report_level': 5,
'syntax_highlight': 'short',
'math_output': 'latex',
'input_encoding': 'utf-8',
'output_encoding': 'utf-8',
'stylesheet_dirs': [os.path.normpath(os.path.join(docutils_dir, Writer.default_stylesheet))],
'template': os.path.normpath(os.path.join(docutils_dir, Writer.default_template))
}
writer = Writer()
writer.translator_class = GitHubHTMLTranslator
output = publish_parts(
text, writer=writer, settings_overrides=settings_overrides
)
if 'html_body' in output:
return output['html_body']
return ''
|
{
"content_hash": "287791ce912e8178998b3671d2f687f6",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 105,
"avg_line_length": 35.07272727272727,
"alnum_prop": 0.6148263348885433,
"repo_name": "timonwong/OmniMarkupPreviewer",
"id": "f969f741bbc69567799767c676a1cd299d912d52",
"size": "1929",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "OmniMarkupLib/Renderers/RstRenderer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135028"
},
{
"name": "HTML",
"bytes": "550"
},
{
"name": "JavaScript",
"bytes": "366577"
},
{
"name": "Python",
"bytes": "8024507"
},
{
"name": "Ruby",
"bytes": "1168"
},
{
"name": "Smarty",
"bytes": "31148"
},
{
"name": "TeX",
"bytes": "3054"
}
],
"symlink_target": ""
}
|
import os
root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'rson'))
imports = set()
def splitfile(s):
keywords = 'class def'.split()
keywords = ['\n%s ' % x for x in keywords]
info = [(s.split(x, 1) + [x]) for x in keywords]
info.sort(key=lambda x:len(x[0]))
top, bottom, keyword = info[0]
bottom = keyword + bottom
for line in top.splitlines():
if line.startswith('import'):
mod, = line.split()[1:]
imports.add(mod)
return bottom
def split_init(s):
top, bottom = s.split('\n# RSON is developed')
bottom = '\n'.join(x[4:] for x in bottom.splitlines())
return top, splitfile(bottom)
files = 'tokenizer baseobjects dispatcher doublequoted unquoted equals parser __init__'
files = files.split()
files = [open(os.path.join(root, x+'.py'), 'rb').read() for x in files]
init_top, init_bottom = split_init(files.pop())
files = [splitfile(x) for x in files]
result = [init_top]
result.extend('import %s' % x for x in sorted(imports))
result.extend(files)
result.append(init_bottom)
result.append('')
result = '\n'.join(result)
f = open(os.path.join(root, 'rson_single.py'), 'wb')
f.write(result)
f.close()
print
print "rson_single.py written"
print
|
{
"content_hash": "aecfe0c00b040bf7b0a5bdc8c9de60c4",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 87,
"avg_line_length": 29.595238095238095,
"alnum_prop": 0.6387771520514883,
"repo_name": "tundish/rson",
"id": "16f41f14ee108bab3f219d275523b235ab6ec9fe",
"size": "1266",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/makesingle.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70503"
}
],
"symlink_target": ""
}
|
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('quspin')
return config
def setup_package():
try:
import numpy
except:
raise ImportError("build requires numpy for fortran extensions")
import os,sys
if "--omp" in sys.argv:
sys.argv.remove("--omp")
if sys.platform == "win32":
if "CFLAGS" in os.environ:
os.environ["CFLAGS"]=os.environ["CFLAGS"]+" /openmp"
else:
os.environ["CFLAGS"]="/openmp"
else:
if "CFLAGS" in os.environ:
os.environ["CFLAGS"]=os.environ["CFLAGS"]+" -fopenmp"
else:
os.environ["CFLAGS"]="-fopenmp"
if "--default-compiler-flags" in sys.argv:
sys.argv.remove("--default-compiler-flags")
if sys.platform == "win32":
pass
else:
os.environ["CFLAGS"]=os.environ["CFLAGS"]+" -O3 -march=native"
io = open("./conda.recipe/quspin/meta.yaml","r")
meta_file = io.read()
io.close()
meta_file = meta_file.split()
ind = meta_file.index("version")
version = meta_file[ind+2].replace('"','')
metadata = dict(
name='quspin',
version=version,
author="Phillip Weinberg, Marin Bukov",
author_email="weinbe58@gmail.com",
maintainer="Phillip Weinberg, Marin Bukov, Markus Schmitt",
maintainer_email="weinbe58@gmail.com",
download_url="https://github.com/weinbe58/QuSpin.git",
license='BSD',
platforms=["Unix","Windows"]
)
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == '__main__':
setup_package()
|
{
"content_hash": "a46741086ddacb30becd4e0f4e42fc5e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 66,
"avg_line_length": 23.12162162162162,
"alnum_prop": 0.6832261835184102,
"repo_name": "weinbe58/QuSpin",
"id": "ecfdda769a8ffc3efb7e96bc9345e726da574fc3",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1064"
},
{
"name": "C",
"bytes": "2687"
},
{
"name": "C++",
"bytes": "266965"
},
{
"name": "Jupyter Notebook",
"bytes": "1058"
},
{
"name": "Makefile",
"bytes": "608"
},
{
"name": "Python",
"bytes": "1534241"
},
{
"name": "Shell",
"bytes": "933"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from pip.req import parse_requirements
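# NOTE: pip.req was removed in pip 10; this import assumes an older pip (< 10).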
REQS = [str(ir.req) for ir in parse_requirements('requirements.txt', session=False)]
setup(
name='tabletopscanner',
packages=['tabletopscanner'],
include_package_data=True,
install_requires=REQS,
)
|
{
"content_hash": "38d0a3a6d6d5072d35167d105546ae82",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 84,
"avg_line_length": 25.727272727272727,
"alnum_prop": 0.7385159010600707,
"repo_name": "ramseyboy/tabletop-scanner",
"id": "11270c14fdc6c8bc10147edca1a29f28597000e2",
"size": "283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1159"
},
{
"name": "HTML",
"bytes": "1310"
},
{
"name": "JavaScript",
"bytes": "10053"
},
{
"name": "Python",
"bytes": "12384"
},
{
"name": "Shell",
"bytes": "384"
}
],
"symlink_target": ""
}
|
"""Forms for core app."""
import logging
from django import forms
from django.contrib.auth.models import User
from django.forms.fields import CharField
from django.utils.translation import ugettext_lazy as _
from .models import UserProfile
log = logging.getLogger(__name__)
class UserProfileForm(forms.ModelForm):
first_name = CharField(label=_('First name'), required=False, max_length=30)
last_name = CharField(label=_('Last name'), required=False, max_length=30)
class Meta:
model = UserProfile
# Don't allow users edit someone else's user page
fields = ['first_name', 'last_name', 'homepage']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.fields['first_name'].initial = self.instance.user.first_name
self.fields['last_name'].initial = self.instance.user.last_name
except AttributeError:
pass
def save(self, commit=True):
first_name = self.cleaned_data.pop('first_name', None)
last_name = self.cleaned_data.pop('last_name', None)
profile = super().save(commit=commit)
if commit:
user = profile.user
user.first_name = first_name
user.last_name = last_name
user.save()
return profile
class UserDeleteForm(forms.ModelForm):
username = CharField(
label=_('Username'),
help_text=_('Please type your username to confirm.'),
)
class Meta:
model = User
fields = ['username']
def clean_username(self):
data = self.cleaned_data['username']
if self.instance.username != data:
raise forms.ValidationError(_('Username does not match!'))
return data
class UserAdvertisingForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['allow_ads']
class FacetField(forms.MultipleChoiceField):
"""
For filtering searches on a facet.
Has validation for the format of facet values.
"""
def valid_value(self, value):
"""
Although this is a choice field, no choices need to be supplied.
Instead, we just validate that the value is in the correct format for
facet filtering (facet_name:value)
"""
if ':' not in value:
return False
return True
|
{
"content_hash": "8f98c49edb994594f0c150e676589c2e",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 26.943181818181817,
"alnum_prop": 0.6199915647406158,
"repo_name": "rtfd/readthedocs.org",
"id": "34ebfbd0d2ea3fad1bccac3152b405c1d714a9ed",
"size": "2396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/core/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
}
|
import json
import flask
from flask import current_app as app
from jenkins_reporting import db
from jenkins_reporting import stats
root_bp = flask.Blueprint('root', __name__)
iso_bp = flask.Blueprint('iso', __name__, template_folder='templates')
staging_bp = flask.Blueprint('staging', __name__, template_folder='templates')
@root_bp.route('/')
def index():
default_version = app.config["MASTER_CI"]["ISO_JOBS"][0]
url = flask.url_for('iso.iso', version=default_version)
return flask.redirect(url)
def _get_iso_test_types(builds):
latest_build = next(build for build in builds if
(build.downstream.keys()
and build.result in ["SUCCESS", "FAILURE"]))
return sorted(latest_build.downstream.keys())
def _get_ci_config(param_type, param):
master_ci = app.config["MASTER_CI"]
stable_ci = app.config["STABLE_CI"]
if param in master_ci[param_type]:
return master_ci
elif param in stable_ci[param_type]:
return stable_ci
else:
raise Exception("Invalid parameters {0}, {1}".format(
param_type, param))
def _get_jobs_list(param_type):
master_ci = app.config["MASTER_CI"]
stable_ci = app.config["STABLE_CI"]
return sorted(master_ci[param_type] + stable_ci[param_type],
reverse=True)
@iso_bp.route('/<version>')
def iso(version):
job = "{0}.all".format(version)
builds = db.get_iso_builds(job)
if builds:
test_types = _get_iso_test_types(builds)
else:
test_types = []
ci = _get_ci_config("ISO_JOBS", version)
return flask.render_template(
"iso.html",
builds=builds,
test_types=test_types,
version=version,
jenkins=ci["JENKINS"],
staging_jobs=_get_jobs_list("STAGING_JOBS"),
iso_jobs=_get_jobs_list("ISO_JOBS"))
def _prepare_data_for_charts(top_n):
data = []
for k, v in top_n.items():
data.append({"label": k, "value": v})
return json.dumps(data)
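# e.g. {'teamA': 3} -> '[{"label": "teamA", "value": 3}]'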
@staging_bp.route('/<job>')
def staging(job):
ci = _get_ci_config("STAGING_JOBS", job)
builds = db.get_staging_builds(job)
# Materialize the matches as a list: failed_builds is iterated more than
# once below, and filter() returns a one-shot iterator on Python 3.
failed_builds = [build for build in builds if build['result'] == 'FAILURE']
metrics = stats.get_basic_stats(builds, failed_builds)
top_by_team = _prepare_data_for_charts(
stats.get_top_by_team(failed_builds))
return flask.render_template(
"staging.html",
job=job,
metrics=metrics,
builds=failed_builds,
jenkins=ci["JENKINS"],
staging_jobs=_get_jobs_list("STAGING_JOBS"),
iso_jobs=_get_jobs_list("ISO_JOBS"),
top_by_team=top_by_team)
|
{
"content_hash": "6c59abc893f5c96c7593f662cf8f9738",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 27.84375,
"alnum_prop": 0.6154133931911709,
"repo_name": "dkalashnik/failorama",
"id": "587c2fb988631665c2451033301df71508e7ac70",
"size": "2673",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jenkins_reporting/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11756"
},
{
"name": "HTML",
"bytes": "11035"
},
{
"name": "Python",
"bytes": "34753"
},
{
"name": "XSLT",
"bytes": "2056"
}
],
"symlink_target": ""
}
|
import pytest
import json
import pyethereum.processblock as pb
import pyethereum.utils as utils
import pyethereum.bloom as bloom
import os
import sys
def check_testdata(data_keys, expected_keys):
assert set(data_keys) == set(expected_keys), \
"test data changed, please adjust tests"
@pytest.fixture(scope="module")
def vm_tests_fixtures():
"""
Read vm tests from fixtures
fixtures/VMTests/*
"""
# FIXME: assert that the repo is up to date
# cd fixtures; git pull origin develop; cd ..; git commit fixtures
filenames = os.listdir(os.path.join('fixtures', 'VMTests'))
files = [os.path.join('fixtures', 'VMTests', f) for f in filenames]
vm_fixtures = {}
try:
for f, fn in zip(files, filenames):
if f[-5:] == '.json':
vm_fixtures[fn[:-5]] = json.load(open(f, 'r'))
except IOError, e:
raise IOError("Could not read vmtests.json from fixtures",
"Make sure you did 'git submodule init'")
return vm_fixtures
# SETUP TESTS IN GLOBAL NAME SPACE
def gen_func(testdata):
return lambda: do_test_bloom(testdata)
for filename, tests in vm_tests_fixtures().items():
for testname, testdata in tests.items():
if 'logs' not in testdata or 'log' not in testname.lower():
continue
func_name = 'test_%s_%s' % (filename, testname)
globals()[func_name] = gen_func(testdata['logs'])
def decode_int_from_hex(x):
r = utils.decode_int(x.decode('hex').lstrip("\x00"))
return r
def encode_hex_from_int(x):
return utils.zpad(utils.int_to_big_endian(x), 64).encode('hex')
def do_test_bloom(test_logs):
"""
The logs section is a mapping between the blooms and their corresponding logentries.
Each logentry has the format:
address: The address of the logentry.
data: The data of the logentry.
topics: The topics of the logentry, given as an array of values.
"""
for data in test_logs:
print data
address = data['address']
# Test via bloom
b = bloom.bloom_insert(0, address.decode('hex'))
for t in data['topics']:
b = bloom.bloom_insert(b, t.decode('hex'))
# Test via Log
topics = [decode_int_from_hex(x) for x in data['topics']]
log = pb.Log(address, topics, '')
log_bloom = bloom.b64(bloom.bloom_from_list(log.bloomables()))
assert log_bloom.encode('hex') == encode_hex_from_int(b)
assert data['bloom'] == log_bloom.encode('hex')
|
{
"content_hash": "0c44fbe5c26ff048a6829fc6cd489422",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 89,
"avg_line_length": 32.74025974025974,
"alnum_prop": 0.6287187623958747,
"repo_name": "joelcan/tools-eth-contract-dev",
"id": "a184a518b3a0639f3ec4f847591d8ecd2b045471",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyethereum/tests/test_bloom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1657"
},
{
"name": "Python",
"bytes": "513392"
},
{
"name": "Shell",
"bytes": "1202"
}
],
"symlink_target": ""
}
|
from django.core.cache import cache
from django.contrib.auth.decorators import login_required
from django.template.defaultfilters import slugify
from pompadour_wiki.apps.utils.decorators import render_to
from pompadour_wiki.apps.wiki.models import Wiki
from datetime import datetime
import os
class LastEdits(object):
"""
This class is implemented as a list to make algorithm easier
"""
def __init__(self, wikis):
""" Get last edits """
self.edits = []
self.wikis = wikis
for wiki in wikis:
# Get the last ten commits
last_10_commits = wiki.repo.log(limit=10)
# And for each commits, add the modified files to the list
for c in last_10_commits:
if c.parents:
diff = c.tree.diff(c.parents[0].tree)
for patch in diff:
path = patch.new_file_path
# Exclude __media__ files :
if not path.startswith('__media__'):
node = {
'wiki': wiki,
'filename': path,
'page': os.path.splitext(path)[0],
'author': {
'name': c.author.name,
'email': c.author.email
},
'date': datetime.fromtimestamp(c.commit_time),
}
# Check if the file is in the list
if node not in self:
# Add the file to the list
self.edits.append(node)
# end for each diffs
# No parents, root commit
else:
# Add each files in the commit tree
for entry in wiki.repo.walk():
# Except __media__ files
if not entry.path.startswith('__media__'):
node = {
'wiki': wiki,
'filename': entry.path,
'page': os.path.splitext(entry.path)[0],
'author': {
'name': c.author.name,
'email': c.author.email
},
'date': datetime.fromtimestamp(c.commit_time),
}
# Check if the file is in the list
if node not in self:
# Add to the list
self.edits.append(node)
# end for each blob
# end for each commits
# end for each wikis
self.edits.sort(key=lambda x: x['date'], reverse=True)
# list API
def __len__(self):
return len(self.edits)
def __getitem__(self, key):
return self.edits[key]
def __setitem__(self, key, value):
self.edits[key] = value
def __delitem__(self, key):
del self.edits[key]
def __iter__(self):
return iter(self.edits)
def __contains__(self, node):
""" Check if the file is already in the list """
for edit in self.edits:
if edit['filename'] == node['filename'] and edit['wiki'] == node['wiki']:
return True
return False
@login_required
@render_to('index.html')
def home(request):
wikis = Wiki.objects.all()
# retrieve last edits from cache
if not cache.has_key('LastEdits'):
cache.set('LastEdits', LastEdits(wikis)[:10], cache.default_timeout)
last_edits = cache.get('LastEdits')
return {'wiki': {
'home': True,
'array': [wikis[x:x + 4] for x in range(0, len(wikis), 4)],
'last_edits': last_edits,
}}
@login_required
@render_to('index.html')
def search(request):
wikis = Wiki.objects.all()
# retrieve last edits from cache
if not cache.has_key('LastEdits'):
cache.set('LastEdits', LastEdits(wikis)[:10], cache.default_timeout)
last_edits = cache.get('LastEdits')
data = {'wiki': {
'home': True,
'array': [wikis[x:x + 4] for x in range(0, len(wikis), 4)],
'last_edits': last_edits,
}}
if request.method == 'POST':
query = request.POST['search-query']
data['wiki']['search'] = query
results = []
# For each wiki
for wiki in wikis:
# Do the search
for filename, matches in wiki.repo.search(query, exclude=r'^__media__'):
# Get information about the file
last_commit = wiki.repo.log(name=filename, limit=1)[0]
# and append to the list
results.append({
'id': '{0}_{1}'.format(last_commit.hex, slugify(filename)),
'wiki': wiki,
'file': os.path.splitext(filename)[0],
'matches': matches,
'author': last_commit.author,
'date': datetime.fromtimestamp(last_commit.commit_time),
})
# now sort the list
results.sort(key=lambda x: x['date'], reverse=True)
data['wiki']['search_results'] = results
return data
@render_to('index.html')
def login_failed(request, message, status=None, template_name=None, exception=None):
return {'error': message}
|
{
"content_hash": "518c8c0f7ef0253e1b193b3c31d15242",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 85,
"avg_line_length": 31.865168539325843,
"alnum_prop": 0.47143864598025387,
"repo_name": "9h37/pompadour-wiki",
"id": "b34f7eed27baa5f4d2de139fcdba94e119e78442",
"size": "5697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pompadour_wiki/pompadour_wiki/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "483169"
},
{
"name": "Python",
"bytes": "80525"
},
{
"name": "Shell",
"bytes": "596"
}
],
"symlink_target": ""
}
|
import unittest
from flask_babel import LazyString
from unittest.mock import patch, MagicMock
from app.exceptions.base import ResourceNotFoundException, \
BusinessRuleException
from app.models.oauth.client import OAuthClient
from app.models.oauth.token import OAuthToken
from app.repository import oauth_repository as mock_spec # rename for safety
from app.service import oauth_service
from test import Any
oauth_repo_mock = MagicMock(spec=dir(mock_spec))
client_mock = MagicMock(OAuthClient)
token_mock = MagicMock(OAuthToken)
access_token = "access_token"
refresh_token = "refresh_token"
client_id = "client_id"
client_secret = "client_secret"
grant_code = "grant_code"
code = {"code": "grant_code"}
scopes = "SCOPE1 SCOPE2 SCOPE3"
scopes_list = ["SCOPE1", "SCOPE2", "SCOPE3"]
redirect_uri = "http://svia.nl"
token_type = "Bearer"
expires = 3600
user_id = 1
grant_id = 2
@patch.object(oauth_service, 'oauth_repository', oauth_repo_mock)
class TestOAuthService(unittest.TestCase):
def setUp(self):
oauth_repo_mock.reset_mock()
client_mock.reset_mock()
client_mock.client_id = client_id
client_mock.client_secret = client_secret
client_mock.user_id = user_id
def test_query_token_with_type_hint(self):
oauth_service._query_token(access_token, access_token, client_mock)
oauth_repo_mock.get_token_by_access_token.assert_called_once_with(
access_token=access_token, client_id=client_id)
oauth_service._query_token(refresh_token, refresh_token, client_mock)
oauth_repo_mock.get_token_by_refresh_token.assert_called_once_with(
refresh_token=refresh_token, client_id=client_id)
def test_query_access_token_without_type_hint(self):
oauth_repo_mock.get_token_by_access_token.return_value = token_mock
token = oauth_service._query_token(access_token, None, client_mock)
oauth_repo_mock.get_token_by_access_token.assert_called_once_with(
access_token=access_token, client_id=client_id)
oauth_repo_mock.get_token_by_refresh_token.assert_not_called()
self.assertEqual(token, token_mock)
def test_query_refresh_token_without_type_hint(self):
oauth_repo_mock.get_token_by_access_token.return_value = None
oauth_repo_mock.get_token_by_refresh_token.return_value = token_mock
token = oauth_service._query_token(refresh_token, None, client_mock)
oauth_repo_mock.get_token_by_access_token.assert_called_once_with(
access_token=refresh_token, client_id=client_id)
oauth_repo_mock.get_token_by_refresh_token. \
assert_called_once_with(refresh_token=refresh_token,
client_id=client_id)
self.assertEqual(token_mock, token)
def test_get_client_by_id(self):
expected = MagicMock(spec=dir(OAuthClient))
expected.client_id = client_id
oauth_repo_mock.get_client_by_id.return_value = expected
actual = oauth_service.get_client_by_id(client_id)
oauth_repo_mock.get_client_by_id. \
assert_called_once_with(client_id)
self.assertEqual(actual, expected)
def test_create_token_with_user(self):
request_mock = MagicMock
request_mock.client = client_mock
request_mock.user = None
token = {'somekey': 'somevalue'}
oauth_service.create_token(token, request_mock)
oauth_repo_mock.create_token.assert_called_once_with(
client_id=client_id, user_id=user_id, somekey='somevalue')
def test_create_token_without_user(self):
request_mock = MagicMock
request_mock.client = client_mock
token = {'somekey': 'somevalue'}
oauth_service.create_token(token, request_mock)
oauth_repo_mock.create_token.assert_called_once_with(
client_id=client_id, user_id=user_id, somekey='somevalue')
def test_get_approved_clients_by_user_id(self):
oauth_service.get_approved_clients_by_user_id(user_id=3)
oauth_repo_mock.get_approved_clients_by_user_id \
.assert_called_once_with(user_id=3)
def test_get_owned_clients_by_user_id(self):
oauth_service.get_owned_clients_by_user_id(user_id=3)
oauth_repo_mock.get_owned_clients_by_user_id \
.assert_called_once_with(user_id=3)
def test_revoke_user_tokens_by_client_id(self):
client = MagicMock(spec=dir(OAuthClient))
oauth_repo_mock.get_client_by_id.return_value = client
rv = oauth_service.revoke_user_tokens_by_client_id(user_id=3,
client_id=4)
oauth_repo_mock.revoke_user_tokens_by_client_id \
.assert_called_once_with(user_id=3, client_id=4)
self.assertEqual(client, rv)
def test_revoke_user_tokens_by_client_id_no_client(self):
oauth_repo_mock.get_client_by_id.return_value = None
with self.assertRaises(ResourceNotFoundException):
oauth_service.revoke_user_tokens_by_client_id(user_id=3,
client_id=4)
oauth_repo_mock.get_client_by_id.assert_called_once_with(client_id=4)
def test_get_scope_descriptions(self):
scope_dict = oauth_service.get_scope_descriptions()
self.assertIsInstance(scope_dict, dict)
all(self.assertIsInstance(scope, str) for scope in scope_dict.keys())
all(self.assertIsInstance(scope, LazyString) for scope in
scope_dict.values())
def test_reset_client_secret(self):
oauth_service.reset_client_secret("id")
oauth_repo_mock.update_client_secret.assert_called_once_with(
client_id="id", client_secret=Any(str))
def test_reset_client_secret_no_client(self):
oauth_repo_mock.get_client_by_id.return_value = None
with self.assertRaises(ResourceNotFoundException):
oauth_service.reset_client_secret("id")
def test_reset_client_secret_public_client(self):
oauth_repo_mock.get_client_by_id.return_value = client_mock
client_mock.client_secret = ''
with self.assertRaises(BusinessRuleException):
oauth_service.reset_client_secret("id")
|
{
"content_hash": "06abcdf14c2a3189292db16ac208f41d",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 77,
"avg_line_length": 41.1578947368421,
"alnum_prop": 0.6673593350383632,
"repo_name": "viaict/viaduct",
"id": "1b21c2d3b7ef661daa960ebe46bbdff2f1a745ba",
"size": "6256",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/test_oauth_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1583078"
},
{
"name": "Dockerfile",
"bytes": "1131"
},
{
"name": "HTML",
"bytes": "227955"
},
{
"name": "JavaScript",
"bytes": "63026"
},
{
"name": "Makefile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "770976"
},
{
"name": "Shell",
"bytes": "3004"
},
{
"name": "TypeScript",
"bytes": "3288"
},
{
"name": "Vue",
"bytes": "27869"
}
],
"symlink_target": ""
}
|
from kittens.tui.handler import result_handler
from kitty.boss import Boss
from typing import List
def main():
pass
@result_handler(no_ui=True)
def handle_result(args: List[str], answer: str, target_window_id: int, boss: Boss) -> None:
boss.active_tab.neighboring_window(args[1])
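# Assumed kitty.conf binding (illustrative, not from this repo):
#   map ctrl+alt+h kitten neighboring_window.py left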
|
{
"content_hash": "536999b18f5b9d67bee636a666546a33",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 91,
"avg_line_length": 26.454545454545453,
"alnum_prop": 0.7353951890034365,
"repo_name": "joshuarubin/dotfiles",
"id": "ae2914409a9a08d7ff70c1759f015a2670e406c7",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "private_dot_config/exact_kitty/neighboring_window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1671"
},
{
"name": "LiveScript",
"bytes": "1465"
},
{
"name": "Lua",
"bytes": "19435"
},
{
"name": "Nix",
"bytes": "1356"
},
{
"name": "PHP",
"bytes": "1308"
},
{
"name": "PostScript",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "6766"
},
{
"name": "Ruby",
"bytes": "202"
},
{
"name": "Scheme",
"bytes": "1280"
},
{
"name": "Shell",
"bytes": "60850"
}
],
"symlink_target": ""
}
|
"""User roles
:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkconfig
from pykern.pkdebug import pkdp
import aenum
import sirepo.feature_config
ROLE_ADM = "adm"
ROLE_PAYMENT_PLAN_ENTERPRISE = "enterprise"
ROLE_PAYMENT_PLAN_PREMIUM = "premium"
PAID_USER_ROLES = (ROLE_PAYMENT_PLAN_PREMIUM, ROLE_PAYMENT_PLAN_ENTERPRISE)
_SIM_TYPE_ROLE_PREFIX = "sim_type_"
class ModerationStatus(aenum.NamedConstant):
"""States used by auth_role_moderation and UserRoleInvite"""
APPROVE = "approve"
CLARIFY = "clarify"
DENY = "deny"
PENDING = "pending"
VALID_SET = frozenset([APPROVE, CLARIFY, DENY, PENDING])
@classmethod
def check(cls, value):
assert (
value in cls.VALID_SET
), f"status={value} is not in valied_set={cls.VALID_SET}"
return value
def for_moderated_sim_types():
return [for_sim_type(s) for s in sirepo.feature_config.cfg().moderated_sim_types]
def for_new_user(is_guest):
if is_guest and pkconfig.channel_in("dev"):
return get_all()
return []
def for_proprietary_oauth_sim_types():
return [
for_sim_type(s) for s in sirepo.feature_config.cfg().proprietary_oauth_sim_types
]
def for_sim_type(sim_type):
return _SIM_TYPE_ROLE_PREFIX + sim_type
def get_all():
return [
for_sim_type(t) for t in sirepo.feature_config.auth_controlled_sim_types()
] + [
ROLE_ADM,
ROLE_PAYMENT_PLAN_ENTERPRISE,
ROLE_PAYMENT_PLAN_PREMIUM,
]
def sim_type(role):
return role[len(_SIM_TYPE_ROLE_PREFIX) :]
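# Round-trip example: for_sim_type("myapp") -> "sim_type_myapp";
# sim_type("sim_type_myapp") -> "myapp".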
|
{
"content_hash": "8fcfa7a9dbe94a6add455b803ac25c29",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 25.075757575757574,
"alnum_prop": 0.6670694864048339,
"repo_name": "radiasoft/sirepo",
"id": "741cbad399a8380da01cd2f37806baf1b9bf2094",
"size": "1679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sirepo/auth_role.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "152"
},
{
"name": "CSS",
"bytes": "65716"
},
{
"name": "HTML",
"bytes": "144600"
},
{
"name": "JavaScript",
"bytes": "3855752"
},
{
"name": "Jinja",
"bytes": "190763"
},
{
"name": "Jupyter Notebook",
"bytes": "1262"
},
{
"name": "Opal",
"bytes": "61806"
},
{
"name": "Perl",
"bytes": "31089"
},
{
"name": "Python",
"bytes": "3022923"
},
{
"name": "SCSS",
"bytes": "29855"
},
{
"name": "Shell",
"bytes": "21259"
}
],
"symlink_target": ""
}
|
import re
import sys
COMMAND = sys.argv.pop(0)
if len(sys.argv) != 2: sys.exit("usage: "+COMMAND+" gold-column-number guessed-column-number < file\n")
GOLDCOLUMN = int(sys.argv.pop(0))
GUESSEDCOLUMN = int(sys.argv.pop(0))
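# Example invocation (hypothetical data file): python confusion.py 2 3 < tagged.txt
# compares gold tags in column 2 against guessed tags in column 3.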
nbrOfClasses = {} # count per class
nbrOfGuesses = {} # count per guess
confusion = {} # confusion matrix: guesses per gold tag
# process classifications
for line in sys.stdin:
# remove final newline
line = line.rstrip()
# get the tokens on the line
tokens = line.split()
# sanity check
if (len(tokens) < GOLDCOLUMN) or (len(tokens) < GUESSEDCOLUMN):
sys.exit(COMMAND+": input line contains too few tokens: "+line+"\n")
# count gold tag
gold = tokens[GOLDCOLUMN-1]
if gold in nbrOfClasses: nbrOfClasses[gold] += 1
else:
nbrOfClasses[gold] = 1
confusion[gold] = {}
# add guess tag to counts, if necessary
guess = tokens[GUESSEDCOLUMN-1]
if not guess in nbrOfClasses:
nbrOfClasses[guess] = 0
confusion[guess] = {}
if guess in nbrOfGuesses: nbrOfGuesses[guess] += 1
else: nbrOfGuesses[guess] = 1
# add guess to confusion matrix
if guess in confusion[gold]: confusion[gold][guess] += 1
else: confusion[gold][guess] = 1
# show confusion matrix
patternFourChars = re.compile("....")
for gold in sorted(nbrOfClasses,key=nbrOfClasses.get,reverse=True):
# pretty print: create output string of at least four characters
outString = str(gold)
while not patternFourChars.match(outString): outString = " "+outString
sys.stdout.write(outString+":")
for guess in sorted(nbrOfClasses,key=nbrOfClasses.get,reverse=True):
if gold in confusion and guess in confusion[gold]:
# pretty print: create output string of at least four characters
outString = str(confusion[gold][guess])
while not patternFourChars.match(outString): outString = " "+outString
sys.stdout.write("&"+outString)
# just print a period for unseen gold-guessed combinations
else: sys.stdout.write("& .")
# print precision counts
outString = str(nbrOfClasses[gold])
while len(outString) < 4: outString = " "+outString
print "&"+outString+"\\\\"
# print recall counts
sys.stdout.write(" ")
for gold in sorted(nbrOfClasses,key=nbrOfClasses.get,reverse=True):
if not gold in nbrOfGuesses: outString = "0"
else: outString = str(nbrOfGuesses[gold])
while len(outString) < 4: outString = " "+outString
sys.stdout.write("&"+outString)
print "\\\\"
# done
sys.exit()
|
{
"content_hash": "20bd18653150fcfc1aca5e0757d90784",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 103,
"avg_line_length": 37.405797101449274,
"alnum_prop": 0.6667958155753584,
"repo_name": "online-behaviour/machine-learning",
"id": "2a075a82bd1947480a2317c3a6511c9d529b2767",
"size": "2779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "confusion.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "100355"
},
{
"name": "Python",
"bytes": "101290"
},
{
"name": "Shell",
"bytes": "9843"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.test_util import TestCase
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
import numpy as np
class TestMobileExporter(TestCase):
def test_mobile_exporter(self):
model = ModelHelper(name="mobile_exporter_test_model")
# Test LeNet
brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
brew.max_pool(model, 'conv2', 'pool2', kernel=2, stride=2)
brew.fc(model, 'pool2', 'fc3', dim_in=50 * 4 * 4, dim_out=500)
brew.relu(model, 'fc3', 'fc3')
brew.fc(model, 'fc3', 'pred', 500, 10)
brew.softmax(model, 'pred', 'out')
# Create our mobile exportable networks
workspace.RunNetOnce(model.param_init_net)
init_net, predict_net = mobile_exporter.Export(
workspace, model.net, model.params
)
# Populate the workspace with data
np_data = np.random.rand(1, 1, 28, 28).astype(np.float32)
workspace.FeedBlob("data", np_data)
workspace.CreateNet(model.net)
workspace.RunNet(model.net)
ref_out = workspace.FetchBlob("out")
# Clear the workspace
workspace.ResetWorkspace()
# Populate the workspace with data
workspace.RunNetOnce(init_net)
# Fake "data" is populated by init_net, we have to replace it
workspace.FeedBlob("data", np_data)
# Overwrite the old net
workspace.CreateNet(predict_net, True)
workspace.RunNet(predict_net.name)
manual_run_out = workspace.FetchBlob("out")
np.testing.assert_allclose(
ref_out, manual_run_out, atol=1e-10, rtol=1e-10
)
# Clear the workspace
workspace.ResetWorkspace()
# Predictor interface test (simulates writing to disk)
predictor = workspace.Predictor(
init_net.SerializeToString(), predict_net.SerializeToString()
)
# Output is a vector of outputs but we only care about the first and only result
predictor_out = predictor.run([np_data])
assert len(predictor_out) == 1
predictor_out = predictor_out[0]
np.testing.assert_allclose(
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
def test_mobile_exporter_datatypes(self):
model = ModelHelper(name="mobile_exporter_test_model")
model.Copy("data_int", "out")
model.params.append("data_int")
# Create our mobile exportable networks
workspace.RunNetOnce(model.param_init_net)
np_data_int = np.random.randint(100, size=(1, 1, 28, 28), dtype=np.int32)
workspace.FeedBlob("data_int", np_data_int)
init_net, predict_net = mobile_exporter.Export(
workspace, model.net, model.params
)
workspace.CreateNet(model.net)
workspace.RunNet(model.net)
ref_out = workspace.FetchBlob("out")
# Clear the workspace
workspace.ResetWorkspace()
# Populate the workspace with data
workspace.RunNetOnce(init_net)
# Overwrite the old net
workspace.CreateNet(predict_net, True)
workspace.RunNet(predict_net.name)
manual_run_out = workspace.FetchBlob("out")
np.testing.assert_allclose(
ref_out, manual_run_out, atol=1e-10, rtol=1e-10
)
# Clear the workspace
workspace.ResetWorkspace()
# Predictor interface test (simulates writing to disk)
predictor = workspace.Predictor(
init_net.SerializeToString(), predict_net.SerializeToString()
)
# Output is a vector of outputs but we only care about the first and only result
predictor_out = predictor.run([])
assert len(predictor_out) == 1
predictor_out = predictor_out[0]
np.testing.assert_allclose(
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
|
{
"content_hash": "cf296a0340f532188352e6b516747774",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 88,
"avg_line_length": 35.89915966386555,
"alnum_prop": 0.6308520599250936,
"repo_name": "Yangqing/caffe2",
"id": "0ae91da5300c3b467a986c192f7d2fce6d8c9519",
"size": "4943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe2/python/predictor/mobile_exporter_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3327"
},
{
"name": "C",
"bytes": "691775"
},
{
"name": "C++",
"bytes": "5773620"
},
{
"name": "CMake",
"bytes": "313982"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "2051079"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "15290"
},
{
"name": "Metal",
"bytes": "41257"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Objective-C++",
"bytes": "253857"
},
{
"name": "Python",
"bytes": "3805476"
},
{
"name": "Shell",
"bytes": "73185"
}
],
"symlink_target": ""
}
|
"""ZHA device automation trigger tests."""
from datetime import timedelta
import time
import pytest
import zigpy.profiles.zha
import zigpy.zcl.clusters.general as general
import homeassistant.components.automation as automation
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.helpers.device_registry import async_get_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .common import async_enable_traffic
from tests.common import (
async_fire_time_changed,
async_get_device_automations,
async_mock_service,
)
ON = 1
OFF = 0
SHAKEN = "device_shaken"
COMMAND = "command"
COMMAND_SHAKE = "shake"
COMMAND_HOLD = "hold"
COMMAND_SINGLE = "single"
COMMAND_DOUBLE = "double"
DOUBLE_PRESS = "remote_button_double_press"
SHORT_PRESS = "remote_button_short_press"
LONG_PRESS = "remote_button_long_press"
LONG_RELEASE = "remote_button_long_release"
def _same_lists(list_a, list_b):
if len(list_a) != len(list_b):
return False
for item in list_a:
if item not in list_b:
return False
return True
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture
async def mock_devices(hass, zigpy_device_mock, zha_device_joined_restored):
"""IAS device fixture."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.Basic.cluster_id],
"out_clusters": [general.OnOff.cluster_id],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
}
)
zha_device = await zha_device_joined_restored(zigpy_device)
zha_device.update_available(True)
await hass.async_block_till_done()
return zigpy_device, zha_device
async def test_triggers(hass, mock_devices):
"""Test zha device triggers."""
zigpy_device, zha_device = mock_devices
zigpy_device.device_automation_triggers = {
(SHAKEN, SHAKEN): {COMMAND: COMMAND_SHAKE},
(DOUBLE_PRESS, DOUBLE_PRESS): {COMMAND: COMMAND_DOUBLE},
(SHORT_PRESS, SHORT_PRESS): {COMMAND: COMMAND_SINGLE},
(LONG_PRESS, LONG_PRESS): {COMMAND: COMMAND_HOLD},
(LONG_RELEASE, LONG_RELEASE): {COMMAND: COMMAND_HOLD},
}
ieee_address = str(zha_device.ieee)
ha_device_registry = await async_get_registry(hass)
reg_device = ha_device_registry.async_get_device({("zha", ieee_address)}, set())
triggers = await async_get_device_automations(hass, "trigger", reg_device.id)
expected_triggers = [
{
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": "device_offline",
"subtype": "device_offline",
},
{
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": SHAKEN,
"subtype": SHAKEN,
},
{
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": DOUBLE_PRESS,
"subtype": DOUBLE_PRESS,
},
{
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": SHORT_PRESS,
"subtype": SHORT_PRESS,
},
{
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": LONG_PRESS,
"subtype": LONG_PRESS,
},
{
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": LONG_RELEASE,
"subtype": LONG_RELEASE,
},
]
assert _same_lists(triggers, expected_triggers)
async def test_no_triggers(hass, mock_devices):
"""Test zha device with no triggers."""
_, zha_device = mock_devices
ieee_address = str(zha_device.ieee)
ha_device_registry = await async_get_registry(hass)
reg_device = ha_device_registry.async_get_device({("zha", ieee_address)}, set())
triggers = await async_get_device_automations(hass, "trigger", reg_device.id)
assert triggers == [
{
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": "device_offline",
"subtype": "device_offline",
}
]
async def test_if_fires_on_event(hass, mock_devices, calls):
"""Test for remote triggers firing."""
zigpy_device, zha_device = mock_devices
zigpy_device.device_automation_triggers = {
(SHAKEN, SHAKEN): {COMMAND: COMMAND_SHAKE},
(DOUBLE_PRESS, DOUBLE_PRESS): {COMMAND: COMMAND_DOUBLE},
(SHORT_PRESS, SHORT_PRESS): {COMMAND: COMMAND_SINGLE},
(LONG_PRESS, LONG_PRESS): {COMMAND: COMMAND_HOLD},
(LONG_RELEASE, LONG_RELEASE): {COMMAND: COMMAND_HOLD},
}
ieee_address = str(zha_device.ieee)
ha_device_registry = await async_get_registry(hass)
reg_device = ha_device_registry.async_get_device({("zha", ieee_address)}, set())
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": SHORT_PRESS,
"subtype": SHORT_PRESS,
},
"action": {
"service": "test.automation",
"data": {"message": "service called"},
},
}
]
},
)
await hass.async_block_till_done()
channel = zha_device.channels.pools[0].client_channels["1:0x0006"]
channel.zha_send_event(COMMAND_SINGLE, [])
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["message"] == "service called"
async def test_device_offline_fires(
hass, zigpy_device_mock, zha_device_restored, calls
):
"""Test for device offline triggers firing."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.Basic.cluster_id],
"out_clusters": [general.OnOff.cluster_id],
"device_type": 0,
}
}
)
zha_device = await zha_device_restored(zigpy_device, last_seen=time.time())
await async_enable_traffic(hass, [zha_device])
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"device_id": zha_device.device_id,
"domain": "zha",
"platform": "device",
"type": "device_offline",
"subtype": "device_offline",
},
"action": {
"service": "test.automation",
"data": {"message": "service called"},
},
}
]
},
)
await hass.async_block_till_done()
assert zha_device.available is True
zigpy_device.last_seen = (
time.time() - zha_core_device.CONSIDER_UNAVAILABLE_BATTERY - 2
)
# there are 3 checkins to perform before marking the device unavailable
future = dt_util.utcnow() + timedelta(seconds=90)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
future = dt_util.utcnow() + timedelta(seconds=90)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
future = dt_util.utcnow() + timedelta(
seconds=zha_core_device.CONSIDER_UNAVAILABLE_BATTERY + 100
)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert zha_device.available is False
assert len(calls) == 1
assert calls[0].data["message"] == "service called"
async def test_exception_no_triggers(hass, mock_devices, calls, caplog):
"""Test for exception on event triggers firing."""
_, zha_device = mock_devices
ieee_address = str(zha_device.ieee)
ha_device_registry = await async_get_registry(hass)
reg_device = ha_device_registry.async_get_device({("zha", ieee_address)}, set())
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": "junk",
"subtype": "junk",
},
"action": {
"service": "test.automation",
"data": {"message": "service called"},
},
}
]
},
)
await hass.async_block_till_done()
assert "Invalid config for [automation]" in caplog.text
async def test_exception_bad_trigger(hass, mock_devices, calls, caplog):
"""Test for exception on event triggers firing."""
zigpy_device, zha_device = mock_devices
zigpy_device.device_automation_triggers = {
(SHAKEN, SHAKEN): {COMMAND: COMMAND_SHAKE},
(DOUBLE_PRESS, DOUBLE_PRESS): {COMMAND: COMMAND_DOUBLE},
(SHORT_PRESS, SHORT_PRESS): {COMMAND: COMMAND_SINGLE},
(LONG_PRESS, LONG_PRESS): {COMMAND: COMMAND_HOLD},
(LONG_RELEASE, LONG_RELEASE): {COMMAND: COMMAND_HOLD},
}
ieee_address = str(zha_device.ieee)
ha_device_registry = await async_get_registry(hass)
reg_device = ha_device_registry.async_get_device({("zha", ieee_address)}, set())
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": "junk",
"subtype": "junk",
},
"action": {
"service": "test.automation",
"data": {"message": "service called"},
},
}
]
},
)
await hass.async_block_till_done()
assert "Invalid config for [automation]" in caplog.text
|
{
"content_hash": "dd81b95bb6b9fdcb3c1c32f5179293d6",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 84,
"avg_line_length": 30.586894586894587,
"alnum_prop": 0.5408904619970194,
"repo_name": "sdague/home-assistant",
"id": "801b6831379ffa671d53ed09fc41fcf0198b28d7",
"size": "10736",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/zha/test_device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "27869189"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
}
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["get_quad_coeffs"]
import os
import shutil
import sqlite3
import logging
from tempfile import NamedTemporaryFile
from six.moves import urllib
from .config import KPLR_ROOT
DB_FILENAME = "ldcoeffs.db"
def get_quad_coeffs(teff=5778, logg=None, feh=None, data_root=None,
clobber=False):
"""
Get the quadratic coefficients for the standard Kepler limb-darkening
profile.
:param teff: (optional)
The effective temperature in degrees K.
:param logg: (optional)
The log10 surface gravity in cm/s/s.
:param feh: (optional)
The metallicity [Fe/H].
:param data_root: (optional)
The local base directory where the grids will be downloaded to. This
can also be set using the ``KPLR_ROOT`` environment variable. The
default value is ``~/.kplr``.
:param clobber: (optional)
Should the database file be overwritten even if it already exists?
(default: False)
"""
assert teff is not None
# Make sure that the database is saved locally.
filename = download_database(data_root=data_root, clobber=clobber)
# Construct the SQL query.
q = """
SELECT mu1,mu2 FROM claret11 WHERE
teff=(SELECT teff FROM claret11 ORDER BY abs(teff-?) LIMIT 1)
ORDER BY (logg-?) * (logg-?) + (feh-?) * (feh-?) LIMIT 1
"""
pars = [teff, logg, logg, feh, feh]
# Execute the command.
with sqlite3.connect(filename) as conn:
c = conn.cursor()
rows = c.execute(q, pars)
mu1, mu2 = rows.fetchone()
return mu1, mu2
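# Hedged usage sketch (added for illustration; not part of the original
# module). Fetches quadratic limb-darkening coefficients for a roughly
# Sun-like star; the parameter values are placeholders. Note the coefficient
# database is downloaded on first use.
def _example_get_quad_coeffs():
    mu1, mu2 = get_quad_coeffs(teff=5778, logg=4.44, feh=0.0)
    return mu1, mu2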
def download_database(data_root=None, clobber=False):
"""
Download a SQLite database containing the limb darkening coefficients
computed by `Claret & Bloemen (2011)
<http://adsabs.harvard.edu/abs/2011A%26A...529A..75C>`_. The table is
available online on `Vizier
<http://vizier.cfa.harvard.edu/viz-bin/VizieR?-source=J/A+A/529/A75>`_.
Using the ASCII data table, the SQLite database was generated with the
following Python commands:
.. code-block:: python
import sqlite3
import numpy as np
with sqlite3.connect("ldcoeffs.db") as conn:
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS claret11 "
"(teff REAL, logg REAL, feh REAL, veloc REAL, mu1 REAL, "
"mu2 REAL)")
data = np.loadtxt("claret11.txt", skiprows=59, delimiter="|",
usecols=range(1, 7))
c.executemany("INSERT INTO claret11 (logg,teff,feh,veloc,mu1,mu2) "
"VALUES (?,?,?,?,?,?)", data)
"""
# Figure out the local filename for the database.
if data_root is None:
data_root = KPLR_ROOT
filename = os.path.join(data_root, DB_FILENAME)
if not clobber and os.path.exists(filename):
return filename
# Make sure that the target directory exists.
try:
os.makedirs(data_root)
except os.error:
pass
# MAGIC: specify the URL for the remote file.
url = "http://bbq.dfm.io/~dfm/ldcoeffs.db"
# Fetch the database from the server.
logging.info("Downloading file from: '{0}'".format(url))
r = urllib.request.Request(url)
handler = urllib.request.urlopen(r)
code = handler.getcode()
if int(code) != 200:
raise RuntimeError("Couldn't download file from {0}. Returned: {1}"
.format(url, code))
# Save the contents of the file.
logging.info("Saving file to: '{0}'".format(filename))
# Atomically write to disk.
# http://stackoverflow.com/questions/2333872/ \
# atomic-writing-to-file-with-python
f = NamedTemporaryFile("wb", delete=False)
f.write(handler.read())
f.flush()
os.fsync(f.fileno())
f.close()
shutil.move(f.name, filename)
return filename
|
{
"content_hash": "e80178f3ab17df02fadee2615b590297",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 79,
"avg_line_length": 30.50381679389313,
"alnum_prop": 0.6191191191191191,
"repo_name": "evanbiederstedt/kplr",
"id": "160419fb69ae881159cde284614631af2b7351e2",
"size": "4043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kplr/ld.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66801"
},
{
"name": "Shell",
"bytes": "104"
}
],
"symlink_target": ""
}
|
"""This module, nexus_db.py, acts as an API to the NexusDB {MySQL}."""
import pika
import json
import time
from scripts.docker.wait_for_rabbit_host import WaitForRabbitMQHost
import mysql.connector
print('TODO: Run NexusDB!')
wait = WaitForRabbitMQHost()
wait.wait_for_connection()
print('MAKING A CONNECTION DB!')
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbit_manager'))
print('CONNECTION MADE DB')
channel = connection.channel()
print('CHANNEL MADE!! DB')
#channel.queue_declare(queue='exchange_nexus_courier', durable=True, auto_delete=True)
print('QUEUE DECLARED DB')
def callback(ch, method, properties, body):
    print(" [x] Received %r" % body)
    # Parse the JSON payload instead of eval()-ing untrusted input.
    m = json.loads(body.decode('utf-8'))
    data = m['d']
    #for table in data
    message2 = {'m2': 'saved!'}
    channel.basic_publish(exchange='',
                          routing_key='queue_nexus_db',
                          body=json.dumps(message2),
                          properties=pika.BasicProperties(
                              delivery_mode=2,  # make message persistent
                          ))
    # Acknowledge the delivery; basic_consume below is set up with no_ack=False.
    ch.basic_ack(delivery_tag=method.delivery_tag)
    print(" [x] Sent %r" % message2)
channel.basic_consume(callback,
queue='queue_nexus_db',
no_ack=False)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
|
{
"content_hash": "6db7c504a7fd3524bb474747cc254bf8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 21.875,
"alnum_prop": 0.593015873015873,
"repo_name": "utarsuno/quasar_source",
"id": "a44987c449467af5ec983d160e8013f1006bdaf8",
"size": "1591",
"binary": false,
"copies": "1",
"ref": "refs/heads/entire_codebase_refactor",
"path": "deprecated/nexus_db/nexus_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "57104"
},
{
"name": "C++",
"bytes": "32307"
},
{
"name": "CSS",
"bytes": "540"
},
{
"name": "Dockerfile",
"bytes": "11914"
},
{
"name": "GLSL",
"bytes": "14985"
},
{
"name": "Groovy",
"bytes": "77"
},
{
"name": "HTML",
"bytes": "489"
},
{
"name": "JavaScript",
"bytes": "446658"
},
{
"name": "PHP",
"bytes": "416209"
},
{
"name": "Python",
"bytes": "191514"
},
{
"name": "Shell",
"bytes": "7916"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from examples.data.data_loader import _DEFAULT_IMAGE_SIZE, _NUM_CHANNELS
from examples.utils import get_tfkeras_model
def get_resnet_model(resnet_depth: str = "50", resnet_version: str = "v1") -> tf.keras.Model:
"""
Creates a native tf.keras ResNet model.
Args:
resnet_depth (str): ResNet depth. Options=[50 (default), 101, 152].
resnet_version (str): ResNet version. Options=[v1 (default), v2].
Returns:
model (tf.keras.Model): model corresponding to 'resnet_depth' and 'resnet_version'.
"""
shape = (
_DEFAULT_IMAGE_SIZE["resnet_{}".format(resnet_version)],
_DEFAULT_IMAGE_SIZE["resnet_{}".format(resnet_version)],
_NUM_CHANNELS,
)
model_name = "resnet_" + resnet_depth + resnet_version
model = get_tfkeras_model(model_name=model_name, shape=shape)
return model
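# Hedged usage sketch (added for illustration; not part of the original
# module). Assumes the surrounding 'examples' package and its model weights
# are available.
def _example_build_resnet() -> tf.keras.Model:
    # Build the default ResNet-50 v1 and report its expected input shape.
    model = get_resnet_model(resnet_depth="50", resnet_version="v1")
    print(model.input_shape)
    return model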
|
{
"content_hash": "97a541b9ea0e84284c7cc0e5fb7ad4b7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 93,
"avg_line_length": 32.7037037037037,
"alnum_prop": 0.6534541336353341,
"repo_name": "NVIDIA/TensorRT",
"id": "3044d8854ccefbf39f6c4b21c75b7e1851bdf97d",
"size": "1573",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/tensorflow-quantization/examples/resnet/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "804"
},
{
"name": "C",
"bytes": "26267"
},
{
"name": "C++",
"bytes": "174835683"
},
{
"name": "CMake",
"bytes": "73882"
},
{
"name": "Cuda",
"bytes": "713094"
},
{
"name": "Dockerfile",
"bytes": "21378"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Jupyter Notebook",
"bytes": "2284036"
},
{
"name": "Makefile",
"bytes": "9128"
},
{
"name": "PowerShell",
"bytes": "162"
},
{
"name": "PureBasic",
"bytes": "388"
},
{
"name": "Python",
"bytes": "2541976"
},
{
"name": "Shell",
"bytes": "20007"
}
],
"symlink_target": ""
}
|
import webapp2
from pale.adapters import webapp2 as pale_webapp2_adapter
from pale.config import authenticator, context_creator
from tests.example_app import api
@authenticator
def authenticate_pale_context(context):
"""Don't actually authenticate anything in this test."""
return context
@context_creator
def create_pale_context(endpoint, request):
return pale_webapp2_adapter.DefaultWebapp2Context(endpoint, request)
def create_pale_webapp2_app():
"""Creates a webapp2 WSGIApplication bound to pale."""
app = webapp2.WSGIApplication(debug=True)
pale_webapp2_adapter.bind_pale_to_webapp2(api,
app,
route_prefix='/api')
return app
|
{
"content_hash": "f4904af63b5d828e81663fe47d4d2d00",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 27.56,
"alnum_prop": 0.7387518142235123,
"repo_name": "Loudr/pale",
"id": "6b6a2ce9fb731d0ece6bc5a05667c5c14cf1de69",
"size": "689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/example_app/webapp2_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175823"
},
{
"name": "Vim script",
"bytes": "49"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
import os.path
from cornice.service import Service
from pyramid.httpexceptions import HTTPBadGateway, HTTPBadRequest, HTTPNotFound
from json.decoder import JSONDecodeError
from cornice.validators import colander_body_validator
from .. import supervisor
from ..config import path
from ..device_discovery import PhilipsHueBridgeApiClient, UnauthenticatedDeviceError, UpstreamError
from ..lockfile import open_locked
from .api_descriptions import descriptions as desc
logger = logging.getLogger(__name__)
list_service = Service(
name='devices_list',
path=path('setup/devices'),
description=desc.get('list_service'),
renderer='json',
)
@list_service.get()
def devices_list_view(request):
"""
Returns list of discovered devices/bridges.
"""
return read_json(request.registry.settings['devices_path'], [])
discover_service = Service(
name='devices_discover',
path=path('setup/devices/discover'),
renderer='json',
accept='application/json',
)
@discover_service.post(validators=(colander_body_validator,))
def devices_discover_view(request):
"""
Trigger device discovery daemon restart to force a new device
scan.
"""
logger.info("Restarting device discovery daemon...")
supervisor.restart_program('device_discovery')
authenticate_service = Service(
name='devices_authenticate',
path=path('setup/devices/{device_id:[a-z0-9]+}/authenticate'),
renderer='json',
accept='application/json',
)
@authenticate_service.post(validators=(colander_body_validator,))
def devices_authenticate_view(request):
device_id = request.matchdict["device_id"]
logger.debug("Authenticating device with ID=%s", device_id)
device_list_path = request.registry.settings['devices_path']
device = get_device(device_list_path, device_id)
if not device["authenticationRequired"]:
return {"id": device_id, "authenticated": True}
senic_hub_data_path = request.registry.settings.get(
"senic_hub_data_path", "/data/senic-hub"
)
phue_bridge_config = os.path.join(senic_hub_data_path, '{}.conf'.format(device["id"]))
config = read_json(phue_bridge_config, {})
username = config.get(device["ip"], {}).get("username")
bridge = PhilipsHueBridgeApiClient(device["ip"], username)
if not bridge.is_authenticated():
username = bridge.authenticate()
if username:
config[device["ip"]] = {"username": username}
else:
config.pop(device["ip"], None)
authenticated = username is not None
device["authenticated"] = authenticated
with open(phue_bridge_config, "w") as f:
json.dump(config, f)
update_device(device, request.registry.settings, username)
return {"id": device_id, "authenticated": authenticated}
details_service = Service(
name='devices_details',
path=path('setup/devices/{device_id:(?!discover)[a-z0-9]+}'),
renderer='json',
)
@details_service.get()
def devices_details_view(request):
device_id = request.matchdict["device_id"]
logger.debug("Getting details for device with ID=%s", device_id)
device_list_path = request.registry.settings['devices_path']
device = get_device(device_list_path, device_id)
# Only Philips Hue has device details at the moment
# TODO: In the future, each single Philips Hue light should be returned as a regular
# device. Philips Hue bridges should be flagged with `virtual: True` since they
# are not controlled by the user. However, all its lights should be returned as
# well when requesting `/devices`.
if device['type'] != 'philips_hue':
return {}
senic_hub_data_path = request.registry.settings.get(
"senic_hub_data_path", "/data/senic-hub"
)
phue_bridge_config = os.path.join(senic_hub_data_path, '{}.conf'.format(device["id"]))
config = read_json(phue_bridge_config, {})
username = config.get(device["ip"], {}).get("username")
try:
bridge = PhilipsHueBridgeApiClient(device["ip"], username)
return bridge.get_lights()
# TODO create a tween to handle exceptions for all views
except UnauthenticatedDeviceError as e:
raise HTTPBadRequest(e.message)
except UpstreamError as e:
raise HTTPBadGateway(e.message)
def get_device(device_list_path, device_id):
if not os.path.exists(device_list_path):
raise HTTPNotFound("Device discovery was not run...")
with open_locked(device_list_path, 'r') as f:
devices = json.load(f)
device = next((x for x in devices if x["id"] == device_id), None)
if device is None:
raise HTTPNotFound("Device with id = {} not found...".format(device_id))
return device
def update_device(device, settings, username): # pragma: no cover
try:
with open_locked(settings['devices_path'], 'r') as f:
devices = json.loads(f.read())
except (FileNotFoundError, JSONDecodeError):
# if we don't have the devices.json file, there are no devices to
# authenticate
return
device["extra"]["username"] = username
if device['authenticated'] and username:
bridge = PhilipsHueBridgeApiClient(device["ip"], username)
device['extra']['lights'] = bridge.get_lights()
bridge_config = bridge.get_config()
device['extra']['bridge'] = {
key: value for key, value in bridge_config.items()
if key in ('swversion', 'apiversion', 'datastoreversion', 'mac',)
}
device_index = [
i for (i, d) in enumerate(devices) if d["id"] == device["id"]
].pop()
devices[device_index] = device
with open_locked(settings['devices_path'], 'w') as f:
json.dump(devices, f)
def read_json(file_path, default=None):
try:
with open_locked(file_path, 'r') as f:
return json.load(f)
except (FileNotFoundError, JSONDecodeError):
return default
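# Illustrative sketch (added; not part of the original module). The per-bridge
# config file written by devices_authenticate_view maps the bridge IP to the
# Hue API username, roughly: {"192.168.1.10": {"username": "senic-hub-user"}}.
# The path below is a hypothetical example; real paths come from settings.
def _example_read_bridge_config(path="/data/senic-hub/abcdef.conf"):
    return read_json(path, {})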
|
{
"content_hash": "d127618e0aa033ed69e04d47a8f7d0d6",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 99,
"avg_line_length": 30.850515463917525,
"alnum_prop": 0.6688387635756057,
"repo_name": "getsenic/senic-hub",
"id": "8b55867fda8bf950e068261a50bd685105e7a67e",
"size": "5985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senic_hub/backend/views/setup_devices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2426"
},
{
"name": "Python",
"bytes": "255261"
},
{
"name": "Shell",
"bytes": "74338"
},
{
"name": "Vim script",
"bytes": "7381"
}
],
"symlink_target": ""
}
|
import datetime
import mock
from testtools import matchers
from ironic.common import exception
from ironic import objects
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
from ironic.tests.unit.objects import utils as obj_utils
class TestPortObject(base.DbTestCase):
def setUp(self):
super(TestPortObject, self).setUp()
self.fake_port = utils.get_test_port()
def test_get_by_id(self):
port_id = self.fake_port['id']
with mock.patch.object(self.dbapi, 'get_port_by_id',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
port = objects.Port.get(self.context, port_id)
mock_get_port.assert_called_once_with(port_id)
self.assertEqual(self.context, port._context)
def test_get_by_uuid(self):
uuid = self.fake_port['uuid']
with mock.patch.object(self.dbapi, 'get_port_by_uuid',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
port = objects.Port.get(self.context, uuid)
mock_get_port.assert_called_once_with(uuid)
self.assertEqual(self.context, port._context)
def test_get_by_address(self):
address = self.fake_port['address']
with mock.patch.object(self.dbapi, 'get_port_by_address',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
port = objects.Port.get(self.context, address)
mock_get_port.assert_called_once_with(address)
self.assertEqual(self.context, port._context)
def test_get_bad_id_and_uuid_and_address(self):
self.assertRaises(exception.InvalidIdentity,
objects.Port.get, self.context, 'not-a-uuid')
def test_save(self):
uuid = self.fake_port['uuid']
address = "b2:54:00:cf:2d:40"
test_time = datetime.datetime(2000, 1, 1, 0, 0)
with mock.patch.object(self.dbapi, 'get_port_by_uuid',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
with mock.patch.object(self.dbapi, 'update_port',
autospec=True) as mock_update_port:
mock_update_port.return_value = (
utils.get_test_port(address=address, updated_at=test_time))
p = objects.Port.get_by_uuid(self.context, uuid)
p.address = address
p.save()
mock_get_port.assert_called_once_with(uuid)
mock_update_port.assert_called_once_with(
uuid, {'address': "b2:54:00:cf:2d:40"})
self.assertEqual(self.context, p._context)
res_updated_at = (p.updated_at).replace(tzinfo=None)
self.assertEqual(test_time, res_updated_at)
def test_refresh(self):
uuid = self.fake_port['uuid']
returns = [self.fake_port,
utils.get_test_port(address="c3:54:00:cf:2d:40")]
expected = [mock.call(uuid), mock.call(uuid)]
with mock.patch.object(self.dbapi, 'get_port_by_uuid',
side_effect=returns,
autospec=True) as mock_get_port:
p = objects.Port.get_by_uuid(self.context, uuid)
self.assertEqual("52:54:00:cf:2d:31", p.address)
p.refresh()
self.assertEqual("c3:54:00:cf:2d:40", p.address)
self.assertEqual(expected, mock_get_port.call_args_list)
self.assertEqual(self.context, p._context)
def test_save_after_refresh(self):
# Ensure that it's possible to do object.save() after object.refresh()
address = "b2:54:00:cf:2d:40"
db_node = utils.create_test_node()
db_port = utils.create_test_port(node_id=db_node.id)
p = objects.Port.get_by_uuid(self.context, db_port.uuid)
p_copy = objects.Port.get_by_uuid(self.context, db_port.uuid)
p.address = address
p.save()
p_copy.refresh()
p_copy.address = 'aa:bb:cc:dd:ee:ff'
# Ensure this passes and an exception is not generated
p_copy.save()
def test_list(self):
with mock.patch.object(self.dbapi, 'get_port_list',
autospec=True) as mock_get_list:
mock_get_list.return_value = [self.fake_port]
ports = objects.Port.list(self.context)
self.assertThat(ports, matchers.HasLength(1))
self.assertIsInstance(ports[0], objects.Port)
self.assertEqual(self.context, ports[0]._context)
def test_payload_schemas(self):
"""Assert that the port's Payload SCHEMAs have the expected properties.
A payload's SCHEMA should:
1. Have each of its keys in the payload's fields
2. Have each member of the schema match with a corresponding field
in the Port object
"""
payloads = obj_utils.get_payloads_with_schemas(objects.port)
for payload in payloads:
for schema_key in payload.SCHEMA:
self.assertIn(schema_key, payload.fields,
"for %s, schema key %s is not in fields"
% (payload, schema_key))
port_key = payload.SCHEMA[schema_key][1]
self.assertIn(port_key, objects.Port.fields,
"for %s, schema key %s has invalid port field %s"
% (payload, schema_key, port_key))
|
{
"content_hash": "2ab10104711c7ee5d435bf2b8a49047e",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 42.46268656716418,
"alnum_prop": 0.5766256590509666,
"repo_name": "ruyang/ironic",
"id": "de5adb162738d2d0a3aac05d24de6354de5104c2",
"size": "6282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/objects/test_port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5133461"
},
{
"name": "Shell",
"bytes": "107097"
}
],
"symlink_target": ""
}
|
from instruction_set_parser import is_parser
import unittest
ISP = is_parser.InstructionSetParser
class HexToDecimalTestCase(unittest.TestCase):
"""Tests for the hexadecimal part of the _replace_numerals function"""
    def test_simple_literal(self):
        self.assertEqual('2', ISP._replace_numerals('0x0002'))
    def test_uppercase_literal(self):
        self.assertEqual('243', ISP._replace_numerals('0XF3'))
    def test_lowercase_literal(self):
        self.assertEqual('31', ISP._replace_numerals('0x1f'))
    def test_mixedcase_literal(self):
        self.assertEqual('175', ISP._replace_numerals('0xaF'))
    def test_random_literal_position(self):
        self.assertEqual('FIERO69 VERY FIERO', ISP._replace_numerals('FIERO0x45 VERY FIERO'))
    def test_complex_scenario(self):
        self.assertEqual('FIERO69 SPACE VERY FIERO 84', ISP._replace_numerals('FIERO0x45 SPACE VERY FIERO 0X54'))
class BinaryToDecimalTestCases(unittest.TestCase):
"""Tests for the binary part of the _replace_numerals function"""
    def test_leading_zeroes(self):
        self.assertEqual('1', ISP._replace_numerals('0b0001'))
    def test_trailing_zeroes(self):
        self.assertEqual('4', ISP._replace_numerals('0b100'))
    def test_uppercase_literal(self):
        self.assertEqual('6', ISP._replace_numerals('0B110'))
    def test_random_literal_position(self):
        self.assertEqual('FIERO69 VERY FIERO', ISP._replace_numerals('FIERO0b1000101 VERY FIERO'))
    def test_complex_scenario(self):
        self.assertEqual('FIERO1 SPACE VERY FIERO 2', ISP._replace_numerals('FIERO0b01 SPACE VERY FIERO 0b10'))
class KeywordTestCases(unittest.TestCase):
"""Tests for the _parse_line function"""
PA = None
def setUp(self):
self.PA = is_parser.ProcessorArchitecture()
# Binary and hex tests not performed since the conversion is performed before keywords are parsed
def test_word_size_regular(self):
ISP._parse_line(0, 'WORD_SIZE(6)', self.PA)
        self.assertEqual(6, self.PA.WORD_SIZE)
def test_word_size_random_spacing(self):
ISP._parse_line(0, ' WORD_SIZE ( 16 ) ', self.PA)
        self.assertEqual(16, self.PA.WORD_SIZE)
def test_instruction_regular(self):
ISP._parse_line(0, 'INSTRUCTION(1, 1, \'LD B,_A\')', self.PA)
expected = is_parser.Instruction(1, 1, 'LD B,_A')
        self.assertEqual(expected, self.PA.INSTRUCTION_SET[-1])
def test_instruction_random_spacing(self):
ISP._parse_line(0, ' INSTRUCTION (5 , 2 , \'JP M,_$address\' )', self.PA)
expected = is_parser.Instruction(5, 2, 'JP M,_$address')
        self.assertEqual(expected, self.PA.INSTRUCTION_SET[-1])
def suite():
htd_suite = unittest.TestLoader().loadTestsFromTestCase(HexToDecimalTestCase)
btd_suite = unittest.TestLoader().loadTestsFromTestCase(BinaryToDecimalTestCases)
keyword_suite = unittest.TestLoader().loadTestsFromTestCase(KeywordTestCases)
return unittest.TestSuite([htd_suite, btd_suite, keyword_suite])
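# Hedged convenience runner (added for illustration; not part of the original
# test module): execute the combined suite directly.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())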
|
{
"content_hash": "6d4c4b9f51442383e99d868d1ba1383f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 114,
"avg_line_length": 38.1625,
"alnum_prop": 0.68948575171962,
"repo_name": "G3Kappa/kasmc",
"id": "7d91febca810bd50b1925e8ce805cb036746a132",
"size": "3053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parser_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36152"
}
],
"symlink_target": ""
}
|
from rtfng.utils import RTFTestCase
from rtfng.Elements import Document
from rtfng.document.base import RawCode
from rtfng.document.paragraph import Paragraph
from rtfng.document.section import Section
from rtfng.object.picture import Image
class PictureTestCase(RTFTestCase):
def make_pictures():
doc, section, styles = RTFTestCase.initializeDoc()
        # Text can be added directly to the section; a Paragraph object is created as needed.
section.append( 'Image Example 1' )
section.append( 'You can add images in one of two ways, either converting the '
'image each and every time like;' )
image = Image( 'examples/image.jpg' )
section.append( Paragraph( image ) )
section.append( 'Or you can use the image object to convert the image and then '
'save it to a raw code element that can be included later.' )
# Test RawCode -- split into separate test?
rawCodeDecl = image.ToRawCode('TEST_IMAGE')
assert rawCodeDecl.startswith('TEST_IMAGE = RawCode( """')
assert rawCodeDecl.endswith('""" )')
rawCode = RawCode(image.Data)
section.append(rawCode)
section.append('The above picture was displayed from a RawCode object without a Paragraph wrapper.')
section.append( 'Here are some png files' )
for f in [ 'examples/img1.png',
'examples/img2.png',
'examples/img3.png',
'examples/img4.png' ] :
section.append( Paragraph( Image( f ) ) )
return doc
make_pictures = staticmethod(make_pictures)
def test_pictures(self):
self.doTest()
|
{
"content_hash": "fad4729b45f2776782c1fe75a06097d2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 108,
"avg_line_length": 35.08163265306123,
"alnum_prop": 0.6340895869691682,
"repo_name": "nekstrom/pyrtf-ng",
"id": "4cda5a7f03f7d6ee5c2c0e656e470f69a6668f33",
"size": "1719",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/objects/test_pictures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "137316"
},
{
"name": "Shell",
"bytes": "4891"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from future.builtins.disabled import *
from future.builtins import *
from future.standard_library import install_aliases
install_aliases()
# pylint: enable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
from .arc_py_ext_error import ArcPyExtError
class MapLayerError(ArcPyExtError):
"""ArcPyExt exception for errors involving map layers."""
def __init__(self, message, layer, innerError = None):
super(MapLayerError, self).__init__(message, innerError)
self._layer = layer
@property
def layer(self):
return self._layer
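# Illustrative sketch (added; not part of the original module): raising and
# handling a MapLayerError. 'layer' is any placeholder object here.
def _example_map_layer_error(layer=None):
    try:
        raise MapLayerError("could not update layer", layer)
    except MapLayerError as error:
        return error.layer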
|
{
"content_hash": "337551d1a0ead3a7122e90f553ed3957",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 96,
"avg_line_length": 36.21052631578947,
"alnum_prop": 0.7296511627906976,
"repo_name": "DavidWhittingham/arcpyext",
"id": "99a05d4d5fcc4873a6baf64be9d1cf827d2f088c",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "arcpyext/exceptions/map_layer_error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "254777"
}
],
"symlink_target": ""
}
|
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('format15.xlsx')
def test_create_file_zero_number_format(self):
"""Test the creation of a simple XlsxWriter file 0 number format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({'bold': 1})
format2 = workbook.add_format({'bold': 1, 'num_format': 0})
worksheet.write('A1', 1, format1)
worksheet.write('A2', 2, format2)
workbook.close()
self.assertExcelEqual()
def test_create_file_zero_number_format_string(self):
"""Test the creation of a simple XlsxWriter file 0 number format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({'bold': 1})
format2 = workbook.add_format({'bold': 1, 'num_format': '0'})
worksheet.write('A1', 1, format1)
worksheet.write('A2', 2, format2)
workbook.close()
self.assertExcelEqual()
|
{
"content_hash": "d5ec68835dedf95a09f92457600509e6",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 27.127659574468087,
"alnum_prop": 0.6352941176470588,
"repo_name": "jmcnamara/XlsxWriter",
"id": "c8387464660617f47e55d484da9c1b0821a19ca5",
"size": "1487",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_format15.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
import gym
from typing import List, Any
TaskType = Any # Can be different types depending on env, e.g., int or dict
class TaskSettableEnv(gym.Env):
"""
Extension of gym.Env to define a task-settable Env.
Your env must implement this interface in order to be used with MAML.
For curriculum learning, you can add this API to your env such that
the `env_task_fn` can set the next task as needed.
Supports:
- Sampling from a distribution of tasks for meta-learning.
- Setting the env to any task it supports.
- Getting the current task this env has been set to.
Examples:
>>> env = TaskSettableEnv(...)
>>> ...
>>> Trainer.workers.foreach_env(lambda base_env: base_env.my_prop)
"""
def sample_tasks(self, n_tasks: int) -> List[TaskType]:
"""Samples task of the meta-environment
Args:
n_tasks (int) : number of different meta-tasks needed
Returns:
tasks (list) : an (n_tasks) length list of tasks
"""
raise NotImplementedError
def set_task(self, task: TaskType) -> None:
"""Sets the specified task to the current environment
Args:
task: task of the meta-learning environment
"""
raise NotImplementedError
def get_task(self) -> TaskType:
"""Gets the task that the agent is performing in the current environment
Returns:
task: task of the meta-learning environment
"""
raise NotImplementedError
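class _ExampleTaskEnv(TaskSettableEnv):
    """Minimal sketch of a concrete task-settable env (added for
    illustration; not part of the API). Tasks are plain ints; a real env
    would also implement `reset` and `step` from gym.Env.
    """
    def __init__(self):
        self.cur_task = 0
    def sample_tasks(self, n_tasks: int) -> List[TaskType]:
        return list(range(n_tasks))
    def set_task(self, task: TaskType) -> None:
        self.cur_task = task
    def get_task(self) -> TaskType:
        return self.cur_task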
|
{
"content_hash": "013f32afdb57fe964f800ff530f9fd58",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 29.51923076923077,
"alnum_prop": 0.6306188925081433,
"repo_name": "pcmoritz/ray-1",
"id": "003c79293749da11628a61a2a4cc4e10644cf3fb",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/env/apis/task_settable_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
'''The app module, containing the app factory function.'''
from flask import Flask, render_template
from clearstate.settings import ProdConfig
from clearstate.assets import assets
from clearstate.extensions import (
bcrypt,
cache,
db,
login_manager,
migrate,
debug_toolbar,
gravatar,
babel,
)
from clearstate import public, user, page
def create_app(config_object=ProdConfig):
'''An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
'''
app = Flask(__name__)
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_before_handlers(app)
return app
def register_extensions(app):
assets.init_app(app)
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
gravatar.init_app(app)
babel.init_app(app)
babel.timezoneselector(page.views.get_timezone_from_page)
return None
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
app.register_blueprint(page.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_before_handlers(app):
return None
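# Hedged usage sketch (added for illustration; not part of the original
# module). Assumes a DevConfig class exists in clearstate.settings alongside
# ProdConfig.
def _example_create_dev_app():
    from clearstate.settings import DevConfig
    return create_app(config_object=DevConfig)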
|
{
"content_hash": "cb0fa5ebb225a95ee48b5decf8a594d6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 73,
"avg_line_length": 26.208955223880597,
"alnum_prop": 0.6981776765375854,
"repo_name": "sharoonthomas/clearstate",
"id": "4aae1e85aea191b2c0bb08e83e2ae87bfec5da87",
"size": "1780",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "clearstate/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7754"
},
{
"name": "HTML",
"bytes": "38746"
},
{
"name": "JavaScript",
"bytes": "182377"
},
{
"name": "Python",
"bytes": "48679"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import json
import re
import thread
from debug_toolbar.toolbar import DebugToolbar
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_text
class ToolbarCache(object):
def __init__(self):
self._toolbars = {}
def create(self, request):
toolbar = DebugToolbar(request)
self._toolbars[thread.get_ident()] = toolbar
return toolbar
def pop(self):
return self._toolbars.pop(thread.get_ident(), None)
def get(self):
return self._toolbars.get(thread.get_ident(), None)
toolbar_cache = ToolbarCache()
class DebugMiddleware(object):
_body_regexp = re.compile(re.escape('</body>'), flags=re.IGNORECASE)
def show_toolbar(self, request):
# TODO(dcramer): support VPN via INTERNAL_IPS + ipaddr maps
if not settings.SENTRY_DEBUGGER:
return False
if not request.is_superuser():
return False
if 'text/html' not in request.META.get('HTTP_ACCEPT', '*/*'):
return False
return True
def process_request(self, request):
# Decide whether the toolbar is active for this request.
if not self.show_toolbar(request):
return
toolbar = toolbar_cache.create(request)
# Activate instrumentation ie. monkey-patch.
for panel in toolbar.enabled_panels:
panel.enable_instrumentation()
# Run process_request methods of panels like Django middleware.
response = None
for panel in toolbar.enabled_panels:
response = panel.process_request(request)
if response:
break
return response
def process_view(self, request, view_func, view_args, view_kwargs):
toolbar = toolbar_cache.get()
if not toolbar:
return
# Run process_view methods of panels like Django middleware.
response = None
for panel in toolbar.enabled_panels:
response = panel.process_view(request, view_func, view_args, view_kwargs)
if response:
break
def process_response(self, request, response):
toolbar = toolbar_cache.pop()
if not toolbar:
return response
# Run process_response methods of panels like Django middleware.
for panel in reversed(toolbar.enabled_panels):
new_response = panel.process_response(request, response)
if new_response:
response = new_response
# Deactivate instrumentation ie. monkey-unpatch. This must run
# regardless of the response. Keep 'return' clauses below.
# (NB: Django's model for middleware doesn't guarantee anything.)
for panel in reversed(toolbar.enabled_panels):
panel.disable_instrumentation()
        # Clear any stored toolbar show/hide state (the 'djdt' cookie).
if 'djdt' in request.COOKIES:
response.delete_cookie('djdt')
content = force_text(response.content, encoding='utf-8')
if 'text/html' not in response['Content-Type']:
if 'application/json' in response['Content-Type']:
content = json.dumps(json.loads(content), indent=2)
content = render_to_string('debug_toolbar/wrapper.html', {
'content': content,
})
response['Content-Type'] = 'text/html'
# Insert the toolbar in the response.
bits = self._body_regexp.split(content)
if len(bits) > 1:
bits[-2] += toolbar.render_toolbar()
content = '</body>'.join(bits)
response.content = content
response['Content-Length'] = len(content)
return response
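# Illustrative sketch (added; not part of the original module) of the
# </body> injection above: split the markup on the closing tag
# (case-insensitively) and append a snippet to the last body chunk.
def _example_inject(content, snippet='<div id="toolbar"></div>'):
    bits = DebugMiddleware._body_regexp.split(content)
    if len(bits) > 1:
        bits[-2] += snippet
        content = '</body>'.join(bits)
    return content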
|
{
"content_hash": "49d1ebb4f9ab3466d15933a2da2b5ef8",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 85,
"avg_line_length": 33.67256637168141,
"alnum_prop": 0.621550591327201,
"repo_name": "nicholasserra/sentry",
"id": "bbd0a73dcf26e8fbfcc8eb3f43ff03fbb151a98a",
"size": "3805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/debug/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174940"
},
{
"name": "HTML",
"bytes": "199996"
},
{
"name": "JavaScript",
"bytes": "609445"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "4816"
},
{
"name": "Python",
"bytes": "8613631"
}
],
"symlink_target": ""
}
|
"""
This module contains the base class on which all the commands are built.
"""
class Command(object):
def __init__(self, id, command, summary, votes, url):
self.id = id
self.command = command
self.summary = summary
self.votes = votes
self.url = url
def __repr__(self):
return "{}(id={}, command={}, summary={}, votes={}, url={})".format(
self.__class__.__name__,
self.id,
self.command,
self.summary,
self.votes,
self.url
)
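# Hedged usage sketch (added for illustration; not part of the original
# module); the field values below are made up.
def _example_command():
    cmd = Command(1, "tar -xzf archive.tar.gz", "Extract a gzipped tarball",
                  42, "http://example.com/commands/view/1")
    return repr(cmd)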
|
{
"content_hash": "821ea3804d62f61af8c353e97b14060c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 24.434782608695652,
"alnum_prop": 0.501779359430605,
"repo_name": "ncrocfer/clf",
"id": "2c366719d64d79d94bfa89a458cdb23e20f82d9c",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clf/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16007"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
import requests
import six
from six.moves.urllib import parse as urlparse
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class OpencontrailAPIFailed(Exception):
pass
class AnalyticsAPIBaseClient(object):
"""Opencontrail Base Statistics REST API Client."""
def __init__(self, endpoint, username, password, domain, verify_ssl=True):
self.endpoint = endpoint
self.username = username
self.password = password
self.domain = domain
self.verify_ssl = verify_ssl
self.sid = None
def authenticate(self):
path = '/authenticate'
data = {'username': self.username,
'password': self.password,
'domain': self.domain}
req_params = self._get_req_params(data=data)
url = urlparse.urljoin(self.endpoint, path)
resp = requests.post(url, **req_params)
if resp.status_code != 302:
raise OpencontrailAPIFailed(
_('Opencontrail API returned %(status)s %(reason)s') %
{'status': resp.status_code, 'reason': resp.reason})
self.sid = resp.cookies['connect.sid']
def request(self, path, fqdn_uuid, data, retry=True):
if not self.sid:
self.authenticate()
if not data:
data = {'fqnUUID': fqdn_uuid}
else:
data['fqnUUID'] = fqdn_uuid
req_params = self._get_req_params(data=data,
cookies={'connect.sid': self.sid})
url = urlparse.urljoin(self.endpoint, path)
self._log_req(url, req_params)
resp = requests.get(url, **req_params)
self._log_res(resp)
# it seems that the sid token has to be renewed
if resp.status_code == 302:
self.sid = 0
if retry:
return self.request(path, fqdn_uuid, data,
retry=False)
if resp.status_code != 200:
raise OpencontrailAPIFailed(
_('Opencontrail API returned %(status)s %(reason)s') %
{'status': resp.status_code, 'reason': resp.reason})
return resp
def _get_req_params(self, params=None, data=None, cookies=None):
req_params = {
'headers': {
'Accept': 'application/json'
},
'data': data,
'verify': self.verify_ssl,
'allow_redirects': False,
'cookies': cookies
}
return req_params
@staticmethod
def _log_req(url, req_params):
if not CONF.debug:
return
curl_command = ['REQ: curl -i -X GET ']
params = []
for name, value in six.iteritems(req_params['data']):
params.append("%s=%s" % (name, value))
curl_command.append('"%s?%s" ' % (url, '&'.join(params)))
for name, value in six.iteritems(req_params['headers']):
curl_command.append('-H "%s: %s" ' % (name, value))
LOG.debug(''.join(curl_command))
@staticmethod
def _log_res(resp):
if not CONF.debug:
return
dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version,
resp.status_code,
resp.reason)]
dump.extend('%s: %s\n' % (k, v)
for k, v in six.iteritems(resp.headers))
dump.append('\n')
if resp.content:
dump.extend([resp.content, '\n'])
LOG.debug(''.join(dump))
class NetworksAPIClient(AnalyticsAPIBaseClient):
"""Opencontrail Statistics REST API Client."""
def get_port_statistics(self, fqdn_uuid):
"""Get port statistics of a network
URL:
/tenant/networking/virtual-machines/details
PARAMS:
fqdnUUID=fqdn_uuid
type=vn
"""
path = '/api/tenant/networking/virtual-machines/details'
resp = self.request(path, fqdn_uuid, {'type': 'vn'})
return resp.json()
class Client(object):
def __init__(self, endpoint, username, password, domain, verify_ssl=True):
self.networks = NetworksAPIClient(endpoint, username, password,
domain, verify_ssl)
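# Hedged usage sketch (added for illustration; not part of the original
# module); the endpoint, credentials and UUID below are placeholders.
def _example_port_statistics():
    client = Client('http://127.0.0.1:8143', 'admin', 'secret',
                    'default-domain', verify_ssl=False)
    return client.networks.get_port_statistics('some-network-fqdn-uuid')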
|
{
"content_hash": "314cd3b86bee7895b41ef274cf488a37",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 78,
"avg_line_length": 29.85135135135135,
"alnum_prop": 0.5427795382526029,
"repo_name": "froyobin/ceilometer",
"id": "6c66f42c385b32175353786e1c846a8212a670c4",
"size": "5084",
"binary": false,
"copies": "2",
"ref": "refs/heads/out_branch",
"path": "ceilometer/network/statistics/opencontrail/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6027"
},
{
"name": "Python",
"bytes": "2682026"
},
{
"name": "Shell",
"bytes": "3204"
}
],
"symlink_target": ""
}
|
from .runner import TAPTestRunner
__all__ = ["TAPTestRunner"]
__version__ = "3.1"
|
{
"content_hash": "9fef90fbd93c634f93931f26ae2c2b73",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 33,
"avg_line_length": 20.75,
"alnum_prop": 0.6506024096385542,
"repo_name": "mblayman/tappy",
"id": "6d7ab3ff8ea5abb6125c3e97168ca3f0920df597",
"size": "83",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tap/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "91496"
}
],
"symlink_target": ""
}
|
"""
Basic utilities for handling avro schemas
"""
from avro import schema
def hash_func(self):
""" Monkey patching the avro library's schema module's RecordSchema class.
Patching the https://github.com/apache/avro/blob/trunk/lang/py3/avro/schema.py as the RecordSchema doesn't have a __hash__ function, but it has an __eq__ function, which means that instances
of RecordSchema are not hashable, so they cannot be added to dicts or sets, see bug I raised at https://issues.apache.org/jira/browse/AVRO-1737
Hopefully this function later will get added as __hash__ method to the RecordSchema class in the avro.schema module and this monkey patching won't be required anymore
"""
return hash(str(self))
schema.RecordSchema.__hash__ = hash_func
def parse_schema_from_string(schema_str):
"""Parse a schema given a schema string"""
return schema.Parse(schema_str)
def parse_schema_from_file(schema_path):
"""Parse a schema from a file path"""
with open(schema_path) as f:
return parse_schema_from_string(f.read())
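# Illustrative sketch (added; not part of the original module): after the
# monkey patch above, parsed record schemas can serve as dict keys or set
# members. The schema path below is a placeholder.
def _example_schema_is_hashable(schema_path="user.avsc"):
    parsed = parse_schema_from_file(schema_path)
    return {parsed: "now hashable"}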
|
{
"content_hash": "fc04cece298ffbc8e2a96e534f5f1549",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 194,
"avg_line_length": 41,
"alnum_prop": 0.7223264540337712,
"repo_name": "zoltan-fedor/python-confluent-schemaregistry",
"id": "b94387395c1301c2864d92a2bf273e381a147462",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "confluent/schemaregistry/serializers/Util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41570"
}
],
"symlink_target": ""
}
|
import math
import numpy
import scipy.ndimage
from PIL import Image
from PIL import ImageDraw
from pyami import imagefun
from pyami import ellipse
from pyami import primefactor
from appionlib import apDisplay
from appionlib.apCtf import ctfpower
from appionlib.apImage import imagefile
from appionlib.apImage import imagestat
from appionlib.apImage import imagefilter
#from appionlib import lowess
###this file is not allowed to import any apCtf files - other than ctfpower
debug = False
#===================
def getCtfExtrema(focus=1.0e-6, mfreq=4.498e6, cs=2e-3,
volts=120000, ampconst=0.000, numzeros=3, zerotype="peaks"):
"""
mfreq - frequency in inverse meters = 1.0/(mpix * numcols)
"""
if debug is True:
print "defocus %.2f microns (underfocus is positive)"%(focus*1e6)
print "Freq %.1e 1/m"%(mfreq)
print "C_s %.1f mm"%(cs*1e3)
print "High tension %.1f kV"%(volts*1e-3)
if focus*1e6 > 15.0 or focus*1e6 < 0.1:
apDisplay.printWarning("atypical defocus value %.1f microns (underfocus is positive)"
%(focus*1e6))
if cs*1e3 > 7.0 or cs*1e3 < 0.4:
apDisplay.printWarning("atypical C_s value %.1f mm"%(cs*1e3))
if mfreq > 1e7 or mfreq < 1e5:
apDisplay.printWarning("atypical mfreq value %.2e 1/meters"%(mfreq))
if volts*1e-3 > 400.0 or volts*1e-3 < 60:
apDisplay.printWarning("atypical high tension value %.1f kiloVolts"%(volts*1e-3))
wavelength = getTEMLambda(volts)
a = 0.5*cs*math.pi*wavelength**3
b = -focus*math.pi*wavelength
c = -math.asin(ampconst)
if debug is True:
print "quadradtic parameters %.3e, %.3e, %.3e"%(a,b,c)
#eq: sin^2 (a r^4 + b r^2 + c) = 0
#==> a r^4 + b r^2 + c - n*pi/2 = 0
	#quadratic: r^2 = [ -b +/- sqrt( b^2 - 4*a*(c + n*pi/2)) ] / 2*a
# note: "-b + sqrt(..)" is always the positive (non-imaginary) root
## after a certain point the peaks switch direction
#peakswitch = (2.0*math.sqrt(focus/(cs*wavelength**2)))/math.pi + 0.9
#if debug is True:
# print "Peak switch", peakswitch
distances = []
for i in range(numzeros):
if zerotype.startswith("valley"):
innerroot = b**2. - 4. * a * (c + (i+1)*math.pi) ## just valleys/minima
elif zerotype.startswith("peak"):
innerroot = b**2. - 4. * a * (c + (i+0.5)*math.pi) ## just peaks/maxima
else:
innerroot = b**2. - 4. * a * (c + (i/2.0+0.5)*math.pi) ## all extrema
if innerroot < 0:
continue
root = math.sqrt(innerroot)
radsq1 = (-b + root)/(2*a)
radsq2 = (-b - root)/(2*a)
if radsq1 > 0 and radsq1 < radsq2:
rad1 = math.sqrt(radsq1)
pixeldist = rad1/mfreq
elif radsq2 > 0 and radsq2 < radsq1:
rad2 = math.sqrt(radsq2)
pixeldist = rad2/mfreq
else:
print "ERROR"
continue
distances.append(pixeldist)
if debug is True:
print "radius of zero number %d is %d pixels"%(i+1, pixeldist)
return numpy.array(distances)
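#===================
def _example_ctf_extrema():
	"""
	Hedged usage sketch (added for illustration; not part of the original
	module): radii of the first three CTF peaks for typical 120 kV
	conditions; all parameter values below are placeholders.
	"""
	return getCtfExtrema(focus=1.5e-6, mfreq=4.5e6, cs=2e-3,
		volts=120000, ampconst=0.07, numzeros=3, zerotype="peaks")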
#===================
def getFirstCTFzeroRadius(focus=-1.0e-6, pixelsize=1.0e-10, cs=2e-2,
volts=120000, ampconst=0.000, cols=2048):
if debug is True:
print "defocus %.2f microns"%(focus*1e6)
print "pixelsize %.3f Angstroms"%(pixelsize*1e10)
print "C_s %.1f mm"%(cs*1e2)
print "High tension %.1f kV"%(volts*1e-3)
xfreq = 1.0/( (cols-1)*2.*pixelsize )
xorigin = cols/2. - 0.5
wavelength = getTEMLambda(volts)
a = 0.5*cs*math.pi*wavelength**3
b = focus*math.pi*wavelength
c = -math.asin(ampconst)
if debug is True:
print "quadradtic parameters %.3e, %.3e, %.3e"%(a,b,c)
#eq: sin (a r^4 + b r^2 + c) = 0
#==> a r^4 + b r^2 + c = n*pi
	#quadratic: r^2 = [ -b +/- sqrt( b^2 - 4*a*(c-n*pi)) ] / 2*a
# note: "-b + sqrt(..)" is always the positive (non-imaginary) root
root = math.sqrt(b**2 - 4 * a * (c + math.pi/2.0))
radsq = (-b + root)/(2*a)
	if debug is True:
		print (-b + root)/(2*a), (-b - root)/(2*a)
rad = math.sqrt(radsq)
pixeldist = rad/xfreq
if debug is True:
print "radius of first zero is %d pixels"%(pixeldist)
return pixeldist
#===================
def getTEMLambda(volts):
"""
get the relativistic wavelength of the electrons in meters from volts (not kilovolts)
see http://en.wikipedia.org/wiki/Electron_diffraction#Wavelength_of_electrons
"""
#f64 planck = 6.6260709544e-34;
#f64 e_mass = 9.10938188e-31;
#f64 e_charge = 1.60217646e-19;
#f64 c_speed = 299792458.0;
t1 = 1.2265191e-9 # This is planck/sqrt(2*e_masss*e_charge)
t2 = 9.7840893e-7 # This is e_charge/(2*e_mass*c_speed*c_speed)
	wavelength = t1/math.sqrt(volts + t2 * volts**2)
if debug is True:
print "wavelength %.4f Angstroms"%(wavelength*1e10)
return wavelength
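#============
def _example_wavelength():
	"""
	Worked example (added for illustration): at 120 kV,
	lambda = t1/sqrt(V + t2*V^2) = 1.2265191e-9/sqrt(120000 + 14089),
	which is about 3.35e-12 m, i.e. 0.0335 Angstroms.
	"""
	return getTEMLambda(120000)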
#============
def getPowerSpectraPreBin(outerresolution, apix):
if debug is True:
print "Resolution request %.3f"%(outerresolution)
print "Init max resolution %.3f"%(apix*2)
powertwo = math.log(outerresolution/apix)/math.log(2.0)-1
prebin = int(2**math.floor(powertwo))
if prebin < 1:
prebin = 1
if debug is True:
print "Pre-Binning", prebin
print "Final max resolution %.3f"%(apix*prebin*2)
return prebin
#============
def defocusRatioToEllipseRatio(defocus1, defocus2, freq, cs, volts, ampcontrast):
"""
apix and outerresolution must have same units (e.g., Anstroms or meters)
"""
radii1 = getCtfExtrema(defocus1, freq*1e10,
cs, volts, ampcontrast, numzeros=1, zerotype="valleys")
radii2 = getCtfExtrema(defocus2, freq*1e10,
cs, volts, ampcontrast, numzeros=1, zerotype="valleys")
if len(radii1) == 0 or len(radii2) == 0:
return None
ellipratio = radii1[0]/radii2[0]
return ellipratio
#============
def powerSpectraToOuterResolution(image, outerresolution, apix):
"""
apix and outerresolution must have same units (e.g., Anstroms or meters)
"""
if debug is True:
print "Computing power spectra..."
fieldsize = ctfpower.getFieldSize(image.shape)
binning = max(image.shape)/fieldsize
#data = imagefun.power(image)
data, freq = ctfpower.power(image, apix, fieldsize)
#data = numpy.exp(data)
data = data.astype(numpy.float64)
powerspec = trimPowerSpectraToOuterResolution(data, outerresolution, freq)
return powerspec, freq
#============
def trimPowerSpectraToOuterResolution(powerspec, outerresolution, freq):
"""
freq and outerresolution must have same units (e.g., Anstroms or meters)
resolution = (# columns) * apix / (pixel distance from center)
therefore:
pixel distance from center = (# columns) * apix / resolution
"""
if debug is True:
print "trimPowerSpectraToOuterResolution()"
imagewidth = powerspec.shape[0]
initmaxres = 2.0/(freq*imagewidth)
if debug is True:
print "__Image shape %d x %d"%(powerspec.shape[0], powerspec.shape[1])
print "__Frequeny %.3e"%(freq)
print "__Resolution request %.3f"%(outerresolution)
print "__Init max resolution %.3f"%(initmaxres)
if initmaxres > outerresolution:
apDisplay.printWarning("Requested resolution (%.3f) is not available (%.3f)"
%(outerresolution, initmaxres))
outerresolution = initmaxres
pixellimitradius = int(math.ceil(1./(freq * outerresolution)))
goodpixellimitradius = primefactor.getNextEvenPrime(pixellimitradius)
finalres = 1./(freq * goodpixellimitradius)
if debug is True:
print "__Pixel limit dimension: ", goodpixellimitradius
print "__Final max resolution %.3f"%(finalres)
### convert to diameter and trim
newshape = (goodpixellimitradius*2, goodpixellimitradius*2)
if debug is True:
print "__Trimming image"
trimpowerspec = imagefilter.frame_cut(powerspec, newshape)
if newshape != trimpowerspec.shape:
apDisplay.printError("shape mismatch for frame_cut (%d,%d) --> (%d,%d) = (%d,%d)"
%(powerspec.shape[0],powerspec.shape[1],
newshape[0],newshape[1],
trimpowerspec.shape[0],trimpowerspec.shape[1]))
if debug is True:
print "original image size %d x %d"%(powerspec.shape)
print "trimmed image size %d x %d"%(trimpowerspec.shape)
return trimpowerspec
#============
def draw_ellipse_to_file(jpgfile, imgarray, major, minor, angle, center=None,
numpoints=64, color="#3d3df2", width=4):
"""
major - major axis radius (in pixels)
minor - minor axis radius (in pixels)
angle - angle (in degrees)
center - position of centre of ellipse
numpoints - # of points used that make an ellipse
angle is positive toward y-axis
"""
if center is None:
center = numpy.array(imgarray.shape, dtype=numpy.float)/2.0
points = ellipse.generate_ellipse(major, minor, angle, center, numpoints, None, "step", True)
x = points[:,0]
y = points[:,1]
## wrap around to end
x = numpy.hstack((x, [x[0],]))
y = numpy.hstack((y, [y[0],]))
## convert image
originalimage = imagefile.arrayToImage(imgarray)
originalimage = originalimage.convert("RGB")
pilimage = originalimage.copy()
draw = ImageDraw.Draw(pilimage)
for i in range(len(x)-1):
xy = (x[i], y[i], x[i+1], y[i+1])
draw.line(xy, fill=color, width=width)
## create an alpha blend effect
originalimage = Image.blend(originalimage, pilimage, 0.9)
originalimage.save(jpgfile, "JPEG", quality=85)
return
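# Example call (file name and ellipse parameters are placeholders):
# draw_ellipse_to_file("powerspec.jpg", powerspec, major=120.0, minor=80.0,
#     angle=30.0)  # overlays a semi-transparent ellipse on the spectrum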
#============
def rotationalAverage(image, ringwidth=3.0, innercutradius=None, full=False, median=False):
"""
compute the rotational average of a 2D numpy array
full : False -- only average complete circles (no edges/corners)
True -- rotational average out to corners of image
median : False -- calculate the mean of each ring
True -- calculate the median of each ring (slower)
"""
if debug is True:
print "ring width %.2f pixels"%(ringwidth)
shape = image.shape
## create a grid of distance from the center
xhalfshape = shape[0]/2.0
x = numpy.arange(-xhalfshape, xhalfshape, 1) + 0.5
yhalfshape = shape[1]/2.0
y = numpy.arange(-yhalfshape, yhalfshape, 1) + 0.5
xx, yy = numpy.meshgrid(x, y)
	## subtract 0.5 so the four pixels nearest the center map to radius zero
	radial = xx**2 + yy**2 - 0.5
	radial = numpy.sqrt(radial)
## convert to integers
radial = radial/ringwidth
radial = numpy.array(radial, dtype=numpy.int32)
if shape[0] < 32:
print radial
if debug is True:
print "computing rotational average xdata..."
xdataint = numpy.unique(radial)
if full is False:
### trims any edge artifacts from rotational average
		### int() so the value can be used directly as a slice index
		outercutsize = int((shape[0]/2-2)/ringwidth)
if debug is True:
apDisplay.printMsg("Num X points %d, Half image size %d, Trim size %d, Ringwidth %.2f, Percent trim %.1f"
%(xdataint.shape[0], shape[0]/2-2, outercutsize, ringwidth, 100.*outercutsize/float(xdataint.shape[0])))
if outercutsize > xdataint.shape[0]:
apDisplay.printWarning("Outer cut radius is larger than X size")
xdataint = xdataint[:outercutsize]
if innercutradius is not None:
innercutsize = int(math.floor(innercutradius/ringwidth))
if debug is True:
apDisplay.printMsg("Num X points %d, Half image size %d, Trim size %d, Ringwidth %.2f, Percent trim %.1f"
%(xdataint.shape[0], shape[0]/2-2, innercutsize, ringwidth, 100.*innercutsize/float(xdataint.shape[0])))
xdataint = xdataint[innercutsize:]
	data = image.copy()
	if median is True:
		print "performing very slow median calculation loop on %d values"%(len(xdataint))
		for i in xdataint:
			## local name avoids shadowing the boolean `median` flag
			medval = numpy.median(data[radial == i])
			data[radial == i] = medval
if debug is True:
print "computing rotational average ydata..."
ydata = numpy.array(scipy.ndimage.mean(data, radial, xdataint))
xdata = numpy.array(xdataint, dtype=numpy.float64)*ringwidth
if debug is True:
print "... finish rotational average"
apDisplay.printMsg(" expected size of rotational average: %d"%(image.shape[0]/2))
apDisplay.printMsg("actual max size of rotational average: %d"%(xdata.max()))
return xdata, ydata
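# Minimal usage sketch, assuming `powerspec` is a square 2D power spectrum:
# xdata, ydata = rotationalAverage(powerspec, ringwidth=3.0)
# xdata holds ring radii in pixels, ydata the mean intensity of each ring;
# pass median=True for a more robust (but slower) per-ring estimate.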
#============
def funcrad(x, xdata=None, ydata=None):
return numpy.interp(x, xdata, ydata)
#============
def unRotationalAverage(xdata, ydata, shape):
"""
compute the rotational average of a 2D numpy array
"""
image = imagefun.fromRadialFunction(funcrad, shape,
xdata=xdata, ydata=ydata, dtype=numpy.float64)
return image
#============
def rotationalAverage2D(image, ringwidth=3.0):
"""
compute the rotational average of a 2D numpy array
"""
xdata, ydata = rotationalAverage(image, ringwidth, full=True)
newimage = unRotationalAverage(xdata, ydata, image.shape)
return newimage
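# Round-trip sketch (assumes a square image `img`): the 2D average is the
# 1D average painted back onto the radial grid, i.e.
# xd, yd = rotationalAverage(img, full=True)
# same2d = unRotationalAverage(xd, yd, img.shape)  # ~ rotationalAverage2D(img)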
#============
def unEllipticalAverage(xdata, ydata, ellipratio, ellipangle, shape):
"""
compute the rotational average of a 2D numpy array
ellip angle is positive toward y-axis
"""
radial = getEllipticalDistanceArray(ellipratio, ellipangle, shape)
radial = radial/math.sqrt(ellipratio)
image = imagefun.fromRadialFunction(funcrad, shape, xdata=xdata, ydata=ydata)
def funcrc(r, c, radial, **kwargs):
rr = numpy.array(numpy.floor(r), dtype=numpy.int)
cc = numpy.array(numpy.floor(c), dtype=numpy.int)
rad = radial[rr,cc]
return funcrad(rad, **kwargs)
result = numpy.fromfunction(funcrc, shape, radial=radial,
xdata=xdata, ydata=ydata, dtype=numpy.float64)
return result
#============
def getEllipticalDistanceArray(ellipratio, ellipangle, shape):
## ellip angle is positive toward y-axis
if ellipratio < 1:
ellipratio = 1.0/ellipratio
ellipangle += 90
while ellipangle > 180:
ellipangle -= 180
while ellipangle < 0:
ellipangle += 180
if debug is True:
apDisplay.printColor("ellipangle = %.3f"%(ellipangle), "cyan")
bigshape = numpy.array(numpy.array(shape)*math.sqrt(2)/2., dtype=numpy.int)*2
xhalfshape = bigshape[0]/2.0
x = numpy.arange(-xhalfshape, xhalfshape, 1) + 0.5
yhalfshape = bigshape[1]/2.0
y = numpy.arange(-yhalfshape, yhalfshape, 1) + 0.5
xx, yy = numpy.meshgrid(x, y)
### apply ellipse ratio
yy = ellipratio*yy
radial = xx**2 + yy**2
### apply ellipse rotation
## ellip angle is positive toward y-axis, which is clockwise, so negative angle
radial = scipy.ndimage.interpolation.rotate(radial, angle=-ellipangle,
reshape=False, mode='wrap', order=1)
radial = imagefilter.frame_cut(radial, shape)
if debug is True:
print "minimal radial distance", radial.min()
radial = numpy.sqrt(radial)
return radial
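# Sanity check (illustrative): with ellipratio = 1.0 the ellipse degenerates
# to a circle, so getEllipticalDistanceArray(1.0, 0.0, shape) reduces to the
# plain circular distance map (up to interpolation error) for any angle.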
#============
def ellipticalAverage(image, ellipratio, ellipangle, ringwidth=2.0, innercutradius=None, full=False):
"""
compute the elliptical average of a 2D numpy array
ellipratio: ratio of elliptical axes ( >= 1 )
= major / minor = a / b
= circle has a value of 1
ellipangle: angle of ellipse in degrees
## ellip angle is positive toward y-axis
full : False -- only average complete circles (no edges/corners)
True -- rotational average out to corners of image
"""
if debug is True:
print "ring width %.2f pixels"%(ringwidth)
bigshape = numpy.array(numpy.array(image.shape)*math.sqrt(2)/2., dtype=numpy.int)*2
radial = getEllipticalDistanceArray(ellipratio, ellipangle, image.shape)
## need to convert to integers for scipy
radial = radial/ringwidth
radial = numpy.array(radial, dtype=numpy.int32)
if bigshape[0] < 32:
print radial
if debug is True:
print "computing elliptical average xdata..."
xdataint = numpy.unique(radial)
if full is False:
### trims any edge artifacts from rotational average
		### int() so the value can be used directly as a slice index
		outercutsize = int((bigshape[0]/2-2)/ringwidth*math.sqrt(2)/2.)
if debug is True:
apDisplay.printColor("Num X points %d, Half image size %d, Outer cut size %d, Ringwidth %.2f, Percent trim %.1f"
%(xdataint.shape[0], bigshape[0]/2-2, outercutsize, ringwidth, 100.*outercutsize/float(xdataint.shape[0])), "yellow")
if outercutsize > xdataint.shape[0]:
apDisplay.printWarning("Outer cut radius is larger than X size")
xdataint = xdataint[:outercutsize]
if innercutradius is not None:
innercutsize = int(math.floor(innercutradius/ringwidth))
if debug is True:
apDisplay.printMsg("Num X points %d, Half image size %d, Trim size %d, Ringwidth %.2f, Percent trim %.1f"
%(xdataint.shape[0], bigshape[0]/2-2, innercutsize, ringwidth, 100.*innercutsize/float(xdataint.shape[0])))
xdataint = xdataint[innercutsize:]
	data = image.copy()
if debug is True:
print "computing elliptical average ydata..."
ydata = numpy.array(scipy.ndimage.mean(data, radial, xdataint))
	### dividing by sqrt(ellipratio) rescales the elliptical radius to the
	### geometric-mean radius, matching the scaling in unEllipticalAverage
	xdata = numpy.array(xdataint, dtype=numpy.float64)*ringwidth/math.sqrt(ellipratio)
if debug is True:
print "... finish elliptical average"
apDisplay.printMsg(" expected size of elliptical average: %d"%(bigshape[0]/2))
apDisplay.printMsg("actual max size of elliptical average: %d"%(xdata.max()))
return xdata, ydata
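# Typical chained use (all inputs below are placeholders): estimate the ring
# distortion from the fitted defoci, then average along the ellipse:
# ellipratio = defocusRatioToEllipseRatio(def1, def2, freq, cs, volts, ampcon)
# xdata, ydata = ellipticalAverage(powerspec, ellipratio, ellipangle, ringwidth=2.0)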
#============
def ellipticalArray(image, ellipratio, ellipangle):
"""
compute the elliptical average of a 2D numpy array
ellipratio: ratio of elliptical axes ( >= 1 )
= major / minor = a / b
= circle has a value of 1
ellipangle: angle of ellipse in degrees
## ellip angle is positive toward y-axis
"""
radial = getEllipticalDistanceArray(ellipratio, ellipangle, image.shape)
xdata = numpy.ravel(radial)/math.sqrt(ellipratio)
ydata = numpy.ravel(image)
print "Sorting data..."
xargs = numpy.argsort(xdata)
print "Applying sort..."
xdatasorted = xdata[xargs]
ydatasorted = ydata[xargs]
#end trim
outercutsize = image.shape[0]/2-2
outercutindex = numpy.searchsorted(xdatasorted, outercutsize)
print "returning values..."
return xdatasorted[:outercutindex], ydatasorted[:outercutindex]
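# Unlike ellipticalAverage, this returns every pixel as its own (radius,
# value) sample; a quick sketch (inputs are placeholders):
# xfull, yfull = ellipticalArray(powerspec, ellipratio, ellipangle)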
|
{
"content_hash": "e43aa1a793a873d9fa81c442e3fc774a",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 121,
"avg_line_length": 33.68023255813954,
"alnum_prop": 0.6934806375510674,
"repo_name": "vossman/ctfeval",
"id": "c4a714ba73ed0d79e600af766461d2cb4d02baba",
"size": "17402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appionlib/apCtf/ctftools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "450194"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
"""Define APIs for the servicegroup access."""
from oslo_config import cfg
from oslo_utils import importutils
from nova.i18n import _, _LW
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_default_driver = 'db'
servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver',
default=_default_driver,
help='The driver for servicegroup '
'service (valid options are: '
'db, zk, mc)')
CONF = cfg.CONF
CONF.register_opt(servicegroup_driver_opt)
# NOTE(geekinutah): By default drivers wait 5 seconds before reporting
INITIAL_REPORTING_DELAY = 5
class API(object):
_driver = None
_driver_name_class_mapping = {
'db': 'nova.servicegroup.drivers.db.DbDriver',
'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver',
'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver'
}
def __new__(cls, *args, **kwargs):
'''Create an instance of the servicegroup API.
args and kwargs are passed down to the servicegroup driver when it gets
created. No args currently exist, though. Valid kwargs are:
db_allowed - Boolean. False if direct db access is not allowed and
alternative data access (conductor) should be used
instead.
'''
if not cls._driver:
LOG.debug('ServiceGroup driver defined as an instance of %s',
str(CONF.servicegroup_driver))
driver_name = CONF.servicegroup_driver
try:
driver_class = cls._driver_name_class_mapping[driver_name]
except KeyError:
raise TypeError(_("unknown ServiceGroup driver name: %s")
% driver_name)
cls._driver = importutils.import_object(driver_class,
*args, **kwargs)
return super(API, cls).__new__(cls)
def __init__(self, *args, **kwargs):
self.basic_config_check()
def basic_config_check(self):
"""Perform basic config check."""
# Make sure report interval is less than service down time
report_interval = CONF.report_interval
if CONF.service_down_time <= report_interval:
new_service_down_time = int(report_interval * 2.5)
LOG.warning(_LW("Report interval must be less than service down "
"time. Current config: <service_down_time: "
"%(service_down_time)s, report_interval: "
"%(report_interval)s>. Setting service_down_time "
"to: %(new_service_down_time)s"),
{'service_down_time': CONF.service_down_time,
'report_interval': report_interval,
'new_service_down_time': new_service_down_time})
CONF.set_override('service_down_time', new_service_down_time)
def join(self, member_id, group_id, service=None):
"""Add a new member to the ServiceGroup
@param member_id: the joined member ID
@param group_id: the group name, of the joined member
@param service: the parameter can be used for notifications about
disconnect mode and update some internals
"""
LOG.debug('Join new ServiceGroup member %(member_id)s to the '
'%(group_id)s group, service = %(service)s',
{'member_id': member_id,
'group_id': group_id,
'service': service})
return self._driver.join(member_id, group_id, service)
def service_is_up(self, member):
"""Check if the given member is up."""
# NOTE(johngarbutt) no logging in this method,
# so this doesn't slow down the scheduler
return self._driver.is_up(member)
def leave(self, member_id, group_id):
"""Explicitly remove the given member from the ServiceGroup
monitoring.
"""
        LOG.debug('Explicitly remove the given member %(member_id)s from the '
                  '%(group_id)s group monitoring',
{'member_id': member_id, 'group_id': group_id})
return self._driver.leave(member_id, group_id)
def get_all(self, group_id):
"""Returns ALL members of the given group."""
LOG.debug('Returns ALL members of the [%s] '
'ServiceGroup', group_id)
return self._driver.get_all(group_id)
def get_one(self, group_id):
"""Returns one member of the given group. The strategy to select
the member is decided by the driver (e.g. random or round-robin).
"""
LOG.debug('Returns one member of the [%s] group', group_id)
return self._driver.get_one(group_id)
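# Minimal usage sketch (names below are placeholders, not Nova fixtures):
# servicegroup_api = API()
# servicegroup_api.join('compute-host-1', 'compute')
# alive = servicegroup_api.service_is_up(service_ref)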
|
{
"content_hash": "0cc1ce85c7ddfe1ffab6c4f863992511",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 41.67796610169491,
"alnum_prop": 0.5717771451809679,
"repo_name": "sajeeshcs/nested_quota_final",
"id": "07dcec13ba5f585fab2ae6e97294684a90d682b6",
"size": "5556",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/servicegroup/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "5941"
},
{
"name": "Python",
"bytes": "15636233"
},
{
"name": "Shell",
"bytes": "17729"
},
{
"name": "XML",
"bytes": "45372"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
favorite_languages = OrderedDict()
favorite_languages['kaushik'] = 'python'
favorite_languages['sahil'] = 'javascript'
favorite_languages['prashant'] = 'java'
for name, language in favorite_languages.items():
print(name.title() + "'s favorite language is " +
language.title() + ".")
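# Expected output (OrderedDict preserves insertion order):
# Kaushik's favorite language is Python.
# Sahil's favorite language is Javascript.
# Prashant's favorite language is Java.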
|
{
"content_hash": "33000d8337c6f5d19333bee89852817c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 29.90909090909091,
"alnum_prop": 0.7173252279635258,
"repo_name": "KT26/PythonCourse",
"id": "0464e7134b34f5a28b42a2caac54ac5f09eed974",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "8. Class/16.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52736"
}
],
"symlink_target": ""
}
|